94
107
pVM->hwaccm.s.lLastError = rc;
98
* Sets up and activates VT-x on the current CPU
112
* Sets up and activates VT-x on the current CPU.
100
114
* @returns VBox status code.
101
* @param pCpu CPU info struct
102
* @param pVM The VM to operate on. (can be NULL after a resume!!)
103
* @param pvCpuPage Pointer to the global cpu page.
104
* @param HCPhysCpuPage Physical address of the global cpu page.
115
* @param pCpu Pointer to the CPU info struct.
116
* @param pVM Pointer to the VM. (can be NULL after a resume!!)
117
* @param pvCpuPage Pointer to the global CPU page.
118
* @param HCPhysCpuPage Physical address of the global CPU page.
119
* @param fEnabledByHost Set if SUPR0EnableVTx or similar was used to enable
120
* VT-x/AMD-V on the host.
106
VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
122
VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
108
AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
109
AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
113
/* Set revision dword at the beginning of the VMXON structure. */
114
*(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
117
/** @todo we should unmap the two pages from the virtual address space in order to prevent accidental corruption.
118
* (which can have very bad consequences!!!)
121
if (ASMGetCR4() & X86_CR4_VMXE)
122
return VERR_VMX_IN_VMX_ROOT_MODE;
124
/* Make sure the VMX instructions don't cause #UD faults. */
125
ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
127
/* Enter VMX Root Mode. */
128
int rc = VMXEnable(HCPhysCpuPage);
131
ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
132
return VERR_VMX_VMXON_FAILED;
126
AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
127
AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
131
/* Set revision dword at the beginning of the VMXON structure. */
132
*(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
135
/** @todo we should unmap the two pages from the virtual address space in order to prevent accidental corruption.
136
* (which can have very bad consequences!!!)
139
if (ASMGetCR4() & X86_CR4_VMXE)
140
return VERR_VMX_IN_VMX_ROOT_MODE;
142
ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE); /* Make sure the VMX instructions don't cause #UD faults. */
145
* Enter VM root mode.
147
int rc = VMXEnable(HCPhysCpuPage);
150
ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
151
return VERR_VMX_VMXON_FAILED;
156
* Flush all VPIDs (in case we or any other hypervisor have been using VPIDs) so that
157
* we can avoid an explicit flush while using new VPIDs. We would still need to flush
158
* each time while reusing a VPID after hitting the MaxASID limit once.
161
&& pVM->hwaccm.s.vmx.fVPID
162
&& (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS))
164
hmR0VmxFlushVPID(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
165
pCpu->fFlushASIDBeforeUse = false;
168
pCpu->fFlushASIDBeforeUse = true;
171
* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}.
134
175
return VINF_SUCCESS;
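The statement implementing the @bugref{6255} fix mentioned in the comment above is elided from this excerpt. Presumably it simply bumps the per-CPU flush counter so that the cTLBFlushes comparison in the hmR0VmxSetupTLB* workers (shown further down) mismatches and every VCPU scheduled on this CPU picks up a fresh VPID after a resume. A one-line sketch of that assumption:

    ++pCpu->cTLBFlushes;   /* assumption: forces a TLB flush + new VPID/ASID on the next world switch on this CPU */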
138
* Deactivates VT-x on the current CPU
180
* Deactivates VT-x on the current CPU.
140
182
* @returns VBox status code.
141
* @param pCpu CPU info struct
142
* @param pvCpuPage Pointer to the global cpu page.
143
* @param HCPhysCpuPage Physical address of the global cpu page.
183
* @param pCpu Pointer to the CPU info struct.
184
* @param pvCpuPage Pointer to the global CPU page.
185
* @param HCPhysCpuPage Physical address of the global CPU page.
145
187
VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
147
189
AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
148
190
AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
150
193
/* If we're somehow not in VMX root mode, then we shouldn't dare leaving it. */
151
194
if (!(ASMGetCR4() & X86_CR4_VMXE))
524
656
rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
527
/* Allow the guest to directly modify these MSRs; they are restored and saved automatically. */
528
vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, true, true);
529
vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
530
vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, true, true);
531
vmxR0SetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
532
vmxR0SetMSRPermission(pVCpu, MSR_K6_STAR, true, true);
533
vmxR0SetMSRPermission(pVCpu, MSR_K8_SF_MASK, true, true);
534
vmxR0SetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, true, true);
535
vmxR0SetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true);
536
vmxR0SetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true);
660
* Allow the guest to directly modify these MSRs; they are loaded/stored automatically
661
* using MSR-load/store areas in the VMCS.
663
hmR0VmxSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, true, true);
664
hmR0VmxSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
665
hmR0VmxSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, true, true);
666
hmR0VmxSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
667
hmR0VmxSetMSRPermission(pVCpu, MSR_K6_STAR, true, true);
668
hmR0VmxSetMSRPermission(pVCpu, MSR_K8_SF_MASK, true, true);
669
hmR0VmxSetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, true, true);
670
hmR0VmxSetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true);
671
hmR0VmxSetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true);
672
if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
673
hmR0VmxSetMSRPermission(pVCpu, MSR_K8_TSC_AUX, true, true);
539
676
#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
540
/* Set the guest & host MSR load/store physical addresses. */
678
* Set the guest & host MSR load/store physical addresses.
541
680
Assert(pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
542
681
rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
544
683
rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
547
685
Assert(pVCpu->hwaccm.s.vmx.pHostMSRPhys);
548
686
rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pHostMSRPhys);
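The entries inside the guest/host MSR pages referenced above follow the 16-byte auto load/store format defined by the Intel spec: MSR index in the low dword, a reserved dword, then the 64-bit value. A minimal sketch of appending one guest entry; the struct name VMXAUTOMSR and the pGuestMSR member are illustrative assumptions, not necessarily the real identifiers:

    typedef struct VMXAUTOMSR       /* illustrative name for the 16-byte entry layout */
    {
        uint32_t u32IndexMSR;       /* MSR number */
        uint32_t u32Reserved;       /* must be zero */
        uint64_t u64Value;          /* value loaded/stored automatically by the CPU */
    } VMXAUTOMSR;

    VMXAUTOMSR *pGuestMsr = (VMXAUTOMSR *)pVCpu->hwaccm.s.vmx.pGuestMSR;  /* assumed ring-0 mapping of the guest MSR page */
    pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
    pGuestMsr->u32Reserved = 0;
    pGuestMsr->u64Value    = 0;     /* refreshed from the guest context before each VM-entry */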
626
772
pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
627
773
} /* for each VMCPU */
629
/* Choose the right TLB setup function. */
630
if (pVM->hwaccm.s.fNestedPaging)
632
pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = vmxR0SetupTLBEPT;
634
/* Default values for flushing. */
635
pVM->hwaccm.s.vmx.enmFlushPage = VMX_FLUSH_ALL_CONTEXTS;
636
pVM->hwaccm.s.vmx.enmFlushContext = VMX_FLUSH_ALL_CONTEXTS;
638
/* If the capabilities specify we can do more, then make use of it. */
639
if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV)
640
pVM->hwaccm.s.vmx.enmFlushPage = VMX_FLUSH_PAGE;
642
if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT)
643
pVM->hwaccm.s.vmx.enmFlushPage = VMX_FLUSH_SINGLE_CONTEXT;
645
if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT)
646
pVM->hwaccm.s.vmx.enmFlushContext = VMX_FLUSH_SINGLE_CONTEXT;
648
#ifdef HWACCM_VTX_WITH_VPID
650
if (pVM->hwaccm.s.vmx.fVPID)
652
pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = vmxR0SetupTLBVPID;
654
/* Default values for flushing. */
655
pVM->hwaccm.s.vmx.enmFlushPage = VMX_FLUSH_ALL_CONTEXTS;
656
pVM->hwaccm.s.vmx.enmFlushContext = VMX_FLUSH_ALL_CONTEXTS;
658
/* If the capabilities specify we can do more, then make use of it. */
659
if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV)
660
pVM->hwaccm.s.vmx.enmFlushPage = VMX_FLUSH_PAGE;
662
if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT)
663
pVM->hwaccm.s.vmx.enmFlushPage = VMX_FLUSH_SINGLE_CONTEXT;
665
if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT)
666
pVM->hwaccm.s.vmx.enmFlushContext = VMX_FLUSH_SINGLE_CONTEXT;
668
#endif /* HWACCM_VTX_WITH_VPID */
670
pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = vmxR0SetupTLBDummy;
776
* Setup the right TLB function based on CPU capabilities.
778
if (pVM->hwaccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID)
779
pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth;
780
else if (pVM->hwaccm.s.fNestedPaging)
781
pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT;
782
else if (pVM->hwaccm.s.vmx.fVPID)
783
pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID;
785
pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy;
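Whichever worker is selected above is then reached through the single function pointer; a sketch of the presumed call site, executed once per world switch before VM-entry (the exact spot in the ring-0 run loop is an assumption):

    pVM->hwaccm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);   /* decide flush type and ASID/VPID for this entry */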
673
VMXR0CheckError(pVM, &pVM->aCpus[0], rc);
788
hmR0VmxCheckError(pVM, &pVM->aCpus[0], rc);
678
* Sets the permission bits for the specified MSR
794
* Sets the permission bits for the specified MSR.
680
* @param pVCpu The VMCPU to operate on.
681
* @param ulMSR MSR value
682
* @param fRead Reading allowed/disallowed
683
* @param fWrite Writing allowed/disallowed
796
* @param pVCpu Pointer to the VMCPU.
797
* @param ulMSR The MSR value.
798
* @param fRead Whether reading is allowed.
799
* @param fWrite Whether writing is allowed.
685
static void vmxR0SetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
801
static void hmR0VmxSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
688
804
uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hwaccm.s.vmx.pMSRBitmap;
691
808
* 0x000 - 0x3ff - Low MSR read bits
692
809
* 0x400 - 0x7ff - High MSR read bits
693
810
* 0x800 - 0xbff - Low MSR write bits
* 0xc00 - 0xfff - High MSR write bits
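The bit manipulation itself is elided from this excerpt. A minimal sketch of how hmR0VmxSetMSRPermission presumably maps ulMSR onto the layout above (per the Intel-defined bitmap format, a set bit intercepts the access and a clear bit lets it through; the exact range checks are assumptions):

    unsigned ulBit;
    if (ulMSR <= 0x00001FFF)
        ulBit = ulMSR;                              /* low MSRs: read bits at 0x000, write bits at 0x800 */
    else if (ulMSR >= 0xC0000000 && ulMSR <= 0xC0001FFF)
    {
        ulBit = ulMSR - 0xC0000000;                 /* high MSRs: read bits at 0x400, write bits at 0xC00 */
        pMSRBitmap += 0x400;
    }
    else
        return;                                     /* all other MSRs keep causing VM-exits */

    if (fRead)
        ASMBitClear(pMSRBitmap, ulBit);             /* clear bit = no VM-exit on RDMSR */
    else
        ASMBitSet(pMSRBitmap, ulBit);

    if (fWrite)
        ASMBitClear(pMSRBitmap + 0x800, ulBit);     /* clear bit = no VM-exit on WRMSR */
    else
        ASMBitSet(pMSRBitmap + 0x800, ulBit);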
2127
* Setup the tagged TLB for EPT
2456
* Setup the tagged TLB for EPT+VPID.
2458
* @param pVM Pointer to the VM.
2459
* @param pVCpu Pointer to the VMCPU.
2461
static DECLCALLBACK(void) hmR0VmxSetupTLBBoth(PVM pVM, PVMCPU pVCpu)
2463
PHMGLOBLCPUINFO pCpu;
2465
Assert(pVM->hwaccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID);
2467
pCpu = HWACCMR0GetCurrentCpu();
2470
* Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2471
* This can happen both for start & resume due to long jumps back to ring-3.
2472
* If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
2473
* or the host Cpu is online after a suspend/resume, so we cannot reuse the current ASID anymore.
2475
bool fNewASID = false;
2476
if ( pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
2477
|| pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
2479
pVCpu->hwaccm.s.fForceTLBFlush = true;
2484
* Check for explicit TLB shootdowns.
2486
if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2487
pVCpu->hwaccm.s.fForceTLBFlush = true;
2489
pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
2491
if (pVCpu->hwaccm.s.fForceTLBFlush)
2495
++pCpu->uCurrentASID;
2496
if (pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID)
2498
pCpu->uCurrentASID = 1; /* start at 1; host uses 0 */
2499
pCpu->cTLBFlushes++;
2500
pCpu->fFlushASIDBeforeUse = true;
2503
pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
2504
if (pCpu->fFlushASIDBeforeUse)
2506
hmR0VmxFlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
2507
#ifdef VBOX_WITH_STATISTICS
2508
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
2514
if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
2515
hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2517
hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
2519
#ifdef VBOX_WITH_STATISTICS
2521
* This is not terribly accurate (i.e. we don't have any StatFlushEPT counter). We currently count these
2522
* as ASID flushes too, better than including them under StatFlushTLBWorldSwitch.
2524
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
2528
pVCpu->hwaccm.s.cTLBFlushes = pCpu->cTLBFlushes;
2529
pVCpu->hwaccm.s.fForceTLBFlush = false;
2533
AssertMsg(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID,
2534
("hwaccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
2535
pVCpu->hwaccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,
2536
pCpu->uCurrentASID, pCpu->cTLBFlushes));
2538
/** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
2539
* not be executed. See hwaccmQueueInvlPage() where it is commented
2540
* out. Support individual entry flushing someday. */
2541
if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2543
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
2546
* Flush individual guest entries using VPID from the TLB or as little as possible with EPT
2547
* as supported by the CPU.
2549
if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
2551
for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
2552
hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
2555
hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
2559
#ifdef VBOX_WITH_STATISTICS
2560
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
2564
pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
2565
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2567
AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes,
2568
("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
2569
AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID,
2570
("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
2571
AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,
2572
("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
2574
/* Update VMCS with the VPID. */
2575
int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hwaccm.s.uCurrentASID);
2581
* Setup the tagged TLB for EPT only.
2129
2583
* @returns VBox status code.
2130
* @param pVM The VM to operate on.
2131
* @param pVCpu The VMCPU to operate on.
2584
* @param pVM Pointer to the VM.
2585
* @param pVCpu Pointer to the VMCPU.
2133
static DECLCALLBACK(void) vmxR0SetupTLBEPT(PVM pVM, PVMCPU pVCpu)
2587
static DECLCALLBACK(void) hmR0VmxSetupTLBEPT(PVM pVM, PVMCPU pVCpu)
2135
2589
PHMGLOBLCPUINFO pCpu;
2137
2591
Assert(pVM->hwaccm.s.fNestedPaging);
2138
2592
Assert(!pVM->hwaccm.s.vmx.fVPID);
2140
/* Deal with tagged TLBs if VPID or EPT is supported. */
2141
2594
pCpu = HWACCMR0GetCurrentCpu();
2142
/* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
2143
/* Note that this can happen both for start and resume due to long jumps back to ring 3. */
2144
if ( pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
2145
/* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
2146
|| pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
2597
* Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2598
* This can happen both for start & resume due to long jumps back to ring-3.
2599
* A change in the TLB flush count implies the host Cpu is online after a suspend/resume.
2601
if ( pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
2602
|| pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
2148
/* Force a TLB flush on VM entry. */
2149
2604
pVCpu->hwaccm.s.fForceTLBFlush = true;
2151
/* Disabled because this has triggered every time I have suspended my
2152
* laptop with a VM running for the past three months or more. */
2154
// Assert(!pCpu->fFlushTLB);
2156
/* Check for tlb shootdown flushes. */
2608
* Check for explicit TLB shootdown flushes.
2157
2610
if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2158
2611
pVCpu->hwaccm.s.fForceTLBFlush = true;
2160
pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
2161
pCpu->fFlushTLB = false;
2613
pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
2614
pVCpu->hwaccm.s.cTLBFlushes = pCpu->cTLBFlushes;
2163
2616
if (pVCpu->hwaccm.s.fForceTLBFlush)
2165
vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, 0);
2617
hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
2168
if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2170
/* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
2171
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
2173
for (unsigned i=0;i<pVCpu->hwaccm.s.TlbShootdown.cPages;i++)
2620
/** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
2621
* not be executed. See hwaccmQueueInvlPage() where it is commented
2622
* out. Support individual entry flushing someday. */
2623
if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2175
/* aTlbShootdownPages contains physical addresses in this case. */
2176
vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
2626
* We cannot flush individual entries without VPID support. Flush using EPT.
2628
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
2629
hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
2179
2632
pVCpu->hwaccm.s.TlbShootdown.cPages= 0;
2239
2693
pVCpu->hwaccm.s.fForceTLBFlush = false;
2240
2694
pVCpu->hwaccm.s.cTLBFlushes = pCpu->cTLBFlushes;
2241
2695
pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
2696
if (pCpu->fFlushASIDBeforeUse)
2697
hmR0VmxFlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
2245
Assert(!pCpu->fFlushTLB);
2246
Assert(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID);
2701
AssertMsg(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID,
2702
("hwaccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
2703
pVCpu->hwaccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,
2704
pCpu->uCurrentASID, pCpu->cTLBFlushes));
2706
/** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
2707
* not be executed. See hwaccmQueueInvlPage() where it is commented
2708
* out. Support individual entry flushing someday. */
2248
2709
if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2250
/* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
2251
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
2252
for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
2253
vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
2712
* Flush individual guest entries using VPID from the TLB or as little as possible with EPT
2713
* as supported by the CPU.
2715
if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
2717
for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
2718
hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
2721
hmR0VmxFlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
2256
2724
pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
2257
2725
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2259
AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
2260
AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
2261
AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
2727
AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes,
2728
("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
2729
AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID,
2730
("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
2731
AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,
2732
("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
2263
2734
int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hwaccm.s.uCurrentASID);
2266
if (pVCpu->hwaccm.s.fForceTLBFlush)
2267
vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, 0);
2269
2737
# ifdef VBOX_WITH_STATISTICS
2270
2738
if (pVCpu->hwaccm.s.fForceTLBFlush)
2271
2739
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
3118
3676
Assert(CPUMIsGuestInRealModeEx(pCtx));
3120
LogFlow(("Real mode X86_XCPT_GP instruction emulation at %x:%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip));
3678
LogFlow(("Real mode X86_XCPT_GP instruction emulation at %x:%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
3122
rc2 = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
3680
rc2 = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
3123
3681
if (RT_SUCCESS(rc2))
3125
3683
bool fUpdateRIP = true;
3127
3685
rc = VINF_SUCCESS;
3128
Assert(cbOp == pDis->opsize);
3129
switch (pDis->pCurInstr->opcode)
3132
pCtx->eflags.Bits.u1IF = 0;
3133
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCli);
3137
pCtx->eflags.Bits.u1IF = 1;
3138
EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + pDis->opsize);
3139
Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
3140
rc2 = VMXWriteVMCS(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
3142
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitSti);
3148
pCtx->rip += pDis->opsize;
3149
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitHlt);
3159
if (pDis->prefix & PREFIX_OPSIZE)
3170
rc2 = SELMToFlatEx(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, 0, &GCPtrStack);
3171
if (RT_FAILURE(rc2))
3173
rc = VERR_EM_INTERPRETER;
3177
rc2 = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &eflags.u, cbParm);
3178
if (RT_FAILURE(rc2))
3180
rc = VERR_EM_INTERPRETER;
3183
LogFlow(("POPF %x -> %RGv mask=%x\n", eflags.u, pCtx->rsp, uMask));
3184
pCtx->eflags.u = (pCtx->eflags.u & ~(X86_EFL_POPF_BITS & uMask)) | (eflags.u & X86_EFL_POPF_BITS & uMask);
3185
/* RF cleared when popped in real mode; see pushf description in AMD manual. */
3186
pCtx->eflags.Bits.u1RF = 0;
3187
pCtx->esp += cbParm;
3190
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPopf);
3201
if (pDis->prefix & PREFIX_OPSIZE)
3212
rc2 = SELMToFlatEx(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask, 0, &GCPtrStack);
3213
if (RT_FAILURE(rc2))
3215
rc = VERR_EM_INTERPRETER;
3218
eflags = pCtx->eflags;
3219
/* RF & VM cleared when pushed in real mode; see pushf description in AMD manual. */
3220
eflags.Bits.u1RF = 0;
3221
eflags.Bits.u1VM = 0;
3223
rc2 = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &eflags.u, cbParm);
3224
if (RT_FAILURE(rc2))
3226
rc = VERR_EM_INTERPRETER;
3229
LogFlow(("PUSHF %x -> %RGv\n", eflags.u, GCPtrStack));
3230
pCtx->esp -= cbParm;
3232
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPushf);
3239
uint32_t uMask = 0xffff;
3240
uint16_t aIretFrame[3];
3242
if (pDis->prefix & (PREFIX_OPSIZE | PREFIX_ADDRSIZE))
3244
rc = VERR_EM_INTERPRETER;
3248
rc2 = SELMToFlatEx(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, 0, &GCPtrStack);
3249
if (RT_FAILURE(rc2))
3251
rc = VERR_EM_INTERPRETER;
3254
rc2 = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
3255
if (RT_FAILURE(rc2))
3257
rc = VERR_EM_INTERPRETER;
3260
pCtx->ip = aIretFrame[0];
3261
pCtx->cs = aIretFrame[1];
3262
pCtx->csHid.u64Base = pCtx->cs << 4;
3263
pCtx->eflags.u = (pCtx->eflags.u & ~(X86_EFL_POPF_BITS & uMask)) | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
3264
pCtx->sp += sizeof(aIretFrame);
3266
LogFlow(("iret to %04x:%x\n", pCtx->cs, pCtx->ip));
3268
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIret);
3276
LogFlow(("Realmode: INT %x\n", pDis->param1.parval & 0xff));
3277
intInfo2 = pDis->param1.parval & 0xff;
3278
intInfo2 |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
3279
intInfo2 |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
3281
rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, intInfo2, cbOp, 0);
3282
AssertRC(VBOXSTRICTRC_VAL(rc));
3284
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
3290
if (pCtx->eflags.Bits.u1OF)
3294
LogFlow(("Realmode: INTO\n"));
3295
intInfo2 = X86_XCPT_OF;
3296
intInfo2 |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
3297
intInfo2 |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
3299
rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, intInfo2, cbOp, 0);
3300
AssertRC(VBOXSTRICTRC_VAL(rc));
3302
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
3311
LogFlow(("Realmode: INT 3\n"));
3313
intInfo2 |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
3314
intInfo2 |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
3316
rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, intInfo2, cbOp, 0);
3317
AssertRC(VBOXSTRICTRC_VAL(rc));
3319
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
3324
rc = EMInterpretInstructionCPU(pVM, pVCpu, pDis, CPUMCTX2CORE(pCtx), 0, EMCODETYPE_SUPERVISOR, &cbSize);
3686
Assert(cbOp == pDis->cbInstr);
3687
switch (pDis->pCurInstr->uOpcode)
3690
pCtx->eflags.Bits.u1IF = 0;
3691
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCli);
3695
pCtx->eflags.Bits.u1IF = 1;
3696
EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + pDis->cbInstr);
3697
Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
3698
rc2 = VMXWriteVMCS(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE,
3699
VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
3701
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitSti);
3707
pCtx->rip += pDis->cbInstr;
3708
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitHlt);
3718
if (pDis->fPrefix & DISPREFIX_OPSIZE)
3729
rc2 = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, 0, &GCPtrStack);
3730
if (RT_FAILURE(rc2))
3732
rc = VERR_EM_INTERPRETER;
3736
rc2 = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &eflags.u, cbParm);
3737
if (RT_FAILURE(rc2))
3739
rc = VERR_EM_INTERPRETER;
3742
LogFlow(("POPF %x -> %RGv mask=%x\n", eflags.u, pCtx->rsp, uMask));
3743
pCtx->eflags.u = (pCtx->eflags.u & ~(X86_EFL_POPF_BITS & uMask))
3744
| (eflags.u & X86_EFL_POPF_BITS & uMask);
3745
/* RF cleared when popped in real mode; see pushf description in AMD manual. */
3746
pCtx->eflags.Bits.u1RF = 0;
3747
pCtx->esp += cbParm;
3750
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPopf);
3761
if (pDis->fPrefix & DISPREFIX_OPSIZE)
3772
rc2 = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask, 0, &GCPtrStack);
3774
if (RT_FAILURE(rc2))
3776
rc = VERR_EM_INTERPRETER;
3779
eflags = pCtx->eflags;
3780
/* RF & VM cleared when pushed in real mode; see pushf description in AMD manual. */
3781
eflags.Bits.u1RF = 0;
3782
eflags.Bits.u1VM = 0;
3784
rc2 = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &eflags.u, cbParm);
3785
if (RT_FAILURE(rc2))
3787
rc = VERR_EM_INTERPRETER;
3790
LogFlow(("PUSHF %x -> %RGv\n", eflags.u, GCPtrStack));
3791
pCtx->esp -= cbParm;
3793
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPushf);
3800
uint32_t uMask = 0xffff;
3801
uint16_t aIretFrame[3];
3803
if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
3805
rc = VERR_EM_INTERPRETER;
3809
rc2 = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, 0, &GCPtrStack);
3810
if (RT_FAILURE(rc2))
3812
rc = VERR_EM_INTERPRETER;
3815
rc2 = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
3816
if (RT_FAILURE(rc2))
3818
rc = VERR_EM_INTERPRETER;
3821
pCtx->ip = aIretFrame[0];
3822
pCtx->cs.Sel = aIretFrame[1];
3823
pCtx->cs.ValidSel = aIretFrame[1];
3824
pCtx->cs.u64Base = (uint32_t)pCtx->cs.Sel << 4;
3825
pCtx->eflags.u = (pCtx->eflags.u & ~(X86_EFL_POPF_BITS & uMask))
3826
| (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
3827
pCtx->sp += sizeof(aIretFrame);
3829
LogFlow(("iret to %04x:%x\n", pCtx->cs.Sel, pCtx->ip));
3831
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIret);
3839
LogFlow(("Realmode: INT %x\n", pDis->Param1.uValue & 0xff));
3840
intInfo2 = pDis->Param1.uValue & 0xff;
3841
intInfo2 |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
3842
intInfo2 |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
3844
rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo2, cbOp, 0);
3845
AssertRC(VBOXSTRICTRC_VAL(rc));
3847
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
3853
if (pCtx->eflags.Bits.u1OF)
3857
LogFlow(("Realmode: INTO\n"));
3858
intInfo2 = X86_XCPT_OF;
3859
intInfo2 |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
3860
intInfo2 |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
3862
rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo2, cbOp, 0);
3863
AssertRC(VBOXSTRICTRC_VAL(rc));
3865
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
3874
LogFlow(("Realmode: INT 3\n"));
3876
intInfo2 |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
3877
intInfo2 |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
3879
rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo2, cbOp, 0);
3880
AssertRC(VBOXSTRICTRC_VAL(rc));
3882
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
3887
rc = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pCtx), 0, EMCODETYPE_SUPERVISOR);
3328
3892
if (rc == VINF_SUCCESS)
3671
4268
switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification))
3673
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:
3674
Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
3675
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
3676
rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
3677
VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification),
3678
VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification));
3680
switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))
3683
pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;
3688
Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
3689
pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
3692
pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
3695
/* CR8 contains the APIC TPR */
3696
Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
3705
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ:
3706
Log2(("VMX: mov x, crx\n"));
3707
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
3709
Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx) || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != USE_REG_CR3);
3711
/* CR8 reads only cause an exit when the TPR shadow feature isn't present. */
3712
Assert(VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8 || !(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
3714
rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
3715
VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification),
3716
VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification));
3719
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:
3720
Log2(("VMX: clts\n"));
3721
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCLTS);
3722
rc = EMInterpretCLTS(pVM, pVCpu);
3723
pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
3726
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:
3727
Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
3728
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitLMSW);
3729
rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
3730
pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
4270
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:
4272
Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
4273
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
4274
rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
4275
VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification),
4276
VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification));
4277
switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))
4280
pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;
4285
Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
4286
pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
4289
pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
4292
/* CR8 contains the APIC TPR */
4293
Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1
4294
& VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
4304
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ:
4306
Log2(("VMX: mov x, crx\n"));
4307
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
4309
Assert( !pVM->hwaccm.s.fNestedPaging
4310
|| !CPUMIsGuestInPagedProtectedModeEx(pCtx)
4311
|| VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != DISCREG_CR3);
4313
/* CR8 reads only cause an exit when the TPR shadow feature isn't present. */
4314
Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8
4315
|| !(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
4317
rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
4318
VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification),
4319
VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification));
4323
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:
4325
Log2(("VMX: clts\n"));
4326
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCLTS);
4327
rc = EMInterpretCLTS(pVM, pVCpu);
4328
pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
4332
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:
4334
Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
4335
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitLMSW);
4336
rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
4337
pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
3734
4342
/* Update EIP if no error occurred. */
4319
4972
Assert(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
4321
/* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
4975
* Clear VMCS, marking it inactive, clearing implementation-specific data and writing
4976
* VMCS data back to memory.
4322
4978
int rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
4325
4981
return VINF_SUCCESS;
4329
* Flush the TLB (EPT)
4986
* Flush the TLB using EPT.
4331
4988
* @returns VBox status code.
4332
* @param pVM The VM to operate on.
4333
* @param pVCpu The VM CPU to operate on.
4334
* @param enmFlush Type of flush
4335
* @param GCPhys Physical address of the page to flush
4989
* @param pVM Pointer to the VM.
4990
* @param pVCpu Pointer to the VMCPU.
4991
* @param enmFlush Type of flush.
4337
static void vmxR0FlushEPT(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPHYS GCPhys)
4993
static void hmR0VmxFlushEPT(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
4339
4995
uint64_t descriptor[2];
4341
LogFlow(("vmxR0FlushEPT %d %RGv\n", enmFlush, GCPhys));
4997
LogFlow(("hmR0VmxFlushEPT %d\n", enmFlush));
4342
4998
Assert(pVM->hwaccm.s.fNestedPaging);
4343
4999
descriptor[0] = pVCpu->hwaccm.s.vmx.GCPhysEPTP;
4344
descriptor[1] = GCPhys;
5000
descriptor[1] = 0; /* MBZ. Intel spec. 33.3 VMX Instructions */
4345
5001
int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
5002
AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %x %RGv failed with %d\n", enmFlush, pVCpu->hwaccm.s.vmx.GCPhysEPTP, rc));
4349
#ifdef HWACCM_VTX_WITH_VPID
4351
* Flush the TLB (EPT)
5007
* Flush the TLB using VPID.
4353
5009
* @returns VBox status code.
4354
* @param pVM The VM to operate on.
4355
* @param pVCpu The VM CPU to operate on.
4356
* @param enmFlush Type of flush
4357
* @param GCPtr Virtual address of the page to flush
5010
* @param pVM Pointer to the VM.
5011
* @param pVCpu Pointer to the VMCPU (can be NULL depending on @a enmFlush).
5013
* @param enmFlush Type of flush.
5014
* @param GCPtr Virtual address of the page to flush (can be 0 depending on @a enmFlush).
4359
static void vmxR0FlushVPID(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPTR GCPtr)
5017
static void hmR0VmxFlushVPID(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
4361
#if HC_ARCH_BITS == 32
4362
/* If we get a flush in 64 bits guest mode, then force a full TLB flush. Invvpid probably takes only 32 bits addresses. (@todo) */
4363
if ( CPUMIsGuestInLongMode(pVCpu)
4364
&& !VMX_IS_64BIT_HOST_MODE())
5019
uint64_t descriptor[2];
5021
Assert(pVM->hwaccm.s.vmx.fVPID);
5022
if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
4366
VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
4371
uint64_t descriptor[2];
4373
Assert(pVM->hwaccm.s.vmx.fVPID);
5030
AssertMsg(pVCpu->hwaccm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hwaccm.s.uCurrentASID));
5031
AssertMsg(pVCpu->hwaccm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hwaccm.s.uCurrentASID));
4374
5032
descriptor[0] = pVCpu->hwaccm.s.uCurrentASID;
4375
5033
descriptor[1] = GCPtr;
4376
int rc = VMXR0InvVPID(enmFlush, &descriptor[0]);
4377
AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu->hwaccm.s.uCurrentASID, GCPtr, rc));
5035
int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
5036
AssertMsg(rc == VINF_SUCCESS,
5037
("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hwaccm.s.uCurrentASID : 0, GCPtr, rc));
4380
#endif /* HWACCM_VTX_WITH_VPID */
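The body of the VMX_FLUSH_VPID_ALL_CONTEXTS branch tested above in the new hmR0VmxFlushVPID is elided here. Since an all-contexts INVVPID ignores both the VPID and the linear address, the descriptor is presumably just zeroed; a sketch of that assumption, not the verbatim elided lines:

    if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
    {
        descriptor[0] = 0;   /* VPID is ignored for all-contexts flushes */
        descriptor[1] = 0;   /* linear address is ignored as well */
    }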
4383
* Invalidates a guest page
5042
* Invalidates a guest page by guest virtual address. Only relevant for
5043
* EPT/VPID, otherwise there is nothing really to invalidate.
4385
5045
* @returns VBox status code.
4386
* @param pVM The VM to operate on.
4387
* @param pVCpu The VM CPU to operate on.
4388
* @param GCVirt Page to invalidate
5046
* @param pVM Pointer to the VM.
5047
* @param pVCpu Pointer to the VMCPU.
5048
* @param GCVirt Guest virtual address of the page to invalidate.
4390
5050
VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
4394
5054
Log2(("VMXR0InvalidatePage %RGv\n", GCVirt));
4396
/* Only relevant if we want to use VPID as otherwise every VMX transition
4397
* will flush the TLBs and paging-structure caches.
4398
* In the nested paging case we still see such calls, but
4399
* can safely ignore them. (e.g. after cr3 updates)
4401
#ifdef HWACCM_VTX_WITH_VPID
4402
/* Skip it if a TLB flush is already pending. */
4404
&& pVM->hwaccm.s.vmx.fVPID)
4405
vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, GCVirt);
4406
#endif /* HWACCM_VTX_WITH_VPID */
5059
* We must invalidate the guest TLB entry in either case, we cannot ignore it even for the EPT case
5060
* See @bugref{6043} and @bugref{6177}
5062
* Set the VMCPU_FF_TLB_FLUSH force flag and flush before VMENTRY in hmR0VmxSetupTLB*() as this
5063
* function may be called in a loop with individual addresses.
5065
if (pVM->hwaccm.s.vmx.fVPID)
5067
/* If we can flush just this page do it, otherwise flush as little as possible. */
5068
if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
5069
hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
5071
VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
5073
else if (pVM->hwaccm.s.fNestedPaging)
5074
VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
4408
5077
return VINF_SUCCESS;
4412
* Invalidates a guest page by physical address
5082
* Invalidates a guest page by physical address. Only relevant for EPT/VPID,
5083
* otherwise there is nothing really to invalidate.
4414
5085
* NOTE: Assumes the current instruction references this physical page through a virtual address!!
4416
5087
* @returns VBox status code.
4417
* @param pVM The VM to operate on.
4418
* @param pVCpu The VM CPU to operate on.
4419
* @param GCPhys Page to invalidate
5088
* @param pVM Pointer to the VM.
5089
* @param pVCpu Pointer to the VMCPU.
5090
* @param GCPhys Guest physical address of the page to invalidate.
4421
5092
VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
4423
bool fFlushPending = VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH);
4425
Assert(pVM->hwaccm.s.fNestedPaging);
4427
5094
LogFlow(("VMXR0InvalidatePhysPage %RGp\n", GCPhys));
4429
/* Skip it if a TLB flush is already pending. */
4431
vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, GCPhys);
5097
* We cannot flush a page by guest-physical address. invvpid takes only a linear address
5098
* while invept only flushes by EPT not individual addresses. We update the force flag here
5099
* and flush before VMENTRY in hmR0VmxSetupTLB*(). This function might be called in a loop.
5101
VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
4433
5102
return VINF_SUCCESS;
4437
* Report world switch error and dump some useful debug info
5107
* Report world switch error and dump some useful debug info.
4439
* @param pVM The VM to operate on.
4440
* @param pVCpu The VMCPU to operate on.
4441
* @param rc Return code
4442
* @param pCtx Current CPU context (not updated)
5109
* @param pVM Pointer to the VM.
5110
* @param pVCpu Pointer to the VMCPU.
5111
* @param rc Return code.
5112
* @param pCtx Pointer to the current guest CPU context (not updated).
4444
static void VMXR0ReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc, PCPUMCTX pCtx)
5114
static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc, PCPUMCTX pCtx)
4446
5118
switch (VBOXSTRICTRC_VAL(rc))
4448
case VERR_VMX_INVALID_VMXON_PTR:
4452
case VERR_VMX_UNABLE_TO_START_VM:
4453
case VERR_VMX_UNABLE_TO_RESUME_VM:
4456
RTCCUINTREG exitReason, instrError;
4458
rc2 = VMXReadVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason);
4459
rc2 |= VMXReadVMCS(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError);
4461
if (rc2 == VINF_SUCCESS)
5120
case VERR_VMX_INVALID_VMXON_PTR:
5124
case VERR_VMX_UNABLE_TO_START_VM:
5125
case VERR_VMX_UNABLE_TO_RESUME_VM:
4463
Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason, (uint32_t)instrError));
4464
Log(("Current stack %08x\n", &rc2));
4466
pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError;
4467
pVCpu->hwaccm.s.vmx.lasterror.ulExitReason = exitReason;
5128
RTCCUINTREG exitReason, instrError;
5130
rc2 = VMXReadVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason);
5131
rc2 |= VMXReadVMCS(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError);
5133
if (rc2 == VINF_SUCCESS)
5135
Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason,
5136
(uint32_t)instrError));
5137
Log(("Current stack %08x\n", &rc2));
5139
pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError;
5140
pVCpu->hwaccm.s.vmx.lasterror.ulExitReason = exitReason;
4469
5142
#ifdef VBOX_STRICT
4476
VMXReadVMCS(VMX_VMCS64_GUEST_RIP, &val);
4477
Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val));
4478
VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
4479
Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val));
4480
VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
4481
Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS %08x\n", val));
4482
VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
4483
Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS %08x\n", val));
4484
VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
4485
Log(("VMX_VMCS_CTRL_EXIT_CONTROLS %08x\n", val));
4487
VMXReadVMCS(VMX_VMCS_HOST_CR0, &val);
4488
Log(("VMX_VMCS_HOST_CR0 %08x\n", val));
4490
VMXReadVMCS(VMX_VMCS_HOST_CR3, &val);
4491
Log(("VMX_VMCS_HOST_CR3 %08x\n", val));
4493
VMXReadVMCS(VMX_VMCS_HOST_CR4, &val);
4494
Log(("VMX_VMCS_HOST_CR4 %08x\n", val));
4496
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_CS, &val);
4497
Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val));
4499
VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
4500
Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val));
4502
if (val < gdtr.cbGdt)
4504
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
4505
HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
4508
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_DS, &val);
4509
Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val));
4510
if (val < gdtr.cbGdt)
4512
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
4513
HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
4516
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_ES, &val);
4517
Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val));
4518
if (val < gdtr.cbGdt)
4520
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
4521
HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
4524
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_FS, &val);
4525
Log(("VMX_VMCS16_HOST_FIELD_FS %08x\n", val));
4526
if (val < gdtr.cbGdt)
4528
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
4529
HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
4532
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_GS, &val);
4533
Log(("VMX_VMCS16_HOST_FIELD_GS %08x\n", val));
4534
if (val < gdtr.cbGdt)
4536
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
4537
HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
4540
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_SS, &val);
4541
Log(("VMX_VMCS16_HOST_FIELD_SS %08x\n", val));
4542
if (val < gdtr.cbGdt)
4544
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
4545
HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
4548
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_TR, &val);
4549
Log(("VMX_VMCS16_HOST_FIELD_TR %08x\n", val));
4550
if (val < gdtr.cbGdt)
4552
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
4553
HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
4556
VMXReadVMCS(VMX_VMCS_HOST_TR_BASE, &val);
4557
Log(("VMX_VMCS_HOST_TR_BASE %RHv\n", val));
4559
VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE, &val);
4560
Log(("VMX_VMCS_HOST_GDTR_BASE %RHv\n", val));
4561
VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE, &val);
4562
Log(("VMX_VMCS_HOST_IDTR_BASE %RHv\n", val));
4564
VMXReadVMCS(VMX_VMCS32_HOST_SYSENTER_CS, &val);
4565
Log(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", val));
4567
VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP, &val);
4568
Log(("VMX_VMCS_HOST_SYSENTER_EIP %RHv\n", val));
4570
VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP, &val);
4571
Log(("VMX_VMCS_HOST_SYSENTER_ESP %RHv\n", val));
4573
VMXReadVMCS(VMX_VMCS_HOST_RSP, &val);
4574
Log(("VMX_VMCS_HOST_RSP %RHv\n", val));
4575
VMXReadVMCS(VMX_VMCS_HOST_RIP, &val);
4576
Log(("VMX_VMCS_HOST_RIP %RHv\n", val));
5149
VMXReadVMCS(VMX_VMCS64_GUEST_RIP, &val);
5150
Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val));
5151
VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
5152
Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val));
5153
VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
5154
Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS %08x\n", val));
5155
VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
5156
Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS %08x\n", val));
5157
VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
5158
Log(("VMX_VMCS_CTRL_EXIT_CONTROLS %08x\n", val));
5160
VMXReadVMCS(VMX_VMCS_HOST_CR0, &val);
5161
Log(("VMX_VMCS_HOST_CR0 %08x\n", val));
5162
VMXReadVMCS(VMX_VMCS_HOST_CR3, &val);
5163
Log(("VMX_VMCS_HOST_CR3 %08x\n", val));
5164
VMXReadVMCS(VMX_VMCS_HOST_CR4, &val);
5165
Log(("VMX_VMCS_HOST_CR4 %08x\n", val));
5167
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_CS, &val);
5168
Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val));
5169
VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
5170
Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val));
5172
if (val < gdtr.cbGdt)
5174
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5175
HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
5178
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_DS, &val);
5179
Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val));
5180
if (val < gdtr.cbGdt)
5182
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5183
HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
5186
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_ES, &val);
5187
Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val));
5188
if (val < gdtr.cbGdt)
5190
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5191
HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
5194
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_FS, &val);
5195
Log(("VMX_VMCS16_HOST_FIELD_FS %08x\n", val));
5196
if (val < gdtr.cbGdt)
5198
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5199
HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
5202
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_GS, &val);
5203
Log(("VMX_VMCS16_HOST_FIELD_GS %08x\n", val));
5204
if (val < gdtr.cbGdt)
5206
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5207
HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
5210
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_SS, &val);
5211
Log(("VMX_VMCS16_HOST_FIELD_SS %08x\n", val));
5212
if (val < gdtr.cbGdt)
5214
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5215
HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
5218
VMXReadVMCS(VMX_VMCS16_HOST_FIELD_TR, &val);
5219
Log(("VMX_VMCS16_HOST_FIELD_TR %08x\n", val));
5220
if (val < gdtr.cbGdt)
5222
pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5223
HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
5226
VMXReadVMCS(VMX_VMCS_HOST_TR_BASE, &val);
5227
Log(("VMX_VMCS_HOST_TR_BASE %RHv\n", val));
5228
VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE, &val);
5229
Log(("VMX_VMCS_HOST_GDTR_BASE %RHv\n", val));
5230
VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE, &val);
5231
Log(("VMX_VMCS_HOST_IDTR_BASE %RHv\n", val));
5232
VMXReadVMCS(VMX_VMCS32_HOST_SYSENTER_CS, &val);
5233
Log(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", val));
5234
VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP, &val);
5235
Log(("VMX_VMCS_HOST_SYSENTER_EIP %RHv\n", val));
5236
VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP, &val);
5237
Log(("VMX_VMCS_HOST_SYSENTER_ESP %RHv\n", val));
5238
VMXReadVMCS(VMX_VMCS_HOST_RSP, &val);
5239
Log(("VMX_VMCS_HOST_RSP %RHv\n", val));
5240
VMXReadVMCS(VMX_VMCS_HOST_RIP, &val);
5241
Log(("VMX_VMCS_HOST_RIP %RHv\n", val));
4578
5242
# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4579
if (VMX_IS_64BIT_HOST_MODE())
4581
Log(("MSR_K6_EFER = %RX64\n", ASMRdMsr(MSR_K6_EFER)));
4582
Log(("MSR_K6_STAR = %RX64\n", ASMRdMsr(MSR_K6_STAR)));
4583
Log(("MSR_K8_LSTAR = %RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
4584
Log(("MSR_K8_CSTAR = %RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
4585
Log(("MSR_K8_SF_MASK = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5243
if (VMX_IS_64BIT_HOST_MODE())
5245
Log(("MSR_K6_EFER = %RX64\n", ASMRdMsr(MSR_K6_EFER)));
5246
Log(("MSR_K6_STAR = %RX64\n", ASMRdMsr(MSR_K6_STAR)));
5247
Log(("MSR_K8_LSTAR = %RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5248
Log(("MSR_K8_CSTAR = %RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5249
Log(("MSR_K8_SF_MASK = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5250
Log(("MSR_K8_KERNEL_GS_BASE = %RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
4588
5253
#endif /* VBOX_STRICT */
4595
AssertMsgFailed(("%Rrc (%#x)\n", VBOXSTRICTRC_VAL(rc), VBOXSTRICTRC_VAL(rc)));
5260
AssertMsgFailed(("%Rrc (%#x)\n", VBOXSTRICTRC_VAL(rc), VBOXSTRICTRC_VAL(rc)));
4600
5266
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4602
* Prepares for and executes VMLAUNCH (64 bits guest mode)
5268
* Prepares for and executes VMLAUNCH (64 bits guest mode).
4604
* @returns VBox status code
4605
* @param fResume vmlauch/vmresume
4606
* @param pCtx Guest context
4607
* @param pCache VMCS cache
4608
* @param pVM The VM to operate on.
4609
* @param pVCpu The VMCPU to operate on.
5270
* @returns VBox status code.
5271
* @param fResume Whether to vmlaunch/vmresume.
5272
* @param pCtx Pointer to the guest CPU context.
5273
* @param pCache Pointer to the VMCS cache.
5274
* @param pVM Pointer to the VM.
5275
* @param pVCpu Pointer to the VMCPU.
4611
5277
DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
4657
5323
AssertMsg(pCache->TestIn.HCPhysCpuPage== HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4658
AssertMsg(pCache->TestIn.HCPhysVMCS == pVCpu->hwaccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, pVCpu->hwaccm.s.vmx.HCPhysVMCS));
4659
AssertMsg(pCache->TestIn.HCPhysVMCS == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, pCache->TestOut.HCPhysVMCS));
4660
AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache, pCache->TestOut.pCache));
4661
AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache), ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache)));
4662
AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx, pCache->TestOut.pCtx));
5324
AssertMsg(pCache->TestIn.HCPhysVMCS == pVCpu->hwaccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
5325
pVCpu->hwaccm.s.vmx.HCPhysVMCS));
5326
AssertMsg(pCache->TestIn.HCPhysVMCS == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
5327
pCache->TestOut.HCPhysVMCS));
5328
AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5329
pCache->TestOut.pCache));
5330
AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache),
5331
("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache)));
5332
AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5333
pCache->TestOut.pCtx));
4663
5334
Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5341
static bool hmR0VmxIsValidReadField(uint32_t idxField)
5345
case VMX_VMCS64_GUEST_RIP:
5346
case VMX_VMCS64_GUEST_RSP:
5347
case VMX_VMCS_GUEST_RFLAGS:
5348
case VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE:
5349
case VMX_VMCS_CTRL_CR0_READ_SHADOW:
5350
case VMX_VMCS64_GUEST_CR0:
5351
case VMX_VMCS_CTRL_CR4_READ_SHADOW:
5352
case VMX_VMCS64_GUEST_CR4:
5353
case VMX_VMCS64_GUEST_DR7:
5354
case VMX_VMCS32_GUEST_SYSENTER_CS:
5355
case VMX_VMCS64_GUEST_SYSENTER_EIP:
5356
case VMX_VMCS64_GUEST_SYSENTER_ESP:
5357
case VMX_VMCS32_GUEST_GDTR_LIMIT:
5358
case VMX_VMCS64_GUEST_GDTR_BASE:
5359
case VMX_VMCS32_GUEST_IDTR_LIMIT:
5360
case VMX_VMCS64_GUEST_IDTR_BASE:
5361
case VMX_VMCS16_GUEST_FIELD_CS:
5362
case VMX_VMCS32_GUEST_CS_LIMIT:
5363
case VMX_VMCS64_GUEST_CS_BASE:
5364
case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
5365
case VMX_VMCS16_GUEST_FIELD_DS:
5366
case VMX_VMCS32_GUEST_DS_LIMIT:
5367
case VMX_VMCS64_GUEST_DS_BASE:
5368
case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
5369
case VMX_VMCS16_GUEST_FIELD_ES:
5370
case VMX_VMCS32_GUEST_ES_LIMIT:
5371
case VMX_VMCS64_GUEST_ES_BASE:
5372
case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
5373
case VMX_VMCS16_GUEST_FIELD_FS:
5374
case VMX_VMCS32_GUEST_FS_LIMIT:
5375
case VMX_VMCS64_GUEST_FS_BASE:
5376
case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
5377
case VMX_VMCS16_GUEST_FIELD_GS:
5378
case VMX_VMCS32_GUEST_GS_LIMIT:
5379
case VMX_VMCS64_GUEST_GS_BASE:
5380
case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
5381
case VMX_VMCS16_GUEST_FIELD_SS:
5382
case VMX_VMCS32_GUEST_SS_LIMIT:
5383
case VMX_VMCS64_GUEST_SS_BASE:
5384
case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
5385
case VMX_VMCS16_GUEST_FIELD_LDTR:
5386
case VMX_VMCS32_GUEST_LDTR_LIMIT:
5387
case VMX_VMCS64_GUEST_LDTR_BASE:
5388
case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
5389
case VMX_VMCS16_GUEST_FIELD_TR:
5390
case VMX_VMCS32_GUEST_TR_LIMIT:
5391
case VMX_VMCS64_GUEST_TR_BASE:
5392
case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
5393
case VMX_VMCS32_RO_EXIT_REASON:
5394
case VMX_VMCS32_RO_VM_INSTR_ERROR:
5395
case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
5396
case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE:
5397
case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
5398
case VMX_VMCS32_RO_EXIT_INSTR_INFO:
5399
case VMX_VMCS_RO_EXIT_QUALIFICATION:
5400
case VMX_VMCS32_RO_IDT_INFO:
5401
case VMX_VMCS32_RO_IDT_ERRCODE:
5402
case VMX_VMCS64_GUEST_CR3:
5403
case VMX_VMCS_EXIT_PHYS_ADDR_FULL:
5410
static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5414
case VMX_VMCS64_GUEST_LDTR_BASE:
5415
case VMX_VMCS64_GUEST_TR_BASE:
5416
case VMX_VMCS64_GUEST_GDTR_BASE:
5417
case VMX_VMCS64_GUEST_IDTR_BASE:
5418
case VMX_VMCS64_GUEST_SYSENTER_EIP:
5419
case VMX_VMCS64_GUEST_SYSENTER_ESP:
5420
case VMX_VMCS64_GUEST_CR0:
5421
case VMX_VMCS64_GUEST_CR4:
5422
case VMX_VMCS64_GUEST_CR3:
5423
case VMX_VMCS64_GUEST_DR7:
5424
case VMX_VMCS64_GUEST_RIP:
5425
case VMX_VMCS64_GUEST_RSP:
5426
case VMX_VMCS64_GUEST_CS_BASE:
5427
case VMX_VMCS64_GUEST_DS_BASE:
5428
case VMX_VMCS64_GUEST_ES_BASE:
5429
case VMX_VMCS64_GUEST_FS_BASE:
5430
case VMX_VMCS64_GUEST_GS_BASE:
5431
case VMX_VMCS64_GUEST_SS_BASE:
5436
# endif /* VBOX_STRICT */
/**
 * Executes the specified handler in 64-bit mode.
 *
 * @returns VBox status code.
 * @param   pVM          Pointer to the VM.
 * @param   pVCpu        Pointer to the VMCPU.
 * @param   pCtx         Pointer to the guest CPU context.
 * @param   pfnHandler   Pointer to the RC handler function.
 * @param   cbParam      Number of parameters.
 * @param   paParam      Array of 32-bit parameters.
 */
VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
                                         uint32_t *paParam)
    PHMGLOBLCPUINFO pCpu;

    ASMSetFlags(uOldEFlags);

#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
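As a usage illustration only (the call sites are outside this hunk, and the value and handler names below are hypothetical): on a 32-bit host, any 64-bit argument is split into two 32-bit parameters before being handed to the 64-bit handler.

    uint32_t aParam[2];
    aParam[0] = (uint32_t)HCPhysSomething;           /* low dword  (hypothetical value)   */
    aParam[1] = (uint32_t)(HCPhysSomething >> 32);    /* high dword (hypothetical value)   */
    int rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pfnSomeRCHandler /* hypothetical */, 2, &aParam[0]);
    AssertRC(rc);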
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
 * @returns VBox status code
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   idxField    VMCS field index.
 * @param   u64Val      16, 32 or 64 bits value.
 */
VMMR0DECL(int) VMXWriteVMCS64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
    switch (idxField)

        case VMX_VMCS_CTRL_TSC_OFFSET_FULL:
        case VMX_VMCS_CTRL_IO_BITMAP_A_FULL:
        case VMX_VMCS_CTRL_IO_BITMAP_B_FULL:
        case VMX_VMCS_CTRL_MSR_BITMAP_FULL:
        case VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL:
        case VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL:
        case VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL:
        case VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL:
        case VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL:
        case VMX_VMCS_GUEST_LINK_PTR_FULL:
        case VMX_VMCS_GUEST_PDPTR0_FULL:
        case VMX_VMCS_GUEST_PDPTR1_FULL:
        case VMX_VMCS_GUEST_PDPTR2_FULL:
        case VMX_VMCS_GUEST_PDPTR3_FULL:
        case VMX_VMCS_GUEST_DEBUGCTL_FULL:
        case VMX_VMCS_GUEST_EFER_FULL:
        case VMX_VMCS_CTRL_EPTP_FULL:
            /* These fields consist of two parts, which are both writable in 32 bits mode. */
            rc = VMXWriteVMCS32(idxField, u64Val);
            rc |= VMXWriteVMCS32(idxField + 1, (uint32_t)(u64Val >> 32ULL));

        case VMX_VMCS64_GUEST_LDTR_BASE:
        case VMX_VMCS64_GUEST_TR_BASE:
        case VMX_VMCS64_GUEST_GDTR_BASE:
        case VMX_VMCS64_GUEST_IDTR_BASE:
        case VMX_VMCS64_GUEST_SYSENTER_EIP:
        case VMX_VMCS64_GUEST_SYSENTER_ESP:
        case VMX_VMCS64_GUEST_CR0:
        case VMX_VMCS64_GUEST_CR4:
        case VMX_VMCS64_GUEST_CR3:
        case VMX_VMCS64_GUEST_DR7:
        case VMX_VMCS64_GUEST_RIP:
        case VMX_VMCS64_GUEST_RSP:
        case VMX_VMCS64_GUEST_CS_BASE:
        case VMX_VMCS64_GUEST_DS_BASE:
        case VMX_VMCS64_GUEST_ES_BASE:
        case VMX_VMCS64_GUEST_FS_BASE:
        case VMX_VMCS64_GUEST_GS_BASE:
        case VMX_VMCS64_GUEST_SS_BASE:
            /* Queue a 64 bits value as we can't set it in 32 bits host mode. */
            if (u64Val >> 32ULL)
                rc = VMXWriteCachedVMCSEx(pVCpu, idxField, u64Val);
            else
                rc = VMXWriteVMCS32(idxField, (uint32_t)u64Val);

        default:
            AssertMsgFailed(("Unexpected field %x\n", idxField));
            return VERR_INVALID_PARAMETER;
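The first group above relies on the VMCS convention that a 64-bit ("FULL") field and its high dword are addressed by consecutive encodings, so a 32-bit host can emulate a 64-bit VMWRITE with two 32-bit ones. A stand-alone sketch of that idea (the helper name is made up for illustration; only VMXWriteVMCS32 is taken from the surrounding code):

static int vmxSketchWriteU64AsTwoU32(uint32_t idxField, uint64_t u64Val)
{
    /* Low dword goes to the field itself, high dword to the field's +1 encoding. */
    int rc  = VMXWriteVMCS32(idxField,     (uint32_t)u64Val);
    rc     |= VMXWriteVMCS32(idxField + 1, (uint32_t)(u64Val >> 32));
    return rc;
}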
/**
 * Cache VMCS writes for performance reasons (Darwin) and for running 64 bits guests on 32 bits hosts.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   idxField    VMCS field index.
 * @param   u64Val      16, 32 or 64 bits value.
 */
VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)

    PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;

    AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
                    ("entries=%x\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);

    /* Make sure there are no duplicates. */
    for (unsigned i = 0; i < pCache->Write.cValidEntries; i++)
        if (pCache->Write.aField[i] == idxField)

#endif /* HC_ARCH_BITS == 32 && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
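The rest of VMXWriteCachedVMCSEx is elided from this hunk. A minimal sketch of the "update or append" pattern such a write cache implies, assuming the Write sub-structure keeps a value array parallel to aField[] (the aFieldVal[] name is an assumption, not taken from the code above):

    /* Sketch only: aFieldVal[] is assumed; aField[] and cValidEntries appear in the code above. */
    for (unsigned i = 0; i < pCache->Write.cValidEntries; i++)
    {
        if (pCache->Write.aField[i] == idxField)
        {
            pCache->Write.aFieldVal[i] = u64Val;    /* field already queued: overwrite its value */
            return VINF_SUCCESS;
        }
    }
    pCache->Write.aField[pCache->Write.cValidEntries]    = idxField;   /* append a new entry */
    pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
    pCache->Write.cValidEntries++;
    return VINF_SUCCESS;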
static bool vmxR0IsValidReadField(uint32_t idxField)

        case VMX_VMCS64_GUEST_RIP:
        case VMX_VMCS64_GUEST_RSP:
        case VMX_VMCS_GUEST_RFLAGS:
        case VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE:
        case VMX_VMCS_CTRL_CR0_READ_SHADOW:
        case VMX_VMCS64_GUEST_CR0:
        case VMX_VMCS_CTRL_CR4_READ_SHADOW:
        case VMX_VMCS64_GUEST_CR4:
        case VMX_VMCS64_GUEST_DR7:
        case VMX_VMCS32_GUEST_SYSENTER_CS:
        case VMX_VMCS64_GUEST_SYSENTER_EIP:
        case VMX_VMCS64_GUEST_SYSENTER_ESP:
        case VMX_VMCS32_GUEST_GDTR_LIMIT:
        case VMX_VMCS64_GUEST_GDTR_BASE:
        case VMX_VMCS32_GUEST_IDTR_LIMIT:
        case VMX_VMCS64_GUEST_IDTR_BASE:
        case VMX_VMCS16_GUEST_FIELD_CS:
        case VMX_VMCS32_GUEST_CS_LIMIT:
        case VMX_VMCS64_GUEST_CS_BASE:
        case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
        case VMX_VMCS16_GUEST_FIELD_DS:
        case VMX_VMCS32_GUEST_DS_LIMIT:
        case VMX_VMCS64_GUEST_DS_BASE:
        case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
        case VMX_VMCS16_GUEST_FIELD_ES:
        case VMX_VMCS32_GUEST_ES_LIMIT:
        case VMX_VMCS64_GUEST_ES_BASE:
        case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
        case VMX_VMCS16_GUEST_FIELD_FS:
        case VMX_VMCS32_GUEST_FS_LIMIT:
        case VMX_VMCS64_GUEST_FS_BASE:
        case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
        case VMX_VMCS16_GUEST_FIELD_GS:
        case VMX_VMCS32_GUEST_GS_LIMIT:
        case VMX_VMCS64_GUEST_GS_BASE:
        case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
        case VMX_VMCS16_GUEST_FIELD_SS:
        case VMX_VMCS32_GUEST_SS_LIMIT:
        case VMX_VMCS64_GUEST_SS_BASE:
        case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
        case VMX_VMCS16_GUEST_FIELD_LDTR:
        case VMX_VMCS32_GUEST_LDTR_LIMIT:
        case VMX_VMCS64_GUEST_LDTR_BASE:
        case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
        case VMX_VMCS16_GUEST_FIELD_TR:
        case VMX_VMCS32_GUEST_TR_LIMIT:
        case VMX_VMCS64_GUEST_TR_BASE:
        case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
        case VMX_VMCS32_RO_EXIT_REASON:
        case VMX_VMCS32_RO_VM_INSTR_ERROR:
        case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
        case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE:
        case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
        case VMX_VMCS32_RO_EXIT_INSTR_INFO:
        case VMX_VMCS_RO_EXIT_QUALIFICATION:
        case VMX_VMCS32_RO_IDT_INFO:
        case VMX_VMCS32_RO_IDT_ERRCODE:
        case VMX_VMCS64_GUEST_CR3:
        case VMX_VMCS_EXIT_PHYS_ADDR_FULL:
static bool vmxR0IsValidWriteField(uint32_t idxField)

        case VMX_VMCS64_GUEST_LDTR_BASE:
        case VMX_VMCS64_GUEST_TR_BASE:
        case VMX_VMCS64_GUEST_GDTR_BASE:
        case VMX_VMCS64_GUEST_IDTR_BASE:
        case VMX_VMCS64_GUEST_SYSENTER_EIP:
        case VMX_VMCS64_GUEST_SYSENTER_ESP:
        case VMX_VMCS64_GUEST_CR0:
        case VMX_VMCS64_GUEST_CR4:
        case VMX_VMCS64_GUEST_CR3:
        case VMX_VMCS64_GUEST_DR7:
        case VMX_VMCS64_GUEST_RIP:
        case VMX_VMCS64_GUEST_RSP:
        case VMX_VMCS64_GUEST_CS_BASE:
        case VMX_VMCS64_GUEST_DS_BASE:
        case VMX_VMCS64_GUEST_ES_BASE:
        case VMX_VMCS64_GUEST_FS_BASE:
        case VMX_VMCS64_GUEST_GS_BASE:
        case VMX_VMCS64_GUEST_SS_BASE: