/*
 * We must turn on AMD-V and set up the host state physical address, as
 * those MSRs are per-cpu/core.
 */
uint64_t val = ASMRdMsr(MSR_K6_EFER);
if (val & MSR_K6_EFER_SVME)
{
    /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active, then we blindly use AMD-V. */
    if (   pVM
        && pVM->hwaccm.s.svm.fIgnoreInUseError)
    {
        pCpu->fIgnoreAMDVInUseError = true;
    }

    if (!pCpu->fIgnoreAMDVInUseError)
        return VERR_SVM_IN_USE;
}

/* Turn on AMD-V in the EFER MSR. */
ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
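/*
 * Aside, not part of the original source: a minimal self-contained sketch of
 * the same per-core AMD-V enable sequence, for readers without the VirtualBox
 * tree. The MSR numbers are from the AMD architecture manuals; rdmsr() and
 * wrmsr() are assumed host-provided accessors, not VirtualBox APIs.
 */
#include <stdint.h>

#define MSR_EFER         0xC0000080u          /* Extended Feature Enable Register. */
#define EFER_SVME        (UINT64_C(1) << 12)  /* Secure Virtual Machine Enable bit. */
#define MSR_VM_HSAVE_PA  0xC0010117u          /* Host state save area address. */

extern uint64_t rdmsr(uint32_t idx);               /* assumed helper */
extern void     wrmsr(uint32_t idx, uint64_t val); /* assumed helper */

/* Enable SVM on the current core; returns 0 on success, -1 if already in use. */
static int enable_svm_on_this_core(uint64_t host_save_area_phys)
{
    uint64_t efer = rdmsr(MSR_EFER);
    if (efer & EFER_SVME)
        return -1;                              /* Another hypervisor owns SVM. */
    wrmsr(MSR_EFER, efer | EFER_SVME);          /* Turn on AMD-V. */
    wrmsr(MSR_VM_HSAVE_PA, host_save_area_phys);/* Per-core host save area. */
    return 0;
}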
/* Check for pending actions that force us to go back to ring 3. */
if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING)
    ||  VMCPU_FF_ISPENDING(pVCpu,   VMCPU_FF_HWACCM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST))
{
    /* Check if a sync operation is pending. */
    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
                        VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        if (rc != VINF_SUCCESS)
        {
            Log(("Pending pool sync is forcing us back to ring 3; rc=%d\n", rc));
            /* ... */
        }
    }

    /* Intercept X86_XCPT_DB if stepping is enabled. */
    if (!DBGFIsStepping(pVCpu))
    {
        /* ... */
    }

    if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK)
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchToR3);
        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
        rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
        /* ... */
    }

    /* Pending request packets might contain actions that need immediate
       attention, such as pending hardware interrupts. */
    if (    VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
    {
        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
        rc = VINF_EM_PENDING_REQUEST;
        /* ... */
    }

    /* Check if a pgm pool flush is in progress. */
    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
    {
        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
        rc = VINF_PGM_POOL_FLUSH_PENDING;
        /* ... */
    }
}
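/*
 * Aside, not part of the original source: the block above tests one combined
 * force-flag mask first, so the common case -- nothing pending -- costs a
 * single branch, and only on the slow path does it refine which action fired.
 * A minimal sketch of that pattern, with made-up flag names:
 */
#include <stdint.h>

#define FF_TO_R3      (UINT32_C(1) << 0)   /* Ring-3 switch requested. */
#define FF_REQUEST    (UINT32_C(1) << 1)   /* Pending request packet. */
#define FF_POOL_FLUSH (UINT32_C(1) << 2)   /* Page pool flush pending. */

/* Returns 0 when it is safe to enter the guest, else an action code. */
static int check_force_flags(uint32_t ff)
{
    /* Fast path: one combined test covers every action that blocks entry. */
    if (!(ff & (FF_TO_R3 | FF_REQUEST | FF_POOL_FLUSH)))
        return 0;

    /* Slow path: identify which action fired, in the same order as above. */
    if (ff & FF_TO_R3)
        return 1;
    if (ff & FF_REQUEST)
        return 2;
    return 3;   /* FF_POOL_FLUSH */
}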
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
AssertFailed();

/* Check if a sync operation is pending. */
if (    rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
    &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
{
    rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
                    VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));

    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBCRxChange);

    /* Must be set by PGMSyncCR3. */
    AssertMsg(   rc == VINF_SUCCESS
              || rc == VINF_PGM_SYNC_CR3
              || PGMGetGuestMode(pVCpu) <= PGMMODE_PROTECTED
              || pVCpu->hwaccm.s.fForceTLBFlush,
              ("rc=%Rrc mode=%d fForceTLBFlush=%RTbool\n",
               rc, PGMGetGuestMode(pVCpu), pVCpu->hwaccm.s.fForceTLBFlush));
}

if (rc == VINF_SUCCESS)
{
    /* EIP has been updated already. */
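/*
 * Aside, not part of the original source: under shadow paging, a guest write
 * to CR3 (or to a paging-related CR0/CR4 bit) leaves the shadow page tables
 * stale, which is why the handler above calls PGMSyncCR3 when a sync
 * force-flag is pending. A minimal sketch of the idea, with hypothetical
 * types and helpers throughout:
 */
#include <stdbool.h>
#include <stdint.h>

struct vcpu {
    uint64_t guest_cr3;        /* What the guest believes CR3 holds. */
    uint64_t shadow_cr3;       /* What the hardware actually walks. */
    bool     sync_cr3_pending; /* Force-flag analogue: shadow tables stale. */
};

/* Called from the MOV CR3 intercept after the write has been emulated. */
static void on_guest_cr3_write(struct vcpu *v, uint64_t new_cr3)
{
    v->guest_cr3 = new_cr3;
    /* The shadow tables were built for the old guest CR3; flag them stale
     * so a sync runs before the next guest entry. */
    v->sync_cr3_pending = true;
}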