1310
* Executes one (or perhaps a few more) instruction(s).
1312
* @returns VBox status code suitable for EM.
1314
* @param pVM VM handle.
1315
* @param pVCpu VMCPU handle
1316
* @param rcGC GC return code
1317
* @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1318
* instruction and prefix the log output with this text.
1321
static int emR3RawExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC, const char *pszPrefix)
1323
static int emR3RawExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
1326
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1331
* The simple solution is to use the recompiler.
1332
* The better solution is to disassemble the current instruction and
1333
* try handle as many as possible without using REM.
1339
* Disassemble the instruction if requested.
1343
DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
1344
DBGFR3DisasInstrCurrentLog(pVCpu, pszPrefix);
1346
#endif /* LOG_ENABLED */
1349
* PATM is making life more interesting.
1350
* We cannot hand anything to REM which has an EIP inside patch code. So, we'll
1351
* tell PATM there is a trap in this code and have it take the appropriate actions
1352
* to allow us execute the code in REM.
1354
if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1356
Log(("emR3RawExecuteInstruction: In patch block. eip=%RRv\n", (RTRCPTR)pCtx->eip));
1359
rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1363
* It's not very useful to emulate a single instruction and then go back to raw
1364
* mode; just execute the whole block until IF is set again.
1367
Log(("emR3RawExecuteInstruction: Executing instruction starting at new address %RGv IF=%d VMIF=%x\n",
1368
pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1369
pCtx->eip = pNewEip;
1372
if (pCtx->eflags.Bits.u1IF)
1375
* The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1377
Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1378
return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1380
else if (rcGC == VINF_PATM_PENDING_IRQ_AFTER_IRET)
1382
/* special case: iret, that sets IF, detected a pending irq/event */
1383
return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIRET");
1385
return VINF_EM_RESCHEDULE_REM;
1390
case VINF_PATCH_EMULATE_INSTR:
1391
Log(("emR3RawExecuteInstruction: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
1392
pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1393
pCtx->eip = pNewEip;
1394
return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1397
* The patch was disabled, hand it to the REM.
1399
case VERR_PATCH_DISABLED:
1400
Log(("emR3RawExecuteInstruction: Disabled patch -> new eip %RGv IF=%d VMIF=%x\n",
1401
pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1402
pCtx->eip = pNewEip;
1403
if (pCtx->eflags.Bits.u1IF)
1406
* The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1408
Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1409
return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1411
return VINF_EM_RESCHEDULE_REM;
1413
/* Force continued patch exection; usually due to write monitored stack. */
1414
case VINF_PATCH_CONTINUE:
1415
return VINF_SUCCESS;
1418
AssertReleaseMsgFailed(("Unknown return code %Rrc from PATMR3HandleTrap\n", rc));
1419
return VERR_IPE_UNEXPECTED_STATUS;
1424
/* Try our own instruction emulator before falling back to the recompiler. */
1426
rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
1431
switch (Cpu.pCurInstr->opcode)
1433
/* @todo we can do more now */
1442
STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
1443
rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
1446
pCtx->rip += Cpu.opsize;
1447
#ifdef EM_NOTIFY_HWACCM
1448
if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1449
HWACCMR3NotifyEmulated(pVCpu);
1451
STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
1454
if (rc != VERR_EM_INTERPRETER)
1455
AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
1456
STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
1461
STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
1462
Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
1464
/* Flush the recompiler TLB if the VCPU has changed. */
1465
if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1466
CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1467
pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1469
rc = REMR3EmulateInstruction(pVM, pVCpu);
1471
STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);
1473
#ifdef EM_NOTIFY_HWACCM
1474
if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1475
HWACCMR3NotifyEmulated(pVCpu);
1482
* Executes one (or perhaps a few more) instruction(s).
1483
* This is just a wrapper for discarding pszPrefix in non-logging builds.
1485
* @returns VBox status code suitable for EM.
1486
* @param pVM VM handle.
1487
* @param pVCpu VMCPU handle.
1488
* @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1489
* instruction and prefix the log output with this text.
1490
* @param rcGC GC return code
* @note NOTE(review): call sites in this file invoke emR3RawExecuteInstruction()
* with only three arguments, which implies a default value for rcGC
* (presumably VINF_SUCCESS) declared on a line omitted from this excerpt --
* confirm against the full source.
1492
DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
/* Logging build: forward the disassembly prefix to the worker. */
1495
return emR3RawExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
/* Non-logging build: pszPrefix is discarded; the worker variant without the
prefix parameter is used (the #ifdef pair is on lines omitted here). */
1497
return emR3RawExecuteInstructionWorker(pVM, pVCpu, rcGC);
1502
* Executes one (or perhaps a few more) IO instruction(s).
1504
* @returns VBox status code suitable for EM.
1505
* @param pVM VM handle.
1506
* @param pVCpu VMCPU handle.
1508
int emR3RawExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
1511
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1513
STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
1515
/** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
1516
* as io instructions tend to come in packages of more than one
1519
rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
/* Default: unhandled opcodes fall through to full instruction emulation below. */
1522
rc = VINF_EM_RAW_EMULATE_INSTR;
/* Non-string variants: plain IN/OUT (no REP/REPNE prefix). */
1524
if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
1526
switch (Cpu.pCurInstr->opcode)
1530
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
1531
rc = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1537
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
1538
rc = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
/* String variants: REP INS/OUTS. */
1543
else if (Cpu.prefix & PREFIX_REP)
1545
switch (Cpu.pCurInstr->opcode)
1550
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
1551
rc = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1558
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
1559
rc = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1566
* Handled the I/O return codes.
1567
* (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1569
if (IOM_SUCCESS(rc))
/* I/O was handled: advance RIP past the emulated instruction. */
1571
pCtx->rip += Cpu.opsize;
1572
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
/* The port access raised a guest trap (e.g. TSS I/O-bitmap denial): dispatch it. */
1576
if (rc == VINF_EM_RAW_GUEST_TRAP)
1578
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1579
rc = emR3RawGuestTrap(pVM, pVCpu);
1582
AssertMsg(rc != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));
1586
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
/* Fallback: emulate the whole instruction generically. */
1589
AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RESCHEDULE_REM, ("rc=%Rrc\n", rc));
1591
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
1592
return emR3RawExecuteInstruction(pVM, pVCpu, "IO: ");
1597
* Handle a guest context trap.
1599
* @returns VBox status code suitable for EM.
1600
* @param pVM VM handle.
1601
* @param pVCpu VMCPU handle.
1603
static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu)
1605
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1608
* Get the trap info.
1612
RTGCUINT uErrorCode;
1614
int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1617
AssertReleaseMsgFailed(("No trap! (rc=%Rrc)\n", rc));
1622
* Traps can be directly forwarded in hardware accelerated mode.
1624
if (HWACCMIsEnabled(pVM))
1626
/* Fixed: was '#ifdef LOGGING_ENABLED' -- the rest of this file tests
LOG_ENABLED (see the matching '#endif / * LOG_ENABLED * /' elsewhere),
so this debug dump was never compiled in. */
#ifdef LOG_ENABLED
1627
DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1628
DBGFR3DisasInstrCurrentLog(pVCpu, "Guest trap");
1630
return VINF_EM_RESCHEDULE_HWACC;
1633
#if 1 /* Experimental: Review, disable if it causes trouble. */
1635
* Handle traps in patch code first.
1637
* We catch a few of these cases in RC before returning to R3 (#PF, #GP, #BP)
1638
* but several traps isn't handled specially by TRPM in RC and we end up here
1639
* instead. One example is #DE.
1641
uint32_t uCpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
1643
&& PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
1645
LogFlow(("emR3RawGuestTrap: trap %#x in patch code; eip=%08x\n", u8TrapNo, pCtx->eip));
1646
return emR3PatchTrap(pVM, pVCpu, pCtx, rc);
1651
* If the guest gate is marked unpatched, then we will check again if we can patch it.
1652
* (This assumes that we've already tried and failed to dispatch the trap in
1653
* RC for the gates that already has been patched. Which is true for most high
1654
* volume traps, because these are handled specially, but not for odd ones like #DE.)
1656
if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) == TRPM_INVALID_HANDLER)
1658
CSAMR3CheckGates(pVM, u8TrapNo, 1);
1659
Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8TrapNo, TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER));
1661
/* If it was successful, then we could go back to raw mode. */
1662
if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER)
1664
/* Must check pending forced actions as our IDT or GDT might be out of sync. */
1665
rc = EMR3CheckRawForcedActions(pVM, pVCpu);
1666
AssertRCReturn(rc, rc);
1668
TRPMERRORCODE enmError = uErrorCode != ~0U
1669
? TRPM_TRAP_HAS_ERRORCODE
1670
: TRPM_TRAP_NO_ERRORCODE;
1671
rc = TRPMForwardTrap(pVCpu, CPUMCTX2CORE(pCtx), u8TrapNo, uErrorCode, enmError, TRPM_TRAP, -1);
1672
if (rc == VINF_SUCCESS /* Don't use RT_SUCCESS */)
1674
TRPMResetTrap(pVCpu);
1675
return VINF_EM_RESCHEDULE_RAW;
1677
AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP, ("%Rrc\n", rc));
1682
* Scan kernel code that traps; we might not get another chance.
1684
/** @todo move this up before the dispatching? */
1685
if ( (pCtx->ss & X86_SEL_RPL) <= 1
1686
&& !pCtx->eflags.Bits.u1VM)
1688
Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
1689
CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1693
* Trap specific handling.
1695
if (u8TrapNo == 6) /* (#UD) Invalid opcode. */
1698
* If MONITOR & MWAIT are supported, then interpret them here.
1701
rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap (#UD): ");
1703
&& (cpu.pCurInstr->opcode == OP_MONITOR || cpu.pCurInstr->opcode == OP_MWAIT))
1705
uint32_t u32Dummy, u32Features, u32ExtFeatures;
1706
CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Features);
1707
if (u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR)
1709
rc = TRPMResetTrap(pVCpu);
1713
rc = EMInterpretInstructionCPU(pVM, pVCpu, &cpu, CPUMCTX2CORE(pCtx), 0, &opsize);
1716
pCtx->rip += cpu.opsize;
1717
#ifdef EM_NOTIFY_HWACCM
1718
if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
1719
HWACCMR3NotifyEmulated(pVCpu);
1723
return emR3RawExecuteInstruction(pVM, pVCpu, "Monitor: ");
1727
else if (u8TrapNo == 13) /* (#GP) Privileged exception */
1730
* Handle I/O bitmap?
1732
/** @todo We're not supposed to be here with a false guest trap concerning
1733
* I/O access. We can easily handle those in RC. */
1735
rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap: ");
1737
&& (cpu.pCurInstr->optype & OPTYPE_PORTIO))
1740
* We should really check the TSS for the IO bitmap, but it's not like this
1741
* lazy approach really makes things worse.
1743
rc = TRPMResetTrap(pVCpu);
1745
return emR3RawExecuteInstruction(pVM, pVCpu, "IO Guest Trap: ");
1750
DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1751
DBGFR3DisasInstrCurrentLog(pVCpu, "Guest trap");
1753
/* Get guest page information. */
1754
uint64_t fFlags = 0;
1755
RTGCPHYS GCPhys = 0;
1756
int rc2 = PGMGstGetPage(pVCpu, uCR2, &fFlags, &GCPhys);
1757
Log(("emR3RawGuestTrap: cs:eip=%04x:%08x: trap=%02x err=%08x cr2=%08x cr0=%08x%s: Phys=%RGp fFlags=%08llx %s %s %s%s rc2=%d\n",
1758
pCtx->cs, pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0, (enmType == TRPM_SOFTWARE_INT) ? " software" : "", GCPhys, fFlags,
1759
fFlags & X86_PTE_P ? "P " : "NP", fFlags & X86_PTE_US ? "U" : "S",
1760
fFlags & X86_PTE_RW ? "RW" : "R0", fFlags & X86_PTE_G ? " G" : "", rc2));
1765
* (Because of stuff like above we must set CR2 in a delayed fashion.)
1767
if (u8TrapNo == 14 /* #PG */)
1770
return VINF_EM_RESCHEDULE_REM;
1775
* Handle a ring switch trap.
1776
* Need to do statistics and to install patches. The result is going to REM.
1778
* @returns VBox status code suitable for EM.
1779
* @param pVM VM handle.
1780
* @param pVCpu VMCPU handle.
1782
int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu)
1786
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1789
* sysenter, syscall & callgate
1791
rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "RSWITCH: ");
1794
if (Cpu.pCurInstr->opcode == OP_SYSENTER)
1796
if (pCtx->SysEnter.cs != 0)
1798
rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
1799
(SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
1802
DBGFR3DisasInstrCurrentLog(pVCpu, "Patched sysenter instruction");
1803
return VINF_EM_RESCHEDULE_RAW;
1808
#ifdef VBOX_WITH_STATISTICS
1809
switch (Cpu.pCurInstr->opcode)
1812
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysEnter);
1815
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysExit);
1818
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysCall);
1821
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysRet);
1829
/* go to the REM to emulate a single instruction */
1830
return emR3RawExecuteInstruction(pVM, pVCpu, "RSWITCH: ");
1835
* Handle a trap (\#PF or \#GP) in patch code
1837
* @returns VBox status code suitable for EM.
1838
* @param pVM VM handle.
1839
* @param pVCpu VMCPU handle.
1840
* @param pCtx CPU context
1841
* @param gcret GC return code
1843
static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
1848
RTGCUINT uErrorCode;
1851
Assert(PATMIsPatchGCAddr(pVM, pCtx->eip));
1853
if (gcret == VINF_PATM_PATCH_INT3)
1859
else if (gcret == VINF_PATM_PATCH_TRAP_GP)
1861
/* No active trap in this case. Kind of ugly. */
1862
u8TrapNo = X86_XCPT_GP;
1868
rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1871
AssertReleaseMsgFailed(("emR3PatchTrap: no trap! (rc=%Rrc) gcret=%Rrc\n", rc, gcret));
1874
/* Reset the trap as we'll execute the original instruction again. */
1875
TRPMResetTrap(pVCpu);
1879
* Deal with traps inside patch code.
1880
* (This code won't run outside GC.)
1885
DBGFR3InfoLog(pVM, "cpumguest", "Trap in patch code");
1886
DBGFR3DisasInstrCurrentLog(pVCpu, "Patch code");
1891
rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->eip, &Cpu, "Patch code: ");
1893
&& Cpu.pCurInstr->opcode == OP_IRET)
1895
uint32_t eip, selCS, uEFlags;
1897
/* Iret crashes are bad as we have already changed the flags on the stack */
1898
rc = PGMPhysSimpleReadGCPtr(pVCpu, &eip, pCtx->esp, 4);
1899
rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selCS, pCtx->esp+4, 4);
1900
rc |= PGMPhysSimpleReadGCPtr(pVCpu, &uEFlags, pCtx->esp+8, 4);
1901
if (rc == VINF_SUCCESS)
1903
if ( (uEFlags & X86_EFL_VM)
1904
|| (selCS & X86_SEL_RPL) == 3)
1906
uint32_t selSS, esp;
1908
rc |= PGMPhysSimpleReadGCPtr(pVCpu, &esp, pCtx->esp + 12, 4);
1909
rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selSS, pCtx->esp + 16, 4);
1911
if (uEFlags & X86_EFL_VM)
1913
uint32_t selDS, selES, selFS, selGS;
1914
rc = PGMPhysSimpleReadGCPtr(pVCpu, &selES, pCtx->esp + 20, 4);
1915
rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selDS, pCtx->esp + 24, 4);
1916
rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selFS, pCtx->esp + 28, 4);
1917
rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selGS, pCtx->esp + 32, 4);
1918
if (rc == VINF_SUCCESS)
1920
Log(("Patch code: IRET->VM stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
1921
Log(("Patch code: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
1925
Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
1928
Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x\n", selCS, eip, uEFlags));
1931
#endif /* LOG_ENABLED */
1932
Log(("emR3PatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
1933
pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0));
1936
rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1940
* Execute the faulting instruction.
1944
/** @todo execute a whole block */
1945
Log(("emR3PatchTrap: Executing faulting instruction at new address %RGv\n", pNewEip));
1946
if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1947
Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1949
pCtx->eip = pNewEip;
1950
AssertRelease(pCtx->eip);
1952
if (pCtx->eflags.Bits.u1IF)
1954
/* Windows XP lets irets fault intentionally and then takes action based on the opcode; an
1955
* int3 patch overwrites it and leads to blue screens. Remove the patch in this case.
1957
if ( u8TrapNo == X86_XCPT_GP
1958
&& PATMIsInt3Patch(pVM, pCtx->eip, NULL, NULL))
1960
/** @todo move to PATMR3HandleTrap */
1961
Log(("Possible Windows XP iret fault at %08RX32\n", pCtx->eip));
1962
PATMR3RemovePatch(pVM, pCtx->eip);
1965
/** @todo Knoppix 5 regression when returning VINF_SUCCESS here and going back to raw mode. */
1966
/* Note: possibly because a reschedule is required (e.g. iret to V86 code) */
1968
return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
1969
/* Interrupts are enabled; just go back to the original instruction.
1970
return VINF_SUCCESS; */
1972
return VINF_EM_RESCHEDULE_REM;
1978
case VINF_PATCH_EMULATE_INSTR:
1979
Log(("emR3PatchTrap: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
1980
pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
1981
pCtx->eip = pNewEip;
1982
AssertRelease(pCtx->eip);
1983
return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHEMUL: ");
1986
* The patch was disabled, hand it to the REM.
1988
case VERR_PATCH_DISABLED:
1989
if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1990
Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1991
pCtx->eip = pNewEip;
1992
AssertRelease(pCtx->eip);
1994
if (pCtx->eflags.Bits.u1IF)
1997
* The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1999
Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
2000
return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
2002
return VINF_EM_RESCHEDULE_REM;
2004
/* Force continued patch exection; usually due to write monitored stack. */
2005
case VINF_PATCH_CONTINUE:
2006
return VINF_SUCCESS;
2009
* Anything else is *fatal*.
2012
AssertReleaseMsgFailed(("Unknown return code %Rrc from PATMR3HandleTrap!\n", rc));
2013
return VERR_IPE_UNEXPECTED_STATUS;
2016
return VINF_SUCCESS;
2021
* Handle a privileged instruction.
2023
* @returns VBox status code suitable for EM.
2024
* @param pVM VM handle.
2025
* @param pVCpu VMCPU handle;
2027
int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
2029
STAM_PROFILE_START(&pVCpu->em.s.StatPrivEmu, a);
2030
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2032
Assert(!pCtx->eflags.Bits.u1VM);
2034
if (PATMIsEnabled(pVM))
2037
* Check if in patch code.
2039
if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
2042
DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2044
AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", pCtx->eip));
2045
return VERR_EM_RAW_PATCH_CONFLICT;
2047
if ( (pCtx->ss & X86_SEL_RPL) == 0
2048
&& !pCtx->eflags.Bits.u1VM
2049
&& !PATMIsPatchGCAddr(pVM, pCtx->eip))
2051
int rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
2052
(SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
2056
DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2058
DBGFR3DisasInstrCurrentLog(pVCpu, "Patched privileged instruction");
2059
return VINF_SUCCESS;
2065
if (!PATMIsPatchGCAddr(pVM, pCtx->eip))
2067
DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2068
DBGFR3DisasInstrCurrentLog(pVCpu, "Privileged instr: ");
2073
* Instruction statistics and logging.
2078
rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "PRIV: ");
2081
#ifdef VBOX_WITH_STATISTICS
2082
PEMSTATS pStats = pVCpu->em.s.CTX_SUFF(pStats);
2083
switch (Cpu.pCurInstr->opcode)
2086
STAM_COUNTER_INC(&pStats->StatInvlpg);
2089
STAM_COUNTER_INC(&pStats->StatIret);
2092
STAM_COUNTER_INC(&pStats->StatCli);
2093
emR3RecordCli(pVM, pVCpu, pCtx->rip);
2096
STAM_COUNTER_INC(&pStats->StatSti);
2104
AssertMsgFailed(("Unexpected privileged exception due to port IO\n"));
2108
if (Cpu.param1.flags & USE_REG_GEN32)
2111
Assert(Cpu.param2.flags & USE_REG_CR);
2112
Assert(Cpu.param2.base.reg_ctrl <= USE_REG_CR4);
2113
STAM_COUNTER_INC(&pStats->StatMovReadCR[Cpu.param2.base.reg_ctrl]);
2118
Assert(Cpu.param1.flags & USE_REG_CR);
2119
Assert(Cpu.param1.base.reg_ctrl <= USE_REG_CR4);
2120
STAM_COUNTER_INC(&pStats->StatMovWriteCR[Cpu.param1.base.reg_ctrl]);
2125
STAM_COUNTER_INC(&pStats->StatMovDRx);
2128
STAM_COUNTER_INC(&pStats->StatMovLldt);
2131
STAM_COUNTER_INC(&pStats->StatMovLidt);
2134
STAM_COUNTER_INC(&pStats->StatMovLgdt);
2137
STAM_COUNTER_INC(&pStats->StatSysEnter);
2140
STAM_COUNTER_INC(&pStats->StatSysExit);
2143
STAM_COUNTER_INC(&pStats->StatSysCall);
2146
STAM_COUNTER_INC(&pStats->StatSysRet);
2149
STAM_COUNTER_INC(&pStats->StatHlt);
2152
STAM_COUNTER_INC(&pStats->StatMisc);
2153
Log4(("emR3RawPrivileged: opcode=%d\n", Cpu.pCurInstr->opcode));
2156
#endif /* VBOX_WITH_STATISTICS */
2157
if ( (pCtx->ss & X86_SEL_RPL) == 0
2158
&& !pCtx->eflags.Bits.u1VM
2159
&& SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT)
2163
STAM_PROFILE_START(&pVCpu->em.s.StatPrivEmu, a);
2164
switch (Cpu.pCurInstr->opcode)
2167
pCtx->eflags.u32 &= ~X86_EFL_IF;
2168
Assert(Cpu.opsize == 1);
2169
pCtx->rip += Cpu.opsize;
2170
STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2171
return VINF_EM_RESCHEDULE_REM; /* must go to the recompiler now! */
2174
pCtx->eflags.u32 |= X86_EFL_IF;
2175
EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + Cpu.opsize);
2176
Assert(Cpu.opsize == 1);
2177
pCtx->rip += Cpu.opsize;
2178
STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2179
return VINF_SUCCESS;
2182
if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
2184
PATMTRANSSTATE enmState;
2185
RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);
2187
if (enmState == PATMTRANS_OVERWRITTEN)
2189
rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
2190
Assert(rc == VERR_PATCH_DISABLED);
2191
/* Conflict detected, patch disabled */
2192
Log(("emR3RawPrivileged: detected conflict -> disabled patch at %08RX32\n", pCtx->eip));
2194
enmState = PATMTRANS_SAFE;
2197
/* The translation had better be successful. Otherwise we can't recover. */
2198
AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %08RX32\n", pCtx->eip));
2199
if (enmState != PATMTRANS_OVERWRITTEN)
2200
pCtx->eip = pOrgInstrGC;
2202
/* no break; we could just return VINF_EM_HALT here */
2207
if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2209
DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2210
DBGFR3DisasInstrCurrentLog(pVCpu, "Privileged instr: ");
2214
rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
2217
pCtx->rip += Cpu.opsize;
2218
#ifdef EM_NOTIFY_HWACCM
2219
if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
2220
HWACCMR3NotifyEmulated(pVCpu);
2222
STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2224
if ( Cpu.pCurInstr->opcode == OP_MOV_CR
2225
&& Cpu.param1.flags == USE_REG_CR /* write */
2228
/* Deal with CR0 updates inside patch code that force
2229
* us to go to the recompiler.
2231
if ( PATMIsPatchGCAddr(pVM, pCtx->rip)
2232
&& (pCtx->cr0 & (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE)) != (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE))
2234
PATMTRANSSTATE enmState;
2235
RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->rip, &enmState);
2237
Log(("Force recompiler switch due to cr0 (%RGp) update rip=%RGv -> %RGv (enmState=%d)\n", pCtx->cr0, pCtx->rip, pOrgInstrGC, enmState));
2238
if (enmState == PATMTRANS_OVERWRITTEN)
2240
rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
2241
Assert(rc == VERR_PATCH_DISABLED);
2242
/* Conflict detected, patch disabled */
2243
Log(("emR3RawPrivileged: detected conflict -> disabled patch at %RGv\n", (RTGCPTR)pCtx->rip));
2244
enmState = PATMTRANS_SAFE;
2246
/* The translation had better be successful. Otherwise we can't recover. */
2247
AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %RGv\n", (RTGCPTR)pCtx->rip));
2248
if (enmState != PATMTRANS_OVERWRITTEN)
2249
pCtx->rip = pOrgInstrGC;
2252
/* Reschedule is necessary as the execution/paging mode might have changed. */
2253
return VINF_EM_RESCHEDULE;
2255
return rc; /* can return VINF_EM_HALT as well. */
2257
AssertMsgReturn(rc == VERR_EM_INTERPRETER, ("%Rrc\n", rc), rc);
2258
break; /* fall back to the recompiler */
2260
STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
2264
if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2265
return emR3PatchTrap(pVM, pVCpu, pCtx, VINF_PATM_PATCH_TRAP_GP);
2267
return emR3RawExecuteInstruction(pVM, pVCpu, "PRIV");
2272
* Update the forced rawmode execution modifier.
2274
* This function is called when we're returning from the raw-mode loop(s). If we're
2275
* in patch code, it will set a flag forcing execution to be resumed in raw-mode,
2276
* if not in patch code, the flag will be cleared.
2278
* We should never interrupt patch code while it's being executed. Cli patches can
2279
* contain big code blocks, but they are always executed with IF=0. Other patches
2280
* replace single instructions and should be atomic.
2282
* @returns Updated rc.
2284
* @param pVM The VM handle.
2285
* @param pVCpu The VMCPU handle.
2286
* @param pCtx The guest CPU context.
2287
* @param rc The result code.
2289
DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
2291
if (PATMIsPatchGCAddr(pVM, pCtx->eip)) /** @todo check cs selector base/type */
2293
/* ignore reschedule attempts. */
2296
case VINF_EM_RESCHEDULE:
2297
case VINF_EM_RESCHEDULE_REM:
2298
LogFlow(("emR3RawUpdateForceFlag: patch address -> force raw reschedule\n"));
/* EIP is inside PATM patch code: force execution to resume in raw mode. */
2302
pVCpu->em.s.fForceRAW = true;
/* Not in patch code: clear the forced raw-mode flag. */
2305
pVCpu->em.s.fForceRAW = false;
2311
* Process a subset of the raw-mode return code.
2313
* Since we have to share this with raw-mode single stepping, this inline
2314
* function has been created to avoid code duplication.
2316
* @returns VINF_SUCCESS if it's ok to continue raw mode.
2317
* @returns VBox status code to return to the EM main loop.
2319
* @param pVM The VM handle
2320
* @param pVCpu The VMCPU handle
2321
* @param rc The return code.
2322
* @param pCtx The guest cpu context.
2324
DECLINLINE(int) emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
2329
* Common & simple ones.
2333
case VINF_EM_RESCHEDULE_RAW:
2334
case VINF_EM_RESCHEDULE_HWACC:
2335
case VINF_EM_RAW_INTERRUPT:
2336
case VINF_EM_RAW_TO_R3:
2337
case VINF_EM_RAW_TIMER_PENDING:
2338
case VINF_EM_PENDING_REQUEST:
2343
* Privileged instruction.
2345
case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
2346
case VINF_PATM_PATCH_TRAP_GP:
2347
rc = emR3RawPrivileged(pVM, pVCpu);
2351
* Got a trap which needs dispatching.
2353
case VINF_EM_RAW_GUEST_TRAP:
2354
if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
2356
AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVCpu)));
2357
rc = VERR_EM_RAW_PATCH_CONFLICT;
2360
rc = emR3RawGuestTrap(pVM, pVCpu);
2364
* Trap in patch code.
2366
case VINF_PATM_PATCH_TRAP_PF:
2367
case VINF_PATM_PATCH_INT3:
2368
rc = emR3PatchTrap(pVM, pVCpu, pCtx, rc);
2371
case VINF_PATM_DUPLICATE_FUNCTION:
2372
Assert(PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2373
rc = PATMR3DuplicateFunctionRequest(pVM, pCtx);
2378
case VINF_PATM_CHECK_PATCH_PAGE:
2379
rc = PATMR3HandleMonitoredPage(pVM);
2387
case VERR_EM_RAW_PATCH_CONFLICT:
2388
AssertReleaseMsgFailed(("%Rrc handling is not yet implemented\n", rc));
2391
#ifdef VBOX_WITH_VMI
2395
case VINF_EM_RESCHEDULE_PARAV:
2396
rc = PARAVCallFunction(pVM);
2401
* Memory mapped I/O access - attempt to patch the instruction
2403
case VINF_PATM_HC_MMIO_PATCH_READ:
2404
rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
2405
PATMFL_MMIO_ACCESS | ((SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0));
2407
rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
2410
case VINF_PATM_HC_MMIO_PATCH_WRITE:
2411
AssertFailed(); /* not yet implemented. */
2412
rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
2416
* Conflict or out of page tables.
2418
* VM_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
2419
* do here is to execute the pending forced actions.
2421
case VINF_PGM_SYNC_CR3:
2422
AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
2423
("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
2428
* Paging mode change.
2430
case VINF_PGM_CHANGE_MODE:
2431
rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2432
if (rc == VINF_SUCCESS)
2433
rc = VINF_EM_RESCHEDULE;
2434
AssertMsg(RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST), ("%Rrc\n", rc));
2438
* CSAM wants to perform a task in ring-3. It has set an FF action flag.
2440
case VINF_CSAM_PENDING_ACTION:
2445
* Invoked Interrupt gate - must directly (!) go to the recompiler.
2447
case VINF_EM_RAW_INTERRUPT_PENDING:
2448
case VINF_EM_RAW_RING_SWITCH_INT:
2449
Assert(TRPMHasTrap(pVCpu));
2450
Assert(!PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2452
if (TRPMHasTrap(pVCpu))
2454
/* If the guest gate is marked unpatched, then we will check again if we can patch it. */
2455
uint8_t u8Interrupt = TRPMGetTrapNo(pVCpu);
2456
if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
2458
CSAMR3CheckGates(pVM, u8Interrupt, 1);
2459
Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
2460
/* Note: If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
2463
rc = VINF_EM_RESCHEDULE_REM;
2467
* Other ring switch types.
2469
case VINF_EM_RAW_RING_SWITCH:
2470
rc = emR3RawRingSwitch(pVM, pVCpu);
2474
* I/O Port access - emulate the instruction.
2476
case VINF_IOM_HC_IOPORT_READ:
2477
case VINF_IOM_HC_IOPORT_WRITE:
2478
rc = emR3RawExecuteIOInstruction(pVM, pVCpu);
2482
* Memory mapped I/O access - emulate the instruction.
2484
case VINF_IOM_HC_MMIO_READ:
2485
case VINF_IOM_HC_MMIO_WRITE:
2486
case VINF_IOM_HC_MMIO_READ_WRITE:
2487
rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
2491
* (MM)IO intensive code block detected; fall back to the recompiler for better performance
2493
case VINF_EM_RAW_EMULATE_IO_BLOCK:
2494
rc = HWACCMR3EmulateIoBlock(pVM, pCtx);
2498
* Execute instruction.
2500
case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
2501
rc = emR3RawExecuteInstruction(pVM, pVCpu, "LDT FAULT: ");
2503
case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
2504
rc = emR3RawExecuteInstruction(pVM, pVCpu, "GDT FAULT: ");
2506
case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
2507
rc = emR3RawExecuteInstruction(pVM, pVCpu, "IDT FAULT: ");
2509
case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
2510
rc = emR3RawExecuteInstruction(pVM, pVCpu, "TSS FAULT: ");
2512
case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
2513
rc = emR3RawExecuteInstruction(pVM, pVCpu, "PD FAULT: ");
2516
case VINF_EM_RAW_EMULATE_INSTR_HLT:
2517
/** @todo skip instruction and go directly to the halt state. (see REM for implementation details) */
2518
rc = emR3RawPrivileged(pVM, pVCpu);
2521
case VINF_PATM_PENDING_IRQ_AFTER_IRET:
2522
rc = emR3RawExecuteInstruction(pVM, pVCpu, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
2525
case VINF_EM_RAW_EMULATE_INSTR:
2526
case VINF_PATCH_EMULATE_INSTR:
2527
rc = emR3RawExecuteInstruction(pVM, pVCpu, "EMUL: ");
2531
* Stale selector and iret traps => REM.
2533
case VINF_EM_RAW_STALE_SELECTOR:
2534
case VINF_EM_RAW_IRET_TRAP:
2535
/* We will not go to the recompiler if EIP points to patch code. */
2536
if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2538
pCtx->eip = PATMR3PatchToGCPtr(pVM, (RTGCPTR)pCtx->eip, 0);
2540
LogFlow(("emR3RawHandleRC: %Rrc -> %Rrc\n", rc, VINF_EM_RESCHEDULE_REM));
2541
rc = VINF_EM_RESCHEDULE_REM;
2547
case VINF_EM_TERMINATE:
2550
case VINF_EM_SUSPEND:
2552
case VINF_EM_RESUME:
2553
case VINF_EM_NO_MEMORY:
2554
case VINF_EM_RESCHEDULE:
2555
case VINF_EM_RESCHEDULE_REM:
2556
case VINF_EM_WAIT_SIPI:
2560
* Up a level and invoke the debugger.
2562
case VINF_EM_DBG_STEPPED:
2563
case VINF_EM_DBG_BREAKPOINT:
2564
case VINF_EM_DBG_STEP:
2565
case VINF_EM_DBG_HYPER_BREAKPOINT:
2566
case VINF_EM_DBG_HYPER_STEPPED:
2567
case VINF_EM_DBG_HYPER_ASSERTION:
2568
case VINF_EM_DBG_STOP:
2572
* Up a level, dump and debug.
2574
case VERR_TRPM_DONT_PANIC:
2575
case VERR_TRPM_PANIC:
2576
case VERR_VMM_RING0_ASSERTION:
2577
case VERR_VMM_HYPER_CR3_MISMATCH:
2578
case VERR_VMM_RING3_CALL_DISABLED:
2582
* Up a level, after HwAccM have done some release logging.
2584
case VERR_VMX_INVALID_VMCS_FIELD:
2585
case VERR_VMX_INVALID_VMCS_PTR:
2586
case VERR_VMX_INVALID_VMXON_PTR:
2587
case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE:
2588
case VERR_VMX_UNEXPECTED_EXCEPTION:
2589
case VERR_VMX_UNEXPECTED_EXIT_CODE:
2590
case VERR_VMX_INVALID_GUEST_STATE:
2591
case VERR_VMX_UNABLE_TO_START_VM:
2592
case VERR_VMX_UNABLE_TO_RESUME_VM:
2593
HWACCMR3CheckError(pVM, rc);
2596
* Anything which is not known to us means an internal error
2597
* and the termination of the VM!
2600
AssertMsgFailed(("Unknown GC return code: %Rra\n", rc));
2608
* Check for pending raw actions
2610
* @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
2612
* @param pVM The VM to operate on.
2613
* @param pVCpu The VMCPU handle.
2615
VMMR3DECL(int) EMR3CheckRawForcedActions(PVM pVM, PVMCPU pVCpu)
/* Thin public wrapper: delegates to the static worker, supplying this VCPU's
guest CPU context. */
2617
return emR3RawForcedActions(pVM, pVCpu, pVCpu->em.s.pCtx);
2622
* Process raw-mode specific forced actions.
2624
* This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
2626
* @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
2628
* @param pVM The VM handle.
2629
* @param pVCpu The VMCPU handle.
2630
* @param pCtx The guest CPUM register context.
2632
static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2635
* Note that the order is *vitally* important!
2636
* Also note that SELMR3UpdateFromCPUM may trigger VM_FF_SELM_SYNC_TSS.
2641
* Sync selector tables.
2643
if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT))
2645
int rc = SELMR3UpdateFromCPUM(pVM, pVCpu);
2653
* The CSAMR3CheckGates call in TRPMR3SyncIDT may call PGMPrefetchPage
2654
* and PGMShwModifyPage, so we're in for trouble if for instance a
2655
* PGMSyncCR3+pgmPoolClearAll is pending.
2657
if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
2659
if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
2660
&& EMIsRawRing0Enabled(pVM)
2661
&& CSAMIsEnabled(pVM))
2663
int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2668
int rc = TRPMR3SyncIDT(pVM, pVCpu);
2676
if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
2678
int rc = SELMR3SyncTSS(pVM, pVCpu);
2684
* Sync page directory.
2686
if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
2688
Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2689
int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2693
Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
2695
/* Prefetch pages for EIP and ESP. */
2696
/** @todo This is rather expensive. Should investigate if it really helps at all. */
2697
rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
2698
if (rc == VINF_SUCCESS)
2699
rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
2700
if (rc != VINF_SUCCESS)
2702
if (rc != VINF_PGM_SYNC_CR3)
2704
AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2707
rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2711
/** @todo maybe prefetch the supervisor stack page as well */
2712
Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
2716
* Allocate handy pages (just in case the above actions have consumed some pages).
2718
if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2720
int rc = PGMR3PhysAllocateHandyPages(pVM);
2726
* Check whether we're out of memory now.
2728
* This may stem from some of the above actions or operations that has been executed
2729
* since we ran FFs. The allocate handy pages must for instance always be followed by
2732
if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
2733
return VINF_EM_NO_MEMORY;
2735
return VINF_SUCCESS;
2740
* Executes raw code.
2742
* This function contains the raw-mode version of the inner
2743
* execution loop (the outer loop being in EMR3ExecuteVM()).
2745
* @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
2746
* VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2748
* @param pVM VM handle.
2749
* @param pVCpu VMCPU handle.
2750
* @param pfFFDone Where to store an indicator telling whether or not
2751
* FFs were done before returning.
2753
static int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
2755
STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTotal, a);
2757
int rc = VERR_INTERNAL_ERROR;
2758
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2759
LogFlow(("emR3RawExecute: (cs:eip=%04x:%08x)\n", pCtx->cs, pCtx->eip));
2760
pVCpu->em.s.fForceRAW = false;
2766
* Spin till we get a forced action or raw mode status code resulting in
2767
* in anything but VINF_SUCCESS or VINF_EM_RESCHEDULE_RAW.
2772
STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWEntry, b);
2775
* Check various preconditions.
2778
Assert(REMR3QueryPendingInterrupt(pVM, pVCpu) == REM_NO_PENDING_IRQ);
2779
Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) == 3 || (pCtx->ss & X86_SEL_RPL) == 0);
2780
AssertMsg( (pCtx->eflags.u32 & X86_EFL_IF)
2781
|| PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
2782
("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
2783
if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
2784
&& PGMMapHasConflicts(pVM))
2787
AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
2788
return VERR_INTERNAL_ERROR;
2790
#endif /* VBOX_STRICT */
2793
* Process high priority pre-execution raw-mode FFs.
2795
if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
2796
|| VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2798
rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
2799
if (rc != VINF_SUCCESS)
2804
* If we're going to execute ring-0 code, the guest state needs to
2805
* be modified a bit and some of the state components (IF, SS/CS RPL,
2806
* and perhaps EIP) needs to be stored with PATM.
2808
rc = CPUMRawEnter(pVCpu, NULL);
2809
if (rc != VINF_SUCCESS)
2811
STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
2816
* Scan code before executing it. Don't bother with user mode or V86 code
2818
if ( (pCtx->ss & X86_SEL_RPL) <= 1
2819
&& !pCtx->eflags.Bits.u1VM
2820
&& !PATMIsPatchGCAddr(pVM, pCtx->eip))
2822
STAM_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWEntry, b);
2823
CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
2824
STAM_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWEntry, b);
2825
if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
2826
|| VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2828
rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
2829
if (rc != VINF_SUCCESS)
2831
rc = CPUMRawLeave(pVCpu, NULL, rc);
2839
* Log important stuff before entering GC.
2841
PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM);
2842
if (pCtx->eflags.Bits.u1VM)
2843
Log(("RV86: %04X:%08X IF=%d VMFlags=%x\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
2844
else if ((pCtx->ss & X86_SEL_RPL) == 1)
2846
bool fCSAMScanned = CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip);
2847
Log(("RR0: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL), fCSAMScanned));
2849
else if ((pCtx->ss & X86_SEL_RPL) == 3)
2850
Log(("RR3: %08X ESP=%08X IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
2851
#endif /* LOG_ENABLED */
2858
STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
2859
STAM_PROFILE_START(&pVCpu->em.s.StatRAWExec, c);
2860
rc = VMMR3RawRunGC(pVM, pVCpu);
2861
STAM_PROFILE_STOP(&pVCpu->em.s.StatRAWExec, c);
2862
STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTail, d);
2864
LogFlow(("RR0-E: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL)));
2865
LogFlow(("VMMR3RawRunGC returned %Rrc\n", rc));
2870
* Restore the real CPU state and deal with high priority post
2871
* execution FFs before doing anything else.
2873
rc = CPUMRawLeave(pVCpu, NULL, rc);
2874
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
2875
if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
2876
|| VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
2877
rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
2881
* Assert TSS consistency & rc vs patch code.
2883
if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
2884
&& EMIsRawRing0Enabled(pVM))
2885
SELMR3CheckTSS(pVM);
2889
case VINF_EM_RAW_INTERRUPT:
2890
case VINF_PATM_PATCH_TRAP_PF:
2891
case VINF_PATM_PATCH_TRAP_GP:
2892
case VINF_PATM_PATCH_INT3:
2893
case VINF_PATM_CHECK_PATCH_PAGE:
2894
case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
2895
case VINF_EM_RAW_GUEST_TRAP:
2896
case VINF_EM_RESCHEDULE_RAW:
2900
if (PATMIsPatchGCAddr(pVM, pCtx->eip) && !(pCtx->eflags.u32 & X86_EFL_TF))
2901
LogIt(NULL, 0, LOG_GROUP_PATM, ("Patch code interrupted at %RRv for reason %Rrc\n", (RTRCPTR)CPUMGetGuestEIP(pVCpu), rc));
2905
* Let's go paranoid!
2907
if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
2908
&& PGMMapHasConflicts(pVM))
2911
AssertMsgFailed(("We should not get conflicts any longer!!! rc=%Rrc\n", rc));
2912
return VERR_INTERNAL_ERROR;
2914
#endif /* VBOX_STRICT */
2917
* Process the returned status code.
2919
if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
2921
STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2924
rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
2925
if (rc != VINF_SUCCESS)
2927
rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
2928
if (rc != VINF_SUCCESS)
2930
STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2936
* Check and execute forced actions.
2938
#ifdef VBOX_HIGH_RES_TIMERS_HACK
2939
TMTimerPollVoid(pVM, pVCpu);
2941
STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
2942
if ( VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PGM_NO_MEMORY)
2943
|| VMCPU_FF_ISPENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2945
Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) != 1);
2947
STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWTotal, a);
2948
rc = emR3ForcedActions(pVM, pVCpu, rc);
2949
STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWTotal, a);
2950
if ( rc != VINF_SUCCESS
2951
&& rc != VINF_EM_RESCHEDULE_RAW)
2953
rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
2954
if (rc != VINF_SUCCESS)
2964
* Return to outer loop.
2966
#if defined(LOG_ENABLED) && defined(DEBUG)
2969
STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTotal, a);
2975
* Executes hardware accelerated raw code. (Intel VMX & AMD SVM)
2977
* This function contains the raw-mode version of the inner
2978
* execution loop (the outer loop being in EMR3ExecuteVM()).
2980
* @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
2981
* VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2983
* @param pVM VM handle.
2984
* @param pVCpu VMCPU handle.
2985
* @param pfFFDone Where to store an indicator telling whether or not
2986
* FFs were done before returning.
2988
static int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
2990
int rc = VERR_INTERNAL_ERROR;
2991
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2993
LogFlow(("emR3HwAccExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs, (RTGCPTR)pCtx->rip));
2996
STAM_COUNTER_INC(&pVCpu->em.s.StatHwAccExecuteEntry);
2998
#ifdef EM_NOTIFY_HWACCM
2999
HWACCMR3NotifyScheduled(pVCpu);
3003
* Spin till we get a forced action which returns anything but VINF_SUCCESS.
3007
STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHwAccEntry, a);
3010
* Process high priority pre-execution raw-mode FFs.
3012
VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HWACCM mode; shouldn't be set really. */
3013
if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
3014
|| VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
3016
rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
3017
if (rc != VINF_SUCCESS)
3023
* Log important stuff before entering GC.
3025
if (TRPMHasTrap(pVCpu))
3026
Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs, (RTGCPTR)pCtx->rip));
3028
uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
3030
if (pVM->cCPUs == 1)
3032
if (pCtx->eflags.Bits.u1VM)
3033
Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
3034
else if (CPUMIsGuestIn64BitCodeEx(pCtx))
3035
Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
3037
Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
3041
if (pCtx->eflags.Bits.u1VM)
3042
Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
3043
else if (CPUMIsGuestIn64BitCodeEx(pCtx))
3044
Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
3046
Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
3048
#endif /* LOG_ENABLED */
3053
STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHwAccEntry, a);
3054
STAM_PROFILE_START(&pVCpu->em.s.StatHwAccExec, x);
3055
rc = VMMR3HwAccRunGC(pVM, pVCpu);
3056
STAM_PROFILE_STOP(&pVCpu->em.s.StatHwAccExec, x);
3059
* Deal with high priority post execution FFs before doing anything else.
3061
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
3062
if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
3063
|| VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
3064
rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
3067
* Process the returned status code.
3069
if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
3072
rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
3073
if (rc != VINF_SUCCESS)
3077
* Check and execute forced actions.
3079
#ifdef VBOX_HIGH_RES_TIMERS_HACK
3080
TMTimerPollVoid(pVM, pVCpu);
3082
if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK)
3083
|| VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_MASK))
3085
rc = emR3ForcedActions(pVM, pVCpu, rc);
3086
if ( rc != VINF_SUCCESS
3087
&& rc != VINF_EM_RESCHEDULE_HWACC)
3096
* Return to outer loop.
3098
#if defined(LOG_ENABLED) && defined(DEBUG)
3106
1034
* Decides whether to execute RAW, HWACC or REM.
3108
1036
* @returns new EM state