/* $Id: PATMAll.cpp 35348 2010-12-27 16:35:23Z vboxsync $ */
/** @file
 * PATM - The Patch Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/vmm/patm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/em.h>
#include <VBox/err.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include "PATMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>

#include <VBox/log.h>
#include <iprt/assert.h>
40
* Load virtualized flags.
42
* This function is called from CPUMRawEnter(). It doesn't have to update the
43
* IF and IOPL eflags bits, the caller will enforce those to set and 0 respectively.
45
* @param pVM VM handle.
46
* @param pCtxCore The cpu context core.
49
VMMDECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
51
bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
54
* Currently we don't bother to check whether PATM is enabled or not.
55
* For all cases where it isn't, IOPL will be safe and IF will be set.
57
register uint32_t efl = pCtxCore->eflags.u32;
58
CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
59
AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));
61
AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%RRv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));
63
efl &= ~PATM_VIRTUAL_FLAGS_MASK;
65
pCtxCore->eflags.u32 = efl;
68
#ifdef PATM_EMULATE_SYSENTER
71
/* Check if the sysenter handler has changed. */
72
pCtx = CPUMQueryGuestCtxPtr(pVM);
73
if ( pCtx->SysEnter.cs != 0
74
&& pCtx->SysEnter.eip != 0
77
if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
79
pVM->patm.s.pfnSysEnterPatchGC = 0;
80
pVM->patm.s.pfnSysEnterGC = 0;
82
Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
83
pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
84
if (pVM->patm.s.pfnSysEnterPatchGC == 0)
86
rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
87
if (rc == VINF_SUCCESS)
89
pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
90
pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
91
Assert(pVM->patm.s.pfnSysEnterPatchGC);
95
pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
100
pVM->patm.s.pfnSysEnterPatchGC = 0;
101
pVM->patm.s.pfnSysEnterGC = 0;
109
* Restores virtualized flags.
111
* This function is called from CPUMRawLeave(). It will update the eflags register.
113
** @note Only here we are allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
115
* @param pVM VM handle.
116
* @param pCtxCore The cpu context core.
117
* @param rawRC Raw mode return code
120
VMMDECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
122
bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
124
* We will only be called if PATMRawEnter was previously called.
126
register uint32_t efl = pCtxCore->eflags.u32;
127
efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
128
pCtxCore->eflags.u32 = efl;
129
CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;
131
AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC), ("Inconsistent state at %RRv rc=%Rrc\n", pCtxCore->eip, rawRC));
132
AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC), ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC));
135
if ( (efl & X86_EFL_IF)
139
if ( rawRC < VINF_PATM_LEAVEGC_FIRST
140
|| rawRC > VINF_PATM_LEAVEGC_LAST)
144
* - Don't interrupt special patch streams that replace special instructions
145
* - Don't break instruction fusing (sti, pop ss, mov ss)
146
* - Don't go back to an instruction that has been overwritten by a patch jump
147
* - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
150
if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
152
PATMTRANSSTATE enmState;
153
RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);
155
AssertRelease(pOrgInstrGC);
157
Assert(enmState != PATMTRANS_OVERWRITTEN);
158
if (enmState == PATMTRANS_SAFE)
160
Assert(!PATMFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
161
Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
162
STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
163
pCtxCore->eip = pOrgInstrGC;
164
fPatchCode = false; /* to reset the stack ptr */
166
CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0; /* reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
170
LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n", pCtxCore->eip, enmState));
171
STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
176
LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
177
STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
181
#else /* !IN_RING3 */
182
AssertMsgFailed(("!IN_RING3"));
183
#endif /* !IN_RING3 */
187
if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtxCore->eip)
189
EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtxCore->eip);
191
CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;
193
/* Reset the stack pointer to the top of the stack. */
195
if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
197
LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
200
CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
206
* This is a worker for CPUMRawGetEFlags().
208
* @returns The eflags.
209
* @param pVM The VM handle.
210
* @param pCtxCore The context core.
212
VMMDECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTXCORE pCtxCore)
214
uint32_t efl = pCtxCore->eflags.u32;
215
efl &= ~PATM_VIRTUAL_FLAGS_MASK;
216
efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
221
* Updates the EFLAGS.
222
* This is a worker for CPUMRawSetEFlags().
224
* @param pVM The VM handle.
225
* @param pCtxCore The context core.
226
* @param efl The new EFLAGS value.
228
VMMDECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t efl)
230
pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
231
efl &= ~PATM_VIRTUAL_FLAGS_MASK;
233
pCtxCore->eflags.u32 = efl;
237
* Check if we must use raw mode (patch code being executed)
239
* @param pVM VM handle.
240
* @param pAddrGC Guest context address
242
VMMDECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC)
244
return ( PATMIsEnabled(pVM)
245
&& ((pAddrGC >= (RTRCPTR)pVM->patm.s.pPatchMemGC && pAddrGC < (RTRCPTR)((RTRCUINTPTR)pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem)))) ? true : false;
249
* Returns the guest context pointer and size of the GC context structure
251
* @returns VBox status code.
252
* @param pVM The VM to operate on.
254
VMMDECL(RCPTRTYPE(PPATMGCSTATE)) PATMQueryGCState(PVM pVM)
256
return pVM->patm.s.pGCStateGC;
260
* Checks whether the GC address is part of our patch region
262
* @returns VBox status code.
263
* @param pVM The VM to operate on.
264
* @param pAddrGC Guest context address
266
VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR pAddrGC)
268
return (PATMIsEnabled(pVM) && pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem) ? true : false;
272
* Set parameters for pending MMIO patch operation
274
* @returns VBox status code.
275
* @param pDevIns Device instance.
276
* @param GCPhys MMIO physical address
277
* @param pCachedData GC pointer to cached data
279
VMMDECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData)
281
pVM->patm.s.mmio.GCPhys = GCPhys;
282
pVM->patm.s.mmio.pCachedData = (RTRCPTR)pCachedData;
288
* Checks if the interrupt flag is enabled or not.
290
* @returns true if it's enabled.
291
* @returns false if it's disabled.
293
* @param pVM The VM handle.
295
VMMDECL(bool) PATMAreInterruptsEnabled(PVM pVM)
297
PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
299
return PATMAreInterruptsEnabledByCtxCore(pVM, CPUMCTX2CORE(pCtx));
303
* Checks if the interrupt flag is enabled or not.
305
* @returns true if it's enabled.
306
* @returns false if it's disabled.
308
* @param pVM The VM handle.
309
* @param pCtxCore CPU context
311
VMMDECL(bool) PATMAreInterruptsEnabledByCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
313
if (PATMIsEnabled(pVM))
315
if (PATMIsPatchGCAddr(pVM, pCtxCore->eip))
318
return !!(pCtxCore->eflags.u32 & X86_EFL_IF);
322
* Check if the instruction is patched as a duplicated function
324
* @returns patch record
325
* @param pVM The VM to operate on.
326
* @param pInstrGC Guest context point to the instruction
329
VMMDECL(PPATMPATCHREC) PATMQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC)
333
AssertCompile(sizeof(AVLOU32KEY) == sizeof(pInstrGC));
334
pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
336
&& (pRec->patch.uState == PATCH_ENABLED)
337
&& (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
344
* Checks if the int 3 was caused by a patched instruction
346
* @returns VBox status
348
* @param pVM The VM handle.
349
* @param pInstrGC Instruction pointer
350
* @param pOpcode Original instruction opcode (out, optional)
351
* @param pSize Original instruction size (out, optional)
353
VMMDECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
357
pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
359
&& (pRec->patch.uState == PATCH_ENABLED)
360
&& (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
363
if (pOpcode) *pOpcode = pRec->patch.opcode;
364
if (pSize) *pSize = pRec->patch.cbPrivInstr;
371
* Emulate sysenter, sysexit and syscall instructions
373
* @returns VBox status
375
* @param pVM The VM handle.
376
* @param pCtxCore The relevant core context.
377
* @param pCpu Disassembly context
379
VMMDECL(int) PATMSysCall(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
381
PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu0(pVM));
383
if (pCpu->pCurInstr->opcode == OP_SYSENTER)
385
if ( pCtx->SysEnter.cs == 0
386
|| pRegFrame->eflags.Bits.u1VM
387
|| (pRegFrame->cs & X86_SEL_RPL) != 3
388
|| pVM->patm.s.pfnSysEnterPatchGC == 0
389
|| pVM->patm.s.pfnSysEnterGC != (RTRCPTR)(RTRCUINTPTR)pCtx->SysEnter.eip
390
|| !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
393
Log2(("PATMSysCall: sysenter from %RRv to %RRv\n", pRegFrame->eip, pVM->patm.s.pfnSysEnterPatchGC));
394
/** @todo the base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
395
/** @note The Intel manual suggests that the OS is responsible for this. */
396
pRegFrame->cs = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
397
pRegFrame->eip = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
398
pRegFrame->ss = pRegFrame->cs + 8; /* SysEnter.cs + 8 */
399
pRegFrame->esp = pCtx->SysEnter.esp;
400
pRegFrame->eflags.u32 &= ~(X86_EFL_VM|X86_EFL_RF);
401
pRegFrame->eflags.u32 |= X86_EFL_IF;
403
/* Turn off interrupts. */
404
pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;
406
STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);
411
if (pCpu->pCurInstr->opcode == OP_SYSEXIT)
413
if ( pCtx->SysEnter.cs == 0
414
|| (pRegFrame->cs & X86_SEL_RPL) != 1
415
|| pRegFrame->eflags.Bits.u1VM
416
|| !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
419
Log2(("PATMSysCall: sysexit from %RRv to %RRv\n", pRegFrame->eip, pRegFrame->edx));
421
pRegFrame->cs = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
422
pRegFrame->eip = pRegFrame->edx;
423
pRegFrame->ss = pRegFrame->cs + 8; /* SysEnter.cs + 24 */
424
pRegFrame->esp = pRegFrame->ecx;
426
STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);
431
if (pCpu->pCurInstr->opcode == OP_SYSCALL)
433
/** @todo implement syscall */
436
if (pCpu->pCurInstr->opcode == OP_SYSRET)
438
/** @todo implement sysret */
442
return VINF_EM_RAW_RING_SWITCH;
446
* Adds branch pair to the lookup cache of the particular branch instruction
448
* @returns VBox status
449
* @param pVM The VM to operate on.
450
* @param pJumpTableGC Pointer to branch instruction lookup cache
451
* @param pBranchTarget Original branch target
452
* @param pRelBranchPatch Relative duplicated function address
454
VMMDECL(int) PATMAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch)
456
PPATCHJUMPTABLE pJumpTable;
458
Log(("PATMAddBranchToLookupCache: Adding (%RRv->%RRv (%RRv)) to table %RRv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));
460
AssertReturn(PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pJumpTableGC), VERR_INVALID_PARAMETER);
463
pJumpTable = (PPATCHJUMPTABLE) pJumpTableGC;
465
pJumpTable = (PPATCHJUMPTABLE) (pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
467
Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
468
if (pJumpTable->cAddresses < pJumpTable->nrSlots)
472
for (i=0;i<pJumpTable->nrSlots;i++)
474
if (pJumpTable->Slot[i].pInstrGC == 0)
476
pJumpTable->Slot[i].pInstrGC = pBranchTarget;
477
/* Relative address - eases relocation */
478
pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
479
pJumpTable->cAddresses++;
483
AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
484
#ifdef VBOX_WITH_STATISTICS
485
STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
486
if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
487
pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
492
/* Replace an old entry. */
493
/** @todo replacement strategy isn't really bright. change to something better if required. */
494
Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
495
Assert((pJumpTable->nrSlots & 1) == 0);
497
pJumpTable->ulInsertPos &= (pJumpTable->nrSlots-1);
498
pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC = pBranchTarget;
499
/* Relative address - eases relocation */
500
pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;
502
pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos+1) & (pJumpTable->nrSlots-1);
504
STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
511
#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
513
* Return the name of the patched instruction
515
* @returns instruction name
517
* @param opcode DIS instruction opcode
518
* @param fPatchFlags Patch flags
520
VMMDECL(const char *) patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags)
522
const char *pszInstr = NULL;
629
if (fPatchFlags & PATMFL_IDTHANDLER)
631
pszInstr = "mov (Int/Trap Handler)";
635
pszInstr = "sysenter";
638
pszInstr = "push (cs)";