/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
PGM_BTH_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0);
28
PGM_BTH_DECL(int, Enter)(PVM pVM, RTGCPHYS GCPhysCR3);
29
PGM_BTH_DECL(int, Relocate)(PVM pVM, RTGCPTR offDelta);
28
PGM_BTH_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
29
PGM_BTH_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);
31
PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
32
PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
33
PGM_BTH_DECL(int, SyncPage)(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError);
34
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCPTR Addr, unsigned fPage, unsigned uError);
35
PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, RTGCPTR GCPtrPage);
36
PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCPTR GCPtrPage);
37
PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
38
PGM_BTH_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
39
PGM_BTH_DECL(int, UnmapCR3)(PVM pVM);
31
PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
32
PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
33
PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError);
34
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uError);
35
PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
36
PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
37
PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
38
PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
39
PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu);
138
139
&& ( PGM_GST_TYPE == PGM_TYPE_REAL \
139
140
|| PGM_GST_TYPE == PGM_TYPE_PROT))
142
PVM pVM = pVCpu->pVMR3;
141
144
Assert(!HWACCMIsNestedPagingActive(pVM));
142
147
/* Note: we only really need shadow paging in real and protected mode for VT-x and AMD-V (excluding nested paging/EPT modes),
143
148
* but any calls to GC need a proper shadow page setup as well.
145
150
/* Free the previous root mapping if still active. */
146
151
PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
147
if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
152
if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
149
Assert(pVM->pgm.s.pShwPageCR3R3->enmKind != PGMPOOLKIND_FREE);
154
Assert(pVCpu->pgm.s.pShwPageCR3R3->enmKind != PGMPOOLKIND_FREE);
151
156
/* Mark the page as unlocked; allow flushing again. */
152
pgmPoolUnlockPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
157
pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
154
159
/* Remove the hypervisor mappings from the shadow page table. */
155
pgmMapDeactivateCR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
160
pgmMapDeactivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
157
pgmPoolFreeByPage(pPool, pVM->pgm.s.pShwPageCR3R3, pVM->pgm.s.iShwUser, pVM->pgm.s.iShwUserTable);
158
pVM->pgm.s.pShwPageCR3R3 = 0;
159
pVM->pgm.s.pShwPageCR3RC = 0;
160
pVM->pgm.s.pShwPageCR3R0 = 0;
161
pVM->pgm.s.iShwUser = 0;
162
pVM->pgm.s.iShwUserTable = 0;
162
pgmPoolFreeByPage(pPool, pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable);
163
pVCpu->pgm.s.pShwPageCR3R3 = 0;
164
pVCpu->pgm.s.pShwPageCR3RC = 0;
165
pVCpu->pgm.s.pShwPageCR3R0 = 0;
166
pVCpu->pgm.s.iShwUser = 0;
167
pVCpu->pgm.s.iShwUserTable = 0;
165
/* contruct a fake address */
170
/* contruct a fake address. */
166
171
GCPhysCR3 = RT_BIT_64(63);
167
pVM->pgm.s.iShwUser = SHW_POOL_ROOT_IDX;
168
pVM->pgm.s.iShwUserTable = GCPhysCR3 >> PAGE_SHIFT;
169
int rc = pgmPoolAlloc(pVM, GCPhysCR3, BTH_PGMPOOLKIND_ROOT, pVM->pgm.s.iShwUser, pVM->pgm.s.iShwUserTable, &pVM->pgm.s.pShwPageCR3R3);
172
pVCpu->pgm.s.iShwUser = SHW_POOL_ROOT_IDX;
173
pVCpu->pgm.s.iShwUserTable = GCPhysCR3 >> PAGE_SHIFT;
174
int rc = pgmPoolAlloc(pVM, GCPhysCR3, BTH_PGMPOOLKIND_ROOT, pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable, &pVCpu->pgm.s.pShwPageCR3R3);
170
175
if (rc == VERR_PGM_POOL_FLUSHED)
172
177
Log(("Bth-Enter: PGM pool flushed -> signal sync cr3\n"));
173
Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
178
Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
174
180
return VINF_PGM_SYNC_CR3;
176
182
AssertRCReturn(rc, rc);
178
184
/* Mark the page as locked; disallow flushing. */
179
pgmPoolLockPage(pPool, pVM->pgm.s.pShwPageCR3R3);
185
pgmPoolLockPage(pPool, pVCpu->pgm.s.pShwPageCR3R3);
181
pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.pShwPageCR3R3);
182
pVM->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVM->pgm.s.pShwPageCR3R3);
187
pVCpu->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVCpu->pgm.s.pShwPageCR3R3);
188
pVCpu->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVCpu->pgm.s.pShwPageCR3R3);
184
190
/* Set the current hypervisor CR3. */
185
CPUMSetHyperCR3(pVM, PGMGetHyperCR3(pVM));
191
CPUMSetHyperCR3(pVCpu, PGMGetHyperCR3(pVCpu));
187
193
/* Apply all hypervisor mappings to the new CR3. */
188
return pgmMapActivateCR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
194
rc = pgmMapActivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
190
198
return VINF_SUCCESS;