/* $Id: PGMInline.h 35346 2010-12-27 16:13:13Z vboxsync $ */
/** @file
 * PGM - Inlined functions.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
18
#ifndef ___PGMInline_h
19
#define ___PGMInline_h
21
#include <VBox/cdefs.h>
22
#include <VBox/types.h>
24
#include <VBox/vmm/stam.h>
25
#include <VBox/param.h>
26
#include <VBox/vmm/vmm.h>
27
#include <VBox/vmm/mm.h>
28
#include <VBox/vmm/pdmcritsect.h>
29
#include <VBox/vmm/pdmapi.h>
31
#include <VBox/vmm/dbgf.h>
33
#include <VBox/vmm/gmm.h>
34
#include <VBox/vmm/hwaccm.h>
36
#include <iprt/assert.h>
38
#include <iprt/critsect.h>
43
/** @addtogroup grp_pgm_int Internals
48
/** @todo Split out all the inline stuff into a separate file. Then we can
49
* include it later when VM and VMCPU are defined and so avoid all that
50
* &pVM->pgm.s and &pVCpu->pgm.s stuff. It also chops ~1600 lines off
51
* this file and will make it somewhat easier to navigate... */
54
* Gets the PGMRAMRANGE structure for a guest page.
56
* @returns Pointer to the RAM range on success.
57
* @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
59
* @param pPGM PGM handle.
60
* @param GCPhys The GC physical address.
62
/* Walks the context-specific RAM range list (fast path: first range) and
   returns the PGMRAMRANGE covering GCPhys; per the doc block above, NULL is
   returned when no range matches (the return lines are among those lost).
   NOTE(review): garbled extraction — stray line-number lines and missing
   brace/do/return lines; code kept verbatim. */
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
65
* Optimize for the first range.
67
PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
68
RTGCPHYS off = GCPhys - pRam->GCPhys;
69
if (RT_UNLIKELY(off >= pRam->cb))
73
pRam = pRam->CTX_SUFF(pNext);
74
if (RT_UNLIKELY(!pRam))
76
off = GCPhys - pRam->GCPhys;
77
} while (off >= pRam->cb);
84
* Gets the PGMPAGE structure for a guest page.
86
* @returns Pointer to the page on success.
87
* @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
89
* @param pPGM PGM handle.
90
* @param GCPhys The GC physical address.
92
/* Same RAM-range walk as pgmPhysGetRange, but resolves all the way to the
   PGMPAGE entry (aPages indexed by page number within the range).
   NOTE(review): garbled extraction — stray line-number lines and missing
   brace/do/return lines; code kept verbatim. */
DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
95
* Optimize for the first range.
97
PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
98
RTGCPHYS off = GCPhys - pRam->GCPhys;
99
if (RT_UNLIKELY(off >= pRam->cb))
103
pRam = pRam->CTX_SUFF(pNext);
104
if (RT_UNLIKELY(!pRam))
106
off = GCPhys - pRam->GCPhys;
107
} while (off >= pRam->cb);
109
/* Page index within the range = byte offset shifted down by PAGE_SHIFT. */
return &pRam->aPages[off >> PAGE_SHIFT];
114
* Gets the PGMPAGE structure for a guest page.
116
* Old Phys code: Will make sure the page is present.
118
* @returns VBox status code.
119
* @retval VINF_SUCCESS and a valid *ppPage on success.
120
* @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
122
* @param pPGM PGM handle.
123
* @param GCPhys The GC physical address.
124
* @param ppPage Where to store the page pointer on success.
126
/* Status-returning variant of pgmPhysGetPage: stores the PGMPAGE pointer in
   *ppPage, returning VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS (with *ppPage
   NULLed) when the range list is exhausted.  The trailing
   "return VINF_SUCCESS;" is among the lines lost to extraction.
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
129
* Optimize for the first range.
131
PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
132
RTGCPHYS off = GCPhys - pRam->GCPhys;
133
if (RT_UNLIKELY(off >= pRam->cb))
137
pRam = pRam->CTX_SUFF(pNext);
138
if (RT_UNLIKELY(!pRam))
140
*ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
141
return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
143
off = GCPhys - pRam->GCPhys;
144
} while (off >= pRam->cb);
146
*ppPage = &pRam->aPages[off >> PAGE_SHIFT];
154
* Gets the PGMPAGE structure for a guest page.
156
* Old Phys code: Will make sure the page is present.
158
* @returns VBox status code.
159
* @retval VINF_SUCCESS and a valid *ppPage on success.
160
* @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
162
* @param pPGM PGM handle.
163
* @param GCPhys The GC physical address.
164
* @param ppPage Where to store the page pointer on success.
165
* @param ppRamHint Where to read and store the ram list hint.
166
* The caller initializes this to NULL before the call.
168
/* Like pgmPhysGetPageEx but first tries the caller-maintained range hint
   (*ppRamHint); on a hint miss it restarts from the head of the list.
   Presumably the hint is written back on success — the assignment line and
   the local 'off' declaration were lost to extraction (TODO confirm against
   the pristine source).
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
171
PPGMRAMRANGE pRam = *ppRamHint;
173
|| RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
175
pRam = pPGM->CTX_SUFF(pRamRanges);
176
off = GCPhys - pRam->GCPhys;
177
if (RT_UNLIKELY(off >= pRam->cb))
181
pRam = pRam->CTX_SUFF(pNext);
182
if (RT_UNLIKELY(!pRam))
184
*ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
185
return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
187
off = GCPhys - pRam->GCPhys;
188
} while (off >= pRam->cb);
192
*ppPage = &pRam->aPages[off >> PAGE_SHIFT];
198
* Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
200
* @returns Pointer to the page on success.
201
* @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
203
* @param pPGM PGM handle.
204
* @param GCPhys The GC physical address.
205
* @param ppRam Where to store the pointer to the PGMRAMRANGE.
207
/* Returns the PGMPAGE for GCPhys; per the doc block above it also reports
   the covering range via *ppRam — the "*ppRam = pRam;" line appears to have
   been lost to extraction (TODO confirm).
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
210
* Optimize for the first range.
212
PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
213
RTGCPHYS off = GCPhys - pRam->GCPhys;
214
if (RT_UNLIKELY(off >= pRam->cb))
218
pRam = pRam->CTX_SUFF(pNext);
219
if (RT_UNLIKELY(!pRam))
221
off = GCPhys - pRam->GCPhys;
222
} while (off >= pRam->cb);
225
return &pRam->aPages[off >> PAGE_SHIFT];
230
* Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
232
* @returns Pointer to the page on success.
233
* @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
235
* @param pPGM PGM handle.
236
* @param GCPhys The GC physical address.
237
* @param ppPage Where to store the pointer to the PGMPAGE structure.
238
* @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
240
/* Status-returning variant: stores both the PGMPAGE (*ppPage) and its
   covering range (*ppRam), NULLing both and returning
   VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS when no range covers GCPhys.  The
   "*ppRam = pRam;" / "return VINF_SUCCESS;" tail was lost to extraction.
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
243
* Optimize for the first range.
245
PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
246
RTGCPHYS off = GCPhys - pRam->GCPhys;
247
if (RT_UNLIKELY(off >= pRam->cb))
251
pRam = pRam->CTX_SUFF(pNext);
252
if (RT_UNLIKELY(!pRam))
254
*ppRam = NULL; /* Shut up silly GCC warnings. */
255
*ppPage = NULL; /* ditto */
256
return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
258
off = GCPhys - pRam->GCPhys;
259
} while (off >= pRam->cb);
262
*ppPage = &pRam->aPages[off >> PAGE_SHIFT];
268
* Convert GC Phys to HC Phys.
270
* @returns VBox status.
271
* @param pPGM PGM handle.
272
* @param GCPhys The GC physical address.
273
* @param pHCPhys Where to store the corresponding HC physical address.
275
* @deprecated Doesn't deal with zero, shared or write monitored pages.
276
* Avoid when writing new code!
278
/* Deprecated GCPhys -> HCPhys translation (see doc block above: ignores
   zero/shared/write-monitored page states).  Resolves the PGMPAGE via
   pgmPhysGetPageEx and composes HCPhys | page-offset.  The 'pPage'
   declaration and the rc check/return lines were lost to extraction.
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
281
int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
284
*pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
288
#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
291
* Inlined version of the ring-0 version of the host page mapping code
292
* that optimizes access to pages already in the set.
294
* @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
295
* @param pVCpu The current CPU.
296
* @param HCPhys The physical address of the page.
297
* @param ppv Where to store the mapping address.
299
/* Ring-0/RC dynamic-mapping fast path: probes the per-VCPU auto mapping set
   via PGMMAPSET_HASH; on a hit it bumps cInlinedRefs (capped below
   UINT16_MAX - 1 so the slow path handles saturation) and returns the cached
   pvPage, otherwise it falls back to pgmRZDynMapHCPageCommon.  HCPhys must
   be page aligned (asserted).
   NOTE(review): garbled extraction — brace/else/return lines missing; code
   kept verbatim. */
DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
301
PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
303
STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
304
Assert(!(HCPhys & PAGE_OFFSET_MASK));
305
Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
307
unsigned iHash = PGMMAPSET_HASH(HCPhys);
308
unsigned iEntry = pSet->aiHashTable[iHash];
309
if (    iEntry < pSet->cEntries
310
&& pSet->aEntries[iEntry].HCPhys == HCPhys
311
&& pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
313
pSet->aEntries[iEntry].cInlinedRefs++;
314
*ppv = pSet->aEntries[iEntry].pvPage;
315
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
319
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
320
pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
323
STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
329
* Inlined version of the guest page mapping code that optimizes access to pages
330
* already in the set.
332
* @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
333
* @param pVM The VM handle.
334
* @param pVCpu The current CPU.
335
* @param GCPhys The guest physical address of the page.
336
* @param ppv Where to store the mapping address.
338
/* Guest-physical dynamic-mapping fast path: checks only the FIRST RAM range
   (no list walk) — any miss goes to pgmRZDynMapGCPageCommon — then repeats
   the mapping-set probe of pgmRZDynMapHCPageInlined inline (per the
   surviving comment: "with out stats", i.e. without the HCPage profile).
   GCPhys must be page aligned (asserted).
   NOTE(review): garbled extraction — brace/else/return lines missing; code
   kept verbatim. */
DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
340
STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
341
AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
346
PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
347
RTGCPHYS off = GCPhys - pRam->GCPhys;
348
if (RT_UNLIKELY(off >= pRam->cb
349
/** @todo || page state stuff */))
351
/* This case is not counted into StatRZDynMapGCPageInl. */
352
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
353
return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
356
RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
357
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
360
* pgmRZDynMapHCPageInlined with out stats.
362
PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
363
Assert(!(HCPhys & PAGE_OFFSET_MASK));
364
Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
366
unsigned iHash = PGMMAPSET_HASH(HCPhys);
367
unsigned iEntry = pSet->aiHashTable[iHash];
368
if (    iEntry < pSet->cEntries
369
&& pSet->aEntries[iEntry].HCPhys == HCPhys
370
&& pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
372
pSet->aEntries[iEntry].cInlinedRefs++;
373
*ppv = pSet->aEntries[iEntry].pvPage;
374
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
378
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
379
pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
382
STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
388
* Inlined version of the ring-0 version of guest page mapping that optimizes
389
* access to pages already in the set.
391
* @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
392
* @param pVCpu The current CPU.
393
* @param GCPhys The guest physical address of the page.
394
* @param ppv Where to store the mapping address.
396
/* Thin convenience wrapper: forwards to the V2 variant, supplying the VM
   handle from the VCPU (pVCpu->CTX_SUFF(pVM)).
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
398
return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
403
* Inlined version of the ring-0 version of the guest byte mapping code
404
* that optimizes access to pages already in the set.
406
* @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
407
* @param pVCpu The current CPU.
408
* @param HCPhys The physical address of the page.
409
* @param ppv Where to store the mapping address. The offset is
412
/* Byte-granular variant of pgmRZDynMapGCPageV2Inlined: after mapping the
   page it ORs the low PAGE_OFFSET_MASK bits of GCPhys into *ppv, so the
   caller gets a pointer to the exact byte, not the page base.
   NOTE(review): the STAM_PROFILE_START here reads pVCpu->pgm.s.Stat...
   directly while the STOP below goes through CTX_SUFF(pStats) — verify
   against the pristine source whether this mismatch is real or an
   extraction artifact.
   NOTE(review): garbled extraction — brace/else/return lines missing; code
   kept verbatim. */
DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
414
STAM_PROFILE_START(&pVCpu->pgm.s.StatRZDynMapGCPageInl, a);
419
PVM pVM = pVCpu->CTX_SUFF(pVM);
420
PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
421
RTGCPHYS off = GCPhys - pRam->GCPhys;
422
if (RT_UNLIKELY(off >= pRam->cb
423
/** @todo || page state stuff */))
425
/* This case is not counted into StatRZDynMapGCPageInl. */
426
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
427
return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
430
RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
431
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
434
* pgmRZDynMapHCPageInlined with out stats.
436
PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
437
Assert(!(HCPhys & PAGE_OFFSET_MASK));
438
Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
440
unsigned iHash = PGMMAPSET_HASH(HCPhys);
441
unsigned iEntry = pSet->aiHashTable[iHash];
442
if (    iEntry < pSet->cEntries
443
&& pSet->aEntries[iEntry].HCPhys == HCPhys
444
&& pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
446
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
447
pSet->aEntries[iEntry].cInlinedRefs++;
448
*ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
452
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
453
pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
454
*ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
457
STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
461
#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
462
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
465
* Maps the page into current context (RC and maybe R0).
467
* @returns pointer to the mapping.
468
* @param pVM Pointer to the PGM instance data.
469
* @param pPage The page.
471
/* Maps a pool page into the current context via pgmRZDynMapHCPageInlined,
   keyed by pPage->Core.Key (its host-physical address); indices below
   PGMPOOL_IDX_FIRST are fatal.  The local 'pv' declaration and the
   "return pv;" line were lost to extraction.
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
473
if (pPage->idx >= PGMPOOL_IDX_FIRST)
475
Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
477
pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
480
AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
484
* Maps the page into current context (RC and maybe R0).
486
* @returns pointer to the mapping.
487
* @param pVM Pointer to the PGM instance data.
488
* @param pVCpu The current CPU.
489
* @param pPage The page.
491
/* Same as pgmPoolMapPageInlined but takes the VCPU explicitly (asserting it
   matches VMMGetCpu), avoiding the lookup on hot paths.
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
493
if (pPage->idx >= PGMPOOL_IDX_FIRST)
495
Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
497
Assert(pVCpu == VMMGetCpu(pVM));
498
pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
501
AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
504
#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
508
* Queries the Physical TLB entry for a physical guest page,
509
* attempting to load the TLB entry if necessary.
511
* @returns VBox status code.
512
* @retval VINF_SUCCESS on success
513
* @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
515
* @param pPGM The PGM instance handle.
516
* @param GCPhys The address of the guest page.
517
* @param ppTlbe Where to store the pointer to the TLB entry.
519
/* Looks up the physical page-map TLB entry for GCPhys, loading it via
   pgmPhysPageLoadIntoTlb on a miss.  Note the legacy CTXSUFF (no
   underscore) macro on PhysTlb vs CTX_SUFF elsewhere — presumably
   intentional legacy usage; verify against the pristine source.
   NOTE(review): garbled extraction — rc declaration, *ppTlbe store and
   return lines missing; code kept verbatim. */
DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
522
PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
523
if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
525
STAM_COUNTER_INC(&pPGM->CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
529
rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
536
* Queries the Physical TLB entry for a physical guest page,
537
* attempting to load the TLB entry if necessary.
539
* @returns VBox status code.
540
* @retval VINF_SUCCESS on success
541
* @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
543
* @param pPGM The PGM instance handle.
544
* @param pPage Pointer to the PGMPAGE structure corresponding to
546
* @param GCPhys The address of the guest page.
547
* @param ppTlbe Where to store the pointer to the TLB entry.
549
/* Variant of pgmPhysPageQueryTlbe for callers that already hold the PGMPAGE:
   a miss loads the TLB via pgmPhysPageLoadIntoTlbWithPage, skipping the page
   lookup.
   NOTE(review): garbled extraction — rc declaration, *ppTlbe store and
   return lines missing; code kept verbatim. */
DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
552
PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
553
if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
555
STAM_COUNTER_INC(&pPGM->CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
559
rc = pgmPhysPageLoadIntoTlbWithPage(pPGM, pPage, GCPhys);
568
* Checks if the no-execute (NX) feature is active (EFER.NXE=1).
570
* Only used when the guest is in PAE or long mode. This is inlined so that we
571
* can perform consistency checks in debug builds.
573
* @returns true if it is, false if it isn't.
574
* @param pVCpu The current CPU.
576
/* Returns the cached EFER.NXE state; debug builds cross-check the cache
   against CPUM and assert the guest is in PAE or long mode (the only modes
   where NX applies). */
DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPU pVCpu)
578
Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
579
Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
580
return pVCpu->pgm.s.fNoExecuteEnabled;
585
* Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
587
* Only used when the guest is in paged 32-bit mode. This is inlined so that
588
* we can perform consistency checks in debug builds.
590
* @returns true if it is, false if it isn't.
591
* @param pVCpu The current CPU.
593
/* Returns the cached CR4.PSE state; debug builds cross-check against CPUM
   and assert the guest is in plain 32-bit paging (not PAE/long mode). */
DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPU pVCpu)
595
Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
596
Assert(!CPUMIsGuestInPAEMode(pVCpu));
597
Assert(!CPUMIsGuestInLongMode(pVCpu));
598
return pVCpu->pgm.s.fGst32BitPageSizeExtension;
603
* Calculated the guest physical address of the large (4 MB) page in 32 bits paging mode.
604
* Takes PSE-36 into account.
606
* @returns guest physical address
607
* @param pPGM Pointer to the PGM instance data.
608
* @param Pde Guest Pde
610
/* Composes the physical base of a 4MB page from a 32-bit PDE: low bits via
   X86_PDE4M_PG_MASK, plus the PSE-36 high bits (u8PageNoHigh) shifted to
   bit 32, masked by the VM's supported PSE width (GCPhys4MBPSEMask). */
DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PPGM pPGM, X86PDE Pde)
612
RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
613
GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
615
return GCPhys & pPGM->GCPhys4MBPSEMask;
620
* Gets the address the guest page directory (32-bit paging).
622
* @returns VBox status code.
623
* @param pVCpu The current CPU.
624
* @param ppPd Where to return the mapping. This is always set.
626
/* Maps the guest 32-bit page directory at GCPhysCR3: dynamic mapping in the
   R0-dynmap configuration, otherwise the cached context pointer with lazy
   mapping on first use.  #else/#endif and return lines were lost to
   extraction.
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPU pVCpu, PX86PD *ppPd)
628
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
629
int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
636
*ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
637
if (RT_UNLIKELY(!*ppPd))
638
return pgmGstLazyMap32BitPD(pVCpu, ppPd);
645
* Gets the address the guest page directory (32-bit paging).
647
* @returns Pointer the page directory entry in question.
648
* @param pVCpu The current CPU.
650
/* Pointer-returning variant of pgmGstGet32bitPDPtrEx; on mapping failure
   only VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS is tolerated (asserted) —
   presumably returning NULL then (return lines lost to extraction).
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPU pVCpu)
652
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
653
PX86PD pGuestPD = NULL;
654
int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
657
AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
661
PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
662
if (RT_UNLIKELY(!pGuestPD))
664
int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
674
* Gets the guest page directory pointer table.
676
* @returns VBox status code.
677
* @param pVCpu The current CPU.
678
* @param ppPdpt Where to return the mapping. This is always set.
680
/* Maps the guest PAE PDPT.  Uses the Off (byte-offset) dynmap variant
   because CR3 is only 32-byte aligned in PAE mode, so the PDPT need not sit
   at a page boundary.  Non-dynmap path uses the cached pointer with lazy
   mapping.
   NOTE(review): garbled extraction — #else/#endif/return lines missing;
   code kept verbatim. */
DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPU pVCpu, PX86PDPT *ppPdpt)
682
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
683
int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
690
*ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
691
if (RT_UNLIKELY(!*ppPdpt))
692
return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
698
* Gets the guest page directory pointer table.
700
* @returns Pointer to the page directory in question.
701
* @returns NULL if the page directory is not present or on an invalid page.
702
* @param pVCpu The current CPU.
704
/* Pointer-returning wrapper over pgmGstGetPaePDPTPtrEx; the 'pGuestPdpt'
   declaration and the return line were lost to extraction.
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPU pVCpu)
707
int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
708
AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
714
* Gets the guest page directory pointer table entry for the specified address.
716
* @returns Pointer to the page directory in question.
717
* @returns NULL if the page directory is not present or on an invalid page.
718
* @param pVCpu The current CPU
719
* @param GCPtr The address.
721
/* Returns a pointer to the PAE PDPT entry selected by GCPtr (2-bit index
   via X86_PDPT_SHIFT/MASK_PAE); maps the PDPT first, NULL on failure.
   NOTE(review): garbled extraction — #else/#endif and AssertRCReturn lines
   missing; code kept verbatim. */
DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
723
AssertGCPtr32(GCPtr);
725
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
726
PX86PDPT pGuestPDPT = NULL;
727
int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
728
AssertRCReturn(rc, NULL);
730
PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
731
if (RT_UNLIKELY(!pGuestPDPT))
733
int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
738
return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
743
* Gets the page directory entry for the specified address.
745
* @returns The page directory entry in question.
746
* @returns A non-present entry if the page directory is not present or on an invalid page.
747
* @param pVCpu The handle of the virtual CPU.
748
* @param GCPtr The address.
750
/* Returns the PAE PDE for GCPtr by value: validates the PDPT entry
   (present, no MBZ bits set), maps/locates the PD (dynmap or cached with
   lazy re-map when the cached GCPhys no longer matches), and indexes it.
   The ZeroPde tail indicates a non-present PDE is returned on any failure
   (return line lost to extraction).
   NOTE(review): garbled extraction — brace/else/endif lines missing; code
   kept verbatim. */
DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
752
AssertGCPtr32(GCPtr);
753
PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
754
if (RT_LIKELY(pGuestPDPT))
756
const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
757
if (    pGuestPDPT->a[iPdpt].n.u1Present
758
&& !(pGuestPDPT->a[iPdpt].u & pVCpu->pgm.s.fGstPaeMbzPdpeMask) )
760
const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
761
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
762
PX86PDPAE pGuestPD = NULL;
763
int rc = pgmRZDynMapGCPageInlined(pVCpu,
764
pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
766
RTLOG_COMMA_SRC_POS);
768
return pGuestPD->a[iPD];
769
AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
771
PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
773
|| (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
774
pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
776
return pGuestPD->a[iPD];
781
X86PDEPAE ZeroPde = {0};
787
* Gets the page directory pointer table entry for the specified address
788
* and returns the index into the page directory
790
* @returns Pointer to the page directory in question.
791
* @returns NULL if the page directory is not present or on an invalid page.
792
* @param pVCpu The current CPU.
793
* @param GCPtr The address.
794
* @param piPD Receives the index into the returned page directory
795
* @param pPdpe Receives the page directory pointer entry. Optional.
797
/* Resolves the PAE page directory for GCPtr, storing the PD index in *piPD
   and (optionally — the "if (pPdpe)" guard line appears lost) the PDPT
   entry in *pPdpe.  Bails out (presumably returning NULL; return lines
   lost) when the PDPT is unmapped, the entry is not present, or MBZ bits
   are set.
   NOTE(review): garbled extraction — brace/else/endif/return lines missing;
   code kept verbatim. */
DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
799
AssertGCPtr32(GCPtr);
802
PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
803
if (RT_UNLIKELY(!pGuestPDPT))
805
const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
807
*pPdpe = pGuestPDPT->a[iPdpt];
808
if (!pGuestPDPT->a[iPdpt].n.u1Present)
810
if (RT_UNLIKELY(pVCpu->pgm.s.fGstPaeMbzPdpeMask & pGuestPDPT->a[iPdpt].u))
814
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
815
PX86PDPAE pGuestPD = NULL;
816
int rc = pgmRZDynMapGCPageInlined(pVCpu,
817
pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
819
RTLOG_COMMA_SRC_POS);
822
AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
826
PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
828
|| (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
829
pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
832
*piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
839
* Gets the page map level-4 pointer for the guest.
841
* @returns VBox status code.
842
* @param pVCpu The current CPU.
843
* @param ppPml4 Where to return the mapping. Always set.
845
/* Maps the guest PML4 at GCPhysCR3: dynamic mapping in the R0-dynmap build,
   otherwise the cached pGstAmd64Pml4 with lazy mapping on first use.
   NOTE(review): garbled extraction — #else/#endif/return lines missing;
   code kept verbatim. */
DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPU pVCpu, PX86PML4 *ppPml4)
847
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
848
int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
855
*ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
856
if (RT_UNLIKELY(!*ppPml4))
857
return pgmGstLazyMapPml4(pVCpu, ppPml4);
864
* Gets the page map level-4 pointer for the guest.
866
* @returns Pointer to the PML4 page.
867
* @param pVCpu The current CPU.
869
/* Pointer-returning wrapper over pgmGstGetLongModePML4PtrEx; the
   'pGuestPml4' declaration and return line were lost to extraction.
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPU pVCpu)
872
int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
873
AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
879
* Gets the pointer to a page map level-4 entry.
881
* @returns Pointer to the PML4 entry.
882
* @param pVCpu The current CPU.
883
* @param iPml4 The index.
884
* @remarks Only used by AssertCR3.
886
/* Returns a pointer to guest PML4 entry iPml4 (AssertCR3 helper per the doc
   block above); NULL on mapping failure via AssertRCReturn.
   NOTE(review): garbled extraction — #else/#endif lines and the dynmap-path
   'pGuestPml4' declaration missing; code kept verbatim. */
DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
888
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
890
int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
891
AssertRCReturn(rc, NULL);
893
PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
894
if (RT_UNLIKELY(!pGuestPml4))
896
int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
897
AssertRCReturn(rc, NULL);
900
return &pGuestPml4->a[iPml4];
905
* Gets the page directory entry for the specified address.
907
* @returns The page directory entry in question.
908
* @returns A non-present entry if the page directory is not present or on an invalid page.
909
* @param pVCpu The current CPU.
910
* @param GCPtr The address.
912
/* Walks the long-mode guest tables PML4 -> PDPT -> PD for GCPtr, checking
   present and reserved (MBZ) bits at each level, and returns the PDE by
   value; the ZeroPde tail indicates a non-present PDE on any failure
   (return lines lost to extraction).  Intermediate tables are mapped with
   PGM_GCPHYS_2_PTR_BY_VMCPU.
   NOTE(review): garbled extraction — brace lines, pPdptTemp/pPD
   declarations and return statements missing; code kept verbatim. */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPU pVCpu, RTGCPTR64 GCPtr)
915
* Note! To keep things simple, ASSUME invalid physical addresses will
916
*       cause X86_TRAP_PF_RSVD.  This isn't a problem until we start
917
*       supporting 52-bit wide physical guest addresses.
919
PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
920
const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
921
if (    RT_LIKELY(pGuestPml4)
922
&& pGuestPml4->a[iPml4].n.u1Present
923
&& !(pGuestPml4->a[iPml4].u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask) )
926
int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
929
const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
930
if (    pPdptTemp->a[iPdpt].n.u1Present
931
&& !(pPdptTemp->a[iPdpt].u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask) )
934
rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
937
const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
942
AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
945
X86PDEPAE ZeroPde = {0};
951
* Gets the GUEST page directory pointer for the specified address.
953
* @returns The page directory in question.
954
* @returns NULL if the page directory is not present or on an invalid page.
955
* @param pVCpu The current CPU.
956
* @param GCPtr The address.
957
* @param ppPml4e Page Map Level-4 Entry (out)
958
* @param pPdpe Page directory pointer table entry (out)
959
* @param piPD Receives the index into the returned page directory
961
/* Resolves the long-mode guest PD for GCPtr, handing back the PML4E pointer
   (*ppPml4e), the PDPT entry (*pPdpe) and the PD index (*piPD); bails out
   (presumably returning NULL; return lines lost) on unmapped PML4,
   non-present entries, or MBZ-bit violations at either level.
   NOTE(review): garbled extraction — brace lines, pPdptTemp/pPD
   declarations and return statements missing; code kept verbatim. */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
964
PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
965
if (RT_UNLIKELY(!pGuestPml4))
967
const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
968
PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
969
if (!pPml4e->n.u1Present)
971
if (RT_UNLIKELY(pPml4e->u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask))
976
int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
979
AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
982
const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
983
*pPdpe = pPdptTemp->a[iPdpt];
984
if (!pPdpe->n.u1Present)
986
if (RT_UNLIKELY(pPdpe->u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask))
991
rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
994
AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
998
*piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1005
* Gets the shadow page directory, 32-bit.
1007
* @returns Pointer to the shadow 32-bit PD.
1008
* @param pVCpu The current CPU.
1010
/* Maps the shadow 32-bit PD: the CR3 pool page (pShwPageCR3) converted to a
   context pointer via PGMPOOL_PAGE_2_PTR_V2. */
DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPU pVCpu)
1012
return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1017
* Gets the shadow page directory entry for the specified address, 32-bit.
1019
* @returns Shadow 32-bit PDE.
1020
* @param pVCpu The current CPU.
1021
* @param GCPtr The address.
1023
/* Returns the shadow 32-bit PDE for GCPtr by value; the ZeroPde tail
   indicates a non-present PDE is returned when the shadow PD is unmapped
   (the guard/return lines were lost to extraction).
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPU pVCpu, RTGCPTR GCPtr)
1025
const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
1027
PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
1030
X86PDE ZeroPde = {0};
1033
return pShwPde->a[iPd];
1038
* Gets the pointer to the shadow page directory entry for the specified
1041
* @returns Pointer to the shadow 32-bit PDE.
1042
* @param pVCpu The current CPU.
1043
* @param GCPtr The address.
1045
/* Returns a pointer to the shadow 32-bit PDE for GCPtr; NULL if the shadow
   PD cannot be mapped (AssertReturn). */
DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1047
const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
1049
PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
1050
AssertReturn(pPde, NULL);
1051
return &pPde->a[iPd];
1056
* Gets the shadow page pointer table, PAE.
1058
* @returns Pointer to the shadow PAE PDPT.
1059
* @param pVCpu The current CPU.
1061
/* Maps the shadow PAE PDPT: same CR3 pool page as the 32-bit variant, just
   reinterpreted as a PDPT. */
DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPU pVCpu)
1063
return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1068
* Gets the shadow page directory for the specified address, PAE.
1070
* @returns Pointer to the shadow PD.
1071
* @param pVCpu The current CPU.
1072
* @param GCPtr The address.
1074
/* Maps the shadow PAE PD for GCPtr: indexes the shadow PDPT, looks up the
   backing pool page by physical address (pgmPoolGetPage) and converts it to
   a context pointer.  Non-present PDPTE presumably yields NULL (the return
   line was lost to extraction).
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1076
const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1077
PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1079
if (!pPdpt->a[iPdpt].n.u1Present)
1082
/* Fetch the pgm pool shadow descriptor. */
1083
PVM pVM = pVCpu->CTX_SUFF(pVM);
1084
PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1085
AssertReturn(pShwPde, NULL);
1087
return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1092
* Gets the shadow page directory for the specified address, PAE.
1094
* @returns Pointer to the shadow PD.
1095
* @param pVCpu The current CPU.
1096
* @param GCPtr The address.
1098
/* Overload of pgmShwGetPaePDPtr taking an already-mapped shadow PDPT,
   skipping the PDPT lookup.  NOTE(review): same name as the two-argument
   version above — valid only because this header is compiled as C++ in the
   VirtualBox build; confirm before reusing in a plain-C translation unit.
   NOTE(review): garbled extraction — brace/return lines missing; code kept
   verbatim. */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
1100
const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1102
if (!pPdpt->a[iPdpt].n.u1Present)
1105
/* Fetch the pgm pool shadow descriptor. */
1106
PVM pVM = pVCpu->CTX_SUFF(pVM);
1107
PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1108
AssertReturn(pShwPde, NULL);
1110
return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1115
* Gets the shadow page directory entry, PAE.
1118
* @param pVCpu The current CPU.
1119
* @param GCPtr The address.
1121
/* Returns the shadow PAE PDE for GCPtr by value; the ZeroPde tail indicates
   a non-present PDE when the shadow PD is unavailable (guard/return lines
   lost to extraction).
   NOTE(review): garbled extraction; code kept verbatim. */
DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
1123
const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1125
PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1128
X86PDEPAE ZeroPde = {0};
1131
return pShwPde->a[iPd];
1136
* Gets the pointer to the shadow page directory entry for an address, PAE.
1138
* @returns Pointer to the PDE.
1139
* @param pVCpu The current CPU.
1140
* @param GCPtr The address.
1141
* @remarks Only used by AssertCR3.
1143
/* Returns a pointer to the shadow PAE PDE for GCPtr (AssertCR3 helper per
   the doc block above); NULL when the shadow PD is unavailable. */
DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1145
const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1147
PX86PDPAE pPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1148
AssertReturn(pPde, NULL);
1149
return &pPde->a[iPd];
1155
* Gets the shadow page map level-4 pointer.
1157
* @returns Pointer to the shadow PML4.
1158
* @param pVCpu The current CPU.
1160
DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPU pVCpu)
1162
return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1167
* Gets the shadow page map level-4 entry for the specified address.
1169
* @returns The entry.
1170
* @param pVCpu The current CPU.
1171
* @param GCPtr The address.
1173
DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPU pVCpu, RTGCPTR GCPtr)
1175
const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1176
PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1180
X86PML4E ZeroPml4e = {0};
1183
return pShwPml4->a[iPml4];
1188
* Gets the pointer to the specified shadow page map level-4 entry.
1190
* @returns The entry.
1191
* @param pVCpu The current CPU.
1192
* @param iPml4 The PML4 index.
1194
DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
1196
PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1199
return &pShwPml4->a[iPml4];
1206
* Cached physical handler lookup.
1208
* @returns Physical handler covering @a GCPhys.
1209
* @param pVM The VM handle.
1210
* @param GCPhys The lookup address.
1212
DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVM pVM, RTGCPHYS GCPhys)
1214
PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);
1216
&& GCPhys >= pHandler->Core.Key
1217
&& GCPhys < pHandler->Core.KeyLast)
1219
STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupHits));
1223
STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1224
pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1226
pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;
1232
* Gets the page state for a physical handler.
1234
* @returns The physical handler page state.
1235
* @param pCur The physical handler in question.
1237
DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
1239
switch (pCur->enmType)
1241
case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
1242
return PGM_PAGE_HNDL_PHYS_STATE_WRITE;
1244
case PGMPHYSHANDLERTYPE_MMIO:
1245
case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
1246
return PGM_PAGE_HNDL_PHYS_STATE_ALL;
1249
AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
1255
* Gets the page state for a virtual handler.
1257
* @returns The virtual handler page state.
1258
* @param pCur The virtual handler in question.
1259
* @remarks This should never be used on a hypervisor access handler.
1261
DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
1263
switch (pCur->enmType)
1265
case PGMVIRTHANDLERTYPE_WRITE:
1266
return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
1267
case PGMVIRTHANDLERTYPE_ALL:
1268
return PGM_PAGE_HNDL_VIRT_STATE_ALL;
1270
AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
1276
* Clears one physical page of a virtual handler
1278
* @param pPGM Pointer to the PGM instance.
1279
* @param pCur Virtual handler structure
1280
* @param iPage Physical page index
1282
* @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
1283
* need to care about other handlers in the same page.
1285
DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
1287
const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1290
* Remove the node from the tree (it's supposed to be in the tree if we get here!).
1292
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1293
AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
1294
("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1295
pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
1297
if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
1299
/* We're the head of the alias chain. */
1300
PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
1301
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1302
AssertReleaseMsg(pRemove != NULL,
1303
("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1304
pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
1305
AssertReleaseMsg(pRemove == pPhys2Virt,
1306
("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
1307
" got: pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1308
pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
1309
pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
1311
if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
1313
/* Insert the next list in the alias chain into the tree. */
1314
PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1315
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1316
AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
1317
("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1318
pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
1320
pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
1321
bool fRc = RTAvlroGCPhysInsert(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
1327
/* Locate the previous node in the alias chain. */
1328
PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
1329
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1330
AssertReleaseMsg(pPrev != pPhys2Virt,
1331
("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
1332
pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
1336
PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1337
if (pNext == pPhys2Virt)
1340
LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
1341
pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
1342
if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1343
pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
1346
PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1347
pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
1348
| (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
1356
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1357
AssertReleaseMsg(pNext != pPrev,
1358
("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
1359
pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
1366
Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
1367
pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1368
pPhys2Virt->offNextAlias = 0;
1369
pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */
1372
* Clear the ram flags for this page.
1374
PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
1375
AssertReturnVoid(pPage);
1376
PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
1381
* Internal worker for finding a 'in-use' shadow page give by it's physical address.
1383
* @returns Pointer to the shadow page structure.
1384
* @param pPool The pool.
1385
* @param idx The pool page index.
1387
DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1389
AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1390
return &pPool->aPages[idx];
1395
* Clear references to guest physical memory.
1397
* @param pPool The pool.
1398
* @param pPoolPage The pool page.
1399
* @param pPhysPage The physical guest page tracking structure.
1400
* @param iPte Shadow PTE index
1402
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1405
* Just deal with the simple case here.
1408
const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1410
const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1413
Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1414
Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1415
/* Invalidate the tracking data. */
1416
PGM_PAGE_SET_TRACKING(pPhysPage, 0);
1419
pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1420
Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1425
* Moves the page to the head of the age list.
1427
* This is done when the cached page is used in one way or another.
1429
* @param pPool The pool.
1430
* @param pPage The cached page.
1432
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1434
Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
1437
* Move to the head of the age list.
1439
if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1442
pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1443
if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1444
pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1446
pPool->iAgeTail = pPage->iAgePrev;
1448
/* insert at head */
1449
pPage->iAgePrev = NIL_PGMPOOL_IDX;
1450
pPage->iAgeNext = pPool->iAgeHead;
1451
Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1452
pPool->iAgeHead = pPage->idx;
1453
pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1458
* Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
1460
* @param pVM VM Handle.
1461
* @param pPage PGM pool page
1463
DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1465
Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
1466
ASMAtomicIncU32(&pPage->cLocked);
1471
* Unlocks a page to allow flushing again
1473
* @param pVM VM Handle.
1474
* @param pPage PGM pool page
1476
DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1478
Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
1479
Assert(pPage->cLocked);
1480
ASMAtomicDecU32(&pPage->cLocked);
1485
* Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT)
1487
* @returns VBox status code.
1488
* @param pPage PGM pool page
1490
DECLINLINE(bool) pgmPoolIsPageLocked(PPGM pPGM, PPGMPOOLPAGE pPage)
1494
LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1495
if (pPage->cModifications)
1496
pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1504
* Tells if mappings are to be put into the shadow page table or not.
1506
* @returns boolean result
1507
* @param pVM VM handle.
1509
DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
1511
#ifdef PGM_WITHOUT_MAPPINGS
1512
/* There are no mappings in VT-x and AMD-V mode. */
1513
Assert(pPGM->fMappingsDisabled);
1516
return !pPGM->fMappingsDisabled;
1522
* Checks if the mappings are floating and enabled.
1524
* @returns true / false.
1525
* @param pVM The VM handle.
1527
DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PPGM pPGM)
1529
#ifdef PGM_WITHOUT_MAPPINGS
1530
/* There are no mappings in VT-x and AMD-V mode. */
1531
Assert(pPGM->fMappingsDisabled);
1534
return !pPGM->fMappingsDisabled
1535
&& !pPGM->fMappingsFixed;