/* $Id: memobj-r0drv-nt.cpp 4223 2007-08-19 01:02:11Z vboxsync $ */
/** @file
 * innotek Portable Runtime - Ring-0 Memory Objects, NT.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
22
#include "the-nt-kernel.h"
24
#include <iprt/memobj.h>
25
#include <iprt/alloc.h>
26
#include <iprt/assert.h>
28
#include <iprt/param.h>
29
#include <iprt/string.h>
30
#include <iprt/process.h>
31
#include "internal/memobj.h"
34
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
 * This is supposed to have a limit right below 256MB, but this appears
 * to actually be much lower. The values here have been determined experimentally.
 */
#ifdef RT_ARCH_X86
# define MAX_LOCK_MEM_SIZE   (32*1024*1024) /* 32MB */
#endif
#ifdef RT_ARCH_AMD64
# define MAX_LOCK_MEM_SIZE   (24*1024*1024) /* 24MB */
#endif
49
/*******************************************************************************
50
* Structures and Typedefs *
51
*******************************************************************************/
53
* The NT version of the memory object structure.
55
typedef struct RTR0MEMOBJNT
57
/** The core structure. */
58
RTR0MEMOBJINTERNAL Core;
59
#ifndef IPRT_TARGET_NT4
60
/** Used MmAllocatePagesForMdl(). */
61
bool fAllocatedPagesForMdl;
63
/** The number of PMDLs (memory descriptor lists) in the array. */
65
/** Array of MDL pointers. (variable size) */
67
} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
70
/**
 * Frees the native backing of a memory object.
 *
 * @returns IPRT status code.
 * @param   pMem    The memory object to free; the caller owns the object shell.
 */
int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;

                /* MDLs from MmAllocatePagesForMdl must be released with
                   MmFreePagesFromMdl followed by ExFreePool (WDK contract). */
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            /* fall thru - a LOW object not made by MmAllocatePagesForMdl is pool memory. */
        case RTR0MEMOBJTYPE_PAGE:
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
            }
#endif
            /* Nothing to do for entered (not allocated) physical ranges. */
            break;

        case RTR0MEMOBJTYPE_LOCK:
            /* Unlock and free each MDL covering a sub-range of the locked memory. */
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
/*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                /* Mapping of an MDL-backed object: undo MmMapLockedPages*. */
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(     pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                       ||   pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                /* Mapping of an entered physical range: undo MmMapIoSpace. */
                Assert(     pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                       &&   !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
184
/**
 * Allocates page-aligned, non-paged kernel memory and wraps it in an IPRT
 * memory object together with an MDL describing it.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the ring-0 memory object on success.
 * @param   cb              The allocation size; must be <= 1GB (ULONG safety).
 * @param   fExecutable     Whether the memory must be executable.
 */
int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try allocate the memory and create an MDL for them so
     * we can query the physical addresses and do mappings later
     * without running into out-of-memory conditions and similar problems.
     */
    int rc = VERR_NO_PAGE_MEMORY;
    void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    if (pv)
    {
        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
        if (pMdl)
        {
            MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
            /* NX is enforced on AMD64; make the pages executable when requested.
               NOTE(review): conditional reconstructed - confirm the exact guard
               (original shows only the MmProtectMdlSystemAddress call). */
            if (fExecutable)
                MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
            if (pMemNt)
            {
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            IoFreeMdl(pMdl);
        }
        ExFreePool(pv);
    }
    return rc;
}
226
/**
 * Allocates page memory with all pages below 4GB.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the ring-0 memory object on success.
 * @param   cb              The allocation size; must be <= 1GB (ULONG safety).
 * @param   fExecutable     Whether the memory must be executable.
 */
int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        /* Verify every page landed below 4GB; bail out on the first that didn't. */
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        /* MmAllocatePagesForMdl may return a partially filled MDL on low memory. */
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            /* MmMapLockedPagesSpecifyCache can raise an exception; guard it. */
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                /* Ignore and fall back on contiguous memory below. */
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
#endif /* !IPRT_TARGET_NT4 */

    /*
     * Fall back on contiguous memory...
     */
    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
}
300
* Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
301
* and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
302
* to what rtR0MemObjNativeAllocCont() does.
304
* @returns IPRT status code.
305
* @param ppMem Where to store the pointer to the ring-0 memory object.
306
* @param cb The size.
307
* @param fExecutable Whether the mapping should be executable or not.
308
* @param PhysHighest The highest physical address for the pages in allocation.
310
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
312
AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
315
* Allocate the memory and create an MDL for it.
317
PHYSICAL_ADDRESS PhysAddrHighest;
318
PhysAddrHighest.QuadPart = PhysHighest;
319
void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
321
return VERR_NO_MEMORY;
323
PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
326
MmBuildMdlForNonPagedPool(pMdl);
328
MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
331
PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
334
pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
336
pMemNt->apMdls[0] = pMdl;
337
*ppMem = &pMemNt->Core;
343
MmFreeContiguousMemory(pv);
344
return VERR_NO_MEMORY;
348
/**
 * Allocates physically contiguous memory below 4GB.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the ring-0 memory object on success.
 * @param   cb              The allocation size.
 * @param   fExecutable     Whether the memory must be executable.
 */
int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G - 1);
}
354
/**
 * Allocates contiguous physical memory, preferably without a kernel mapping.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the ring-0 memory object on success.
 * @param   cb              The allocation size.
 * @param   PhysHighest     The highest permitted physical address, or
 *                          NIL_RTHCPHYS for no restriction.
 */
int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    /*
     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
     *
     * This is preferable to using MmAllocateContiguousMemory because there are
     * a few situations where the memory shouldn't be mapped, like for instance
     * VT-x control memory. Since these are rather small allocations (one or
     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
     * request.
     *
     * If the allocation is big, the chances are *probably* not very good. The
     * current limit is kind of random...
     * NOTE(review): size gate reconstructed from the comment above - confirm
     * the original threshold value.
     */
    if (cb < _128K)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                /* Walk the PFN array checking that the pages are contiguous. */
                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
                PFN_NUMBER Pfn = paPfns[0] + 1;
                const size_t cPages = cb >> PAGE_SHIFT;
                size_t iPage;
                for (iPage = 1; iPage < cPages; iPage++, Pfn++)
                    if (paPfns[iPage] != Pfn)
                        break;
                if (iPage >= cPages)
                {
                    /* Lucky: a contiguous run. Note the NULL pv - no mapping. */
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                    if (pMemNt)
                    {
                        pMemNt->Core.u.Phys.fAllocated = true;
                        pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                }
            }
            MmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }
#endif /* !IPRT_TARGET_NT4 */

    return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest);
}
411
/**
 * Allocates non-contiguous physical memory without a kernel mapping.
 *
 * @returns IPRT status code; VERR_NOT_SUPPORTED on NT4 (no
 *          MmAllocatePagesForMdl there).
 * @param   ppMem           Where to store the ring-0 memory object on success.
 * @param   cb              The allocation size.
 * @param   PhysHighest     The highest permitted physical address, or
 *                          NIL_RTHCPHYS for no restriction.
 */
int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        /* The call may deliver fewer pages than asked for on low memory. */
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            /* Note the NULL pv - the object carries no kernel mapping. */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
            if (pMemNt)
            {
                pMemNt->fAllocatedPagesForMdl = true;
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
    return VERR_NO_MEMORY;
#else  /* IPRT_TARGET_NT4 */
    return VERR_NOT_SUPPORTED;
#endif /* IPRT_TARGET_NT4 */
}
442
/**
 * Creates a memory object describing an existing physical address range
 * (e.g. MMIO) without allocating or mapping anything.
 *
 * @returns IPRT status code.
 * @param   ppMem   Where to store the ring-0 memory object on success.
 * @param   Phys    The base physical address; must be representable as a PFN.
 * @param   cb      The size of the range.
 */
int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /*
     * Validate the address range and create a descriptor for it.
     */
    PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
    if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
        return VERR_ADDRESS_TOO_BIG; /* PFN_NUMBER too narrow for this address. */

    /*
     * Create the IPRT memory object.
     */
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (pMemNt)
    {
        pMemNt->Core.u.Phys.PhysBase = Phys;
        pMemNt->Core.u.Phys.fAllocated = false; /* entered, not allocated - never freed. */
        *ppMem = &pMemNt->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
467
* Internal worker for locking down pages.
469
* @return IPRT status code.
471
* @param ppMem Where to store the memory object pointer.
472
* @param pv First page.
473
* @param cb Number of bytes.
474
* @param Task The task \a pv and \a cb refers to.
476
static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
479
* Calc the number of MDLs we need and allocate the memory object structure.
481
size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
482
if (cb % MAX_LOCK_MEM_SIZE)
484
if (cMdls >= UINT32_MAX)
485
return VERR_OUT_OF_RANGE;
486
PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
487
RTR0MEMOBJTYPE_LOCK, pv, cb);
489
return VERR_NO_MEMORY;
492
* Loop locking down the sub parts of the memory.
494
int rc = VINF_SUCCESS;
496
uint8_t *pb = (uint8_t *)pv;
498
for (iMdl = 0; iMdl < cMdls; iMdl++)
501
* Calc the Mdl size and allocate it.
503
size_t cbCur = cb - cbTotal;
504
if (cbCur > MAX_LOCK_MEM_SIZE)
505
cbCur = MAX_LOCK_MEM_SIZE;
506
AssertMsg(cbCur, ("cbCur: 0!\n"));
507
PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
519
MmProbeAndLockPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode, IoModifyAccess);
520
pMemNt->apMdls[iMdl] = pMdl;
523
__except(EXCEPTION_EXECUTE_HANDLER)
526
rc = VERR_LOCK_FAILED;
536
Assert(pMemNt->cMdls == cMdls);
537
pMemNt->Core.u.Lock.R0Process = R0Process;
538
*ppMem = &pMemNt->Core;
543
* We failed, perform cleanups.
547
MmUnlockPages(pMemNt->apMdls[iMdl]);
548
IoFreeMdl(pMemNt->apMdls[iMdl]);
549
pMemNt->apMdls[iMdl] = NULL;
551
rtR0MemObjDelete(&pMemNt->Core);
552
return VERR_LOCK_FAILED;
556
/**
 * Locks down a range of user memory belonging to the calling process.
 *
 * @returns IPRT status code; VERR_NOT_SUPPORTED for foreign processes.
 * @param   ppMem       Where to store the ring-0 memory object on success.
 * @param   R3Ptr       The user-mode address of the first page.
 * @param   cb          Number of bytes to lock.
 * @param   R0Process   Must equal RTR0ProcHandleSelf() - only the current
 *                      process is supported.
 */
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
}
564
/**
 * Locks down a range of kernel memory.
 *
 * @returns IPRT status code.
 * @param   ppMem   Where to store the ring-0 memory object on success.
 * @param   pv      The kernel address of the first page.
 * @param   cb      Number of bytes to lock.
 */
int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
}
570
/**
 * Reserves kernel virtual address space - not implemented on NT.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   ppMem       Where the ring-0 memory object would be stored.
 * @param   pvFixed     Requested address, (void *)-1 for any.
 * @param   cb          Number of bytes to reserve.
 * @param   uAlignment  Alignment requirement.
 */
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /*
     * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}
579
/**
 * Reserves user virtual address space - not implemented on NT.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   ppMem       Where the ring-0 memory object would be stored.
 * @param   R3PtrFixed  Requested user address, (RTR3PTR)-1 for any.
 * @param   cb          Number of bytes to reserve.
 * @param   uAlignment  Alignment requirement.
 * @param   R0Process   The target process.
 */
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /*
     * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}
589
* Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
591
* @returns IPRT status code.
592
* @param ppMem Where to store the memory object for the mapping.
593
* @param pMemToMap The memory object to map.
594
* @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
595
* @param uAlignment The alignment requirement for the mapping.
596
* @param fProt The desired page protection for the mapping.
597
* @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
598
* If not nil, it's the current process.
600
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
601
unsigned fProt, RTR0PROCESS R0Process)
603
int rc = VERR_MAP_FAILED;
606
* There are two basic cases here, either we've got an MDL and can
607
* map it using MmMapLockedPages, or we've got a contiguous physical
608
* range (MMIO most likely) and can use MmMapIoSpace.
610
PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
611
if (pMemNtToMap->cMdls)
613
/* don't attempt map locked regions with more than one mdl. */
614
if (pMemNtToMap->cMdls != 1)
615
return VERR_NOT_SUPPORTED;
617
/* we can't map anything to the first page, sorry. */
619
return VERR_NOT_SUPPORTED;
621
/* only one system mapping for now - no time to figure out MDL restrictions right now. */
622
if ( pMemNtToMap->Core.uRel.Parent.cMappings
623
&& R0Process == NIL_RTR0PROCESS)
624
return VERR_NOT_SUPPORTED;
628
/** @todo uAlignment */
629
/** @todo How to set the protection on the pages? */
630
void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
631
R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
633
pvFixed != (void *)-1 ? pvFixed : NULL,
634
FALSE /* no bug check on failure */,
640
PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
641
pMemNtToMap->Core.cb);
644
pMemNt->Core.u.Mapping.R0Process = R0Process;
645
*ppMem = &pMemNt->Core;
650
MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
653
__except(EXCEPTION_EXECUTE_HANDLER)
656
rc = VERR_MAP_FAILED;
661
AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
662
&& !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
664
/* cannot map phys mem to user space (yet). */
665
if (R0Process != NIL_RTR0PROCESS)
666
return VERR_NOT_SUPPORTED;
668
/** @todo uAlignment */
669
/** @todo How to set the protection on the pages? */
670
PHYSICAL_ADDRESS Phys;
671
Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
672
void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
675
PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
676
pMemNtToMap->Core.cb);
679
pMemNt->Core.u.Mapping.R0Process = R0Process;
680
*ppMem = &pMemNt->Core;
685
MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
689
NOREF(uAlignment); NOREF(fProt);
694
/**
 * Maps a memory object into kernel space.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the mapping object on success.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Requested address, (void *)-1 for any.
 * @param   uAlignment  Alignment requirement.
 * @param   fProt       Desired page protection.
 */
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
}
700
/**
 * Maps a memory object into the calling process' user space.
 *
 * @returns IPRT status code; VERR_NOT_SUPPORTED for foreign processes.
 * @param   ppMem       Where to store the mapping object on success.
 * @param   pMemToMap   The memory object to map.
 * @param   R3PtrFixed  Requested user address, (RTR3PTR)-1 for any.
 * @param   uAlignment  Alignment requirement.
 * @param   fProt       Desired page protection.
 * @param   R0Process   Must equal RTR0ProcHandleSelf().
 */
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
}
707
RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
709
PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
713
if (pMemNt->cMdls == 1)
715
PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
716
return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
719
size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
720
size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
721
PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
722
return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
725
switch (pMemNt->Core.enmType)
727
case RTR0MEMOBJTYPE_MAPPING:
728
return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
730
case RTR0MEMOBJTYPE_PHYS:
731
return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
733
case RTR0MEMOBJTYPE_PAGE:
734
case RTR0MEMOBJTYPE_PHYS_NC:
735
case RTR0MEMOBJTYPE_LOW:
736
case RTR0MEMOBJTYPE_CONT:
737
case RTR0MEMOBJTYPE_LOCK:
739
AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
740
case RTR0MEMOBJTYPE_RES_VIRT: