~ubuntu-branches/ubuntu/gutsy/virtualbox-ose/gutsy

Viewing changes to src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp

  • Committer: Bazaar Package Importer
  • Author(s): Steve Kowalik
  • Date: 2007-09-08 16:44:58 UTC
  • Revision ID: james.westby@ubuntu.com-20070908164458-wao29470vqtr8ksy
Tags: upstream-1.5.0-dfsg2
Import upstream version 1.5.0-dfsg2

/* $Id: memobj-r0drv-nt.cpp 4223 2007-08-19 01:02:11Z vboxsync $ */
/** @file
 * innotek Portable Runtime - Ring-0 Memory Objects, NT.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
 * This is supposed to have a limit right below 256MB, but this appears
 * to actually be much lower. The values here have been determined experimentally.
 */
#ifdef RT_ARCH_X86
# define MAX_LOCK_MEM_SIZE   (32*1024*1024) /* 32MB */
#endif
#ifdef RT_ARCH_AMD64
# define MAX_LOCK_MEM_SIZE   (24*1024*1024) /* 24MB */
#endif
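
/* A minimal illustration of how this limit is applied by the locking code
   further down: a lock request is split into ceil(cb / MAX_LOCK_MEM_SIZE)
   MDLs, so e.g. locking 100MB on AMD64 takes five MDLs. Sketch only; the
   helper name is hypothetical and nothing in this file uses it. */
#if 0
static size_t rtR0MemObjNtExampleMdlsNeeded(size_t cb)
{
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;   /* whole chunks */
    if (cb % MAX_LOCK_MEM_SIZE)              /* plus a partial tail chunk */
        cMdls++;
    return cMdls;
}
#endif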
 


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The NT version of the memory object structure.
 */
typedef struct RTR0MEMOBJNT
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
#ifndef IPRT_TARGET_NT4
    /** Used MmAllocatePagesForMdl(). */
    bool                fAllocatedPagesForMdl;
#endif
    /** The number of PMDLs (memory descriptor lists) in the array. */
    uint32_t            cMdls;
    /** Array of MDL pointers. (variable size) */
    PMDL                apMdls[1];
} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
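
/* The apMdls[1] member is a variable-size array: objects that need more than
   one MDL are allocated with room for the extra pointers. A small sketch of
   the sizing arithmetic used with rtR0MemObjNew() further down (illustrative
   only, assuming the RT_OFFSETOF macro from iprt/cdefs.h):

       size_t cbObj = RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]);

   i.e. the fixed header plus cMdls MDL pointers. */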
 

int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;

                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
/*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(     pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                       ||   pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                Assert(     pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                       &&   !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
 

int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try to allocate the memory and create an MDL for it so
     * we can query the physical addresses and do mappings later
     * without running into out-of-memory conditions and similar problems.
     */
    int rc = VERR_NO_PAGE_MEMORY;
    void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    if (pv)
    {
        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
        if (pMdl)
        {
            MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
            if (pMemNt)
            {
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            IoFreeMdl(pMdl);
        }
        ExFreePool(pv);
    }
    return rc;
}
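
/* A caller-side sketch of the page allocation path above, using the public
   IPRT wrappers from iprt/memobj.h rather than this native worker directly.
   Illustrative only; the helper name is made up and error handling is
   minimal. */
#if 0
static int rtR0MemObjNtExampleAllocPageUsage(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, 4 * PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        void    *pv    = RTR0MemObjAddress(hMemObj);              /* ring-0 mapping of the pages */
        RTHCPHYS Phys0 = RTR0MemObjGetPagePhysAddr(hMemObj, 0);   /* physical address of page 0 */
        NOREF(pv); NOREF(Phys0);
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif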
 

int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try to see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                /* nothing */
            }
        }
        MmFreePagesFromMdl(pMdl);
    }
#endif /* !IPRT_TARGET_NT4 */

    /*
     * Fall back on contiguous memory...
     */
    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
}


/**
 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
 * to what rtR0MemObjNativeAllocCont() does.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the pointer to the ring-0 memory object.
 * @param   cb              The size.
 * @param   fExecutable     Whether the mapping should be executable or not.
 * @param   PhysHighest     The highest physical address for the pages in the allocation.
 */
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Allocate the memory and create an MDL for it.
     */
    PHYSICAL_ADDRESS PhysAddrHighest;
    PhysAddrHighest.QuadPart = PhysHighest;
    void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
    if (!pv)
        return VERR_NO_MEMORY;

    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
    if (pMdl)
    {
        MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
        MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
        if (pMemNt)
        {
            pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
            pMemNt->cMdls = 1;
            pMemNt->apMdls[0] = pMdl;
            *ppMem = &pMemNt->Core;
            return VINF_SUCCESS;
        }

        IoFreeMdl(pMdl);
    }
    MmFreeContiguousMemory(pv);
    return VERR_NO_MEMORY;
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1);
}


int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    /*
     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
     *
     * This is preferable to using MmAllocateContiguousMemory because there are
     * a few situations where the memory shouldn't be mapped, like for instance
     * VT-x control memory. Since these are rather small allocations (one or
     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
     * request.
     *
     * If the allocation is big, the chances are *probably* not very good. The
     * current limit is kind of random...
     */
    if (cb < _128K)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
                PFN_NUMBER Pfn = paPfns[0] + 1;
                const size_t cPages = cb >> PAGE_SHIFT;
                size_t iPage;
                for (iPage = 1; iPage < cPages; iPage++, Pfn++)
                    if (paPfns[iPage] != Pfn)
                        break;
                if (iPage >= cPages)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                    if (pMemNt)
                    {
                        pMemNt->Core.u.Phys.fAllocated = true;
                        pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                }
            }
            MmFreePagesFromMdl(pMdl);
        }
    }
#endif /* !IPRT_TARGET_NT4 */

    return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest);
}
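
/* The loop above checks that the page frame numbers returned by
   MmAllocatePagesForMdl() are consecutive, i.e. that the allocation happens
   to be physically contiguous. The same check as a standalone sketch (the
   helper name is hypothetical; nothing in this file uses it): */
#if 0
static bool rtR0MemObjNtExampleIsMdlContiguous(PMDL pMdl, size_t cb)
{
    PPFN_NUMBER  paPfns = MmGetMdlPfnArray(pMdl);
    const size_t cPages = cb >> PAGE_SHIFT;
    for (size_t iPage = 1; iPage < cPages; iPage++)
        if (paPfns[iPage] != paPfns[0] + iPage)   /* each PFN must follow its predecessor */
            return false;
    return true;
}
#endif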
 

int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
            if (pMemNt)
            {
                pMemNt->fAllocatedPagesForMdl = true;
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }
        }
        MmFreePagesFromMdl(pMdl);
    }
    return VERR_NO_MEMORY;
#else   /* IPRT_TARGET_NT4 */
    return VERR_NOT_SUPPORTED;
#endif  /* IPRT_TARGET_NT4 */
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /*
     * Validate the address range and create a descriptor for it.
     */
    PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
    if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
        return VERR_ADDRESS_TOO_BIG;

    /*
     * Create the IPRT memory object.
     */
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (pMemNt)
    {
        pMemNt->Core.u.Phys.PhysBase = Phys;
        pMemNt->Core.u.Phys.fAllocated = false;
        *ppMem = &pMemNt->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
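
/* rtR0MemObjNativeEnterPhys() only records a physical range; to actually
   access, say, an MMIO region, the object is subsequently mapped via the
   mapping worker further down. A hedged caller-side sketch using the public
   IPRT API (RTR0MemObjEnterPhys()/RTR0MemObjMapKernel() with the signatures
   of this IPRT version; RTMEM_PROT_* come from iprt/mem.h; the example
   address and helper name are made up): */
#if 0
static int rtR0MemObjNtExampleMapMmio(void)
{
    RTR0MEMOBJ hPhysObj;
    int rc = RTR0MemObjEnterPhys(&hPhysObj, UINT64_C(0xfee00000), PAGE_SIZE);
    if (RT_SUCCESS(rc))
    {
        RTR0MEMOBJ hMapObj;
        rc = RTR0MemObjMapKernel(&hMapObj, hPhysObj, (void *)-1 /* anywhere */, 0 /* uAlignment */,
                                 RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
        {
            volatile uint32_t *pu32 = (volatile uint32_t *)RTR0MemObjAddress(hMapObj);
            NOREF(pu32);               /* ... access the registers here ... */
            RTR0MemObjFree(hMapObj, false);
        }
        RTR0MemObjFree(hPhysObj, false);
    }
    return rc;
}
#endif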
 

/**
 * Internal worker for locking down pages.
 *
 * @return IPRT status code.
 *
 * @param ppMem     Where to store the memory object pointer.
 * @param pv        First page.
 * @param cb        Number of bytes.
 * @param R0Process The process \a pv and \a cb refer to; NIL_RTR0PROCESS for kernel memory.
 */
static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
{
    /*
     * Calc the number of MDLs we need and allocate the memory object structure.
     */
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
    if (cb % MAX_LOCK_MEM_SIZE)
        cMdls++;
    if (cMdls >= UINT32_MAX)
        return VERR_OUT_OF_RANGE;
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNt)
        return VERR_NO_MEMORY;

    /*
     * Loop locking down the sub parts of the memory.
     */
    int         rc = VINF_SUCCESS;
    size_t      cbTotal = 0;
    uint8_t    *pb = (uint8_t *)pv;
    uint32_t    iMdl;
    for (iMdl = 0; iMdl < cMdls; iMdl++)
    {
        /*
         * Calc the Mdl size and allocate it.
         */
        size_t cbCur = cb - cbTotal;
        if (cbCur > MAX_LOCK_MEM_SIZE)
            cbCur = MAX_LOCK_MEM_SIZE;
        AssertMsg(cbCur, ("cbCur: 0!\n"));
        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
        if (!pMdl)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        /*
         * Lock the pages.
         */
        __try
        {
            MmProbeAndLockPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode, IoModifyAccess);
            pMemNt->apMdls[iMdl] = pMdl;
            pMemNt->cMdls++;
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            IoFreeMdl(pMdl);
            rc = VERR_LOCK_FAILED;
            break;
        }

        /* next */
        cbTotal += cbCur;
        pb      += cbCur;
    }
    if (RT_SUCCESS(rc))
    {
        Assert(pMemNt->cMdls == cMdls);
        pMemNt->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemNt->Core;
        return rc;
    }

    /*
     * We failed, perform cleanups.
     */
    while (iMdl-- > 0)
    {
        MmUnlockPages(pMemNt->apMdls[iMdl]);
        IoFreeMdl(pMemNt->apMdls[iMdl]);
        pMemNt->apMdls[iMdl] = NULL;
    }
    rtR0MemObjDelete(&pMemNt->Core);
    return VERR_LOCK_FAILED;
}
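
/* A hedged sketch of how a driver would lock down a ring-3 buffer through the
   public API on top of this worker (RTR0MemObjLockUser() as declared in
   iprt/memobj.h in this IPRT version; the helper below is purely
   illustrative): */
#if 0
static int rtR0MemObjNtExampleLockUserBuffer(RTR3PTR R3Ptr, size_t cb)
{
    RTR0MEMOBJ hLockObj;
    int rc = RTR0MemObjLockUser(&hLockObj, R3Ptr, cb, RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
    {
        /* The pages stay resident until the object is freed; their physical
           addresses can be queried page by page. */
        RTHCPHYS PhysFirst = RTR0MemObjGetPagePhysAddr(hLockObj, 0);
        NOREF(PhysFirst);
        RTR0MemObjFree(hLockObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif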
 

int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
}


int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /*
     * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /*
     * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object for the mapping.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Where to map it. (void *)-1 if anywhere is fine.
 * @param   uAlignment  The alignment requirement for the mapping.
 * @param   fProt       The desired page protection for the mapping.
 * @param   R0Process   If NIL_RTR0PROCESS map into system (kernel) memory.
 *                      If not nil, it's the current process.
 */
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                           unsigned fProt, RTR0PROCESS R0Process)
{
    int rc = VERR_MAP_FAILED;

    /*
     * There are two basic cases here, either we've got an MDL and can
     * map it using MmMapLockedPages, or we've got a contiguous physical
     * range (MMIO most likely) and can use MmMapIoSpace.
     */
    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
    if (pMemNtToMap->cMdls)
    {
        /* don't attempt to map locked regions with more than one MDL. */
        if (pMemNtToMap->cMdls != 1)
            return VERR_NOT_SUPPORTED;

        /* we can't map anything to the first page, sorry. */
        if (pvFixed == 0)
            return VERR_NOT_SUPPORTED;

        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
        if (    pMemNtToMap->Core.uRel.Parent.cMappings
            &&  R0Process == NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        __try
        {
            /** @todo uAlignment */
            /** @todo How to set the protection on the pages? */
            void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
                                                    R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                                    MmCached,
                                                    pvFixed != (void *)-1 ? pvFixed : NULL,
                                                    FALSE /* no bug check on failure */,
                                                    NormalPagePriority);
            if (pv)
            {
                NOREF(fProt);

                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                    pMemNtToMap->Core.cb);
                if (pMemNt)
                {
                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
            }
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            /* nothing */
            rc = VERR_MAP_FAILED;
        }
    }
    else
    {
        AssertReturn(   pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
                     && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);

        /* cannot map phys mem to user space (yet). */
        if (R0Process != NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        /** @todo uAlignment */
        /** @todo How to set the protection on the pages? */
        PHYSICAL_ADDRESS Phys;
        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
        void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
        if (pv)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                pMemNtToMap->Core.cb);
            if (pMemNt)
            {
                pMemNt->Core.u.Mapping.R0Process = R0Process;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
        }
    }

    NOREF(uAlignment); NOREF(fProt);
    return rc;
}
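
/* A hedged sketch of mapping an existing allocation into the calling process
   via the public RTR0MemObjMapUser() API, which ends up in the worker above
   with R0Process set to the current process (illustrative only; the helper
   name is made up and error handling is minimal): */
#if 0
static int rtR0MemObjNtExampleMapIntoCaller(RTR0MEMOBJ hMemObj, RTR3PTR *pR3Ptr)
{
    RTR0MEMOBJ hMapObj;
    int rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1 /* anywhere */, 0 /* uAlignment */,
                               RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
        *pR3Ptr = RTR0MemObjAddressR3(hMapObj);   /* ring-3 address of the new mapping */
    return rc;
}
#endif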
 

int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
}


int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    if (pMemNt->cMdls)
    {
        if (pMemNt->cMdls == 1)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
        }

        size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
    }

    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_LOCK:
        default:
            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
    }
}
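
/* A final hedged sketch showing how callers typically walk an object's pages
   with the public wrapper around the function above, e.g. to build a simple
   scatter/gather list (helper and array names are made up for illustration): */
#if 0
static void rtR0MemObjNtExampleListPages(RTR0MEMOBJ hMemObj, RTHCPHYS *paPages, size_t cPagesMax)
{
    size_t cPages = RTR0MemObjSize(hMemObj) >> PAGE_SHIFT;
    if (cPages > cPagesMax)
        cPages = cPagesMax;
    for (size_t iPage = 0; iPage < cPages; iPage++)
        paPages[iPage] = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);  /* NIL_RTHCPHYS if not backed */
}
#endif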
 