~ubuntu-branches/ubuntu/vivid/virtualbox/vivid


Viewing changes to src/VBox/VMM/VMMAll/PGMAllPhys.cpp

  • Committer: Package Import Robot
  • Author(s): Felix Geyer
  • Date: 2011-12-29 12:29:25 UTC
  • mfrom: (3.1.8 sid)
  • Revision ID: package-import@ubuntu.com-20111229122925-8ota2o33fuk0bkf8
Tags: 4.1.8-dfsg-1
* New upstream release.
* Move all transitional packages to section oldlibs and priority extra.
* Refresh 16-no-update.patch.
* Drop 36-kernel-3.2.patch, applied upstream.

--- src/VBox/VMM/VMMAll/PGMAllPhys.cpp (old)
+++ src/VBox/VMM/VMMAll/PGMAllPhys.cpp (new)
@@ -4,7 +4,7 @@
  */
 
 /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2011 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
  * available from http://www.virtualbox.org. This file is free software;
@@ -113,7 +113,7 @@
                         return VINF_SUCCESS;
                 }
             }
-            else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
+            else if (RT_UNLIKELY(rc == VERR_EM_INTERNAL_DISAS_ERROR))
                 return rc;
             break;
         }
@@ -132,7 +132,7 @@
         default:
             AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
                                    pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
-                                  VERR_INTERNAL_ERROR);
+                                  VERR_IPE_NOT_REACHED_DEFAULT_CASE);
     }
 
     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
@@ -510,7 +510,7 @@
             AssertMsgReturn(    pVM->pgm.s.cHandyPages > 0
                             &&  pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
                             ("%u\n", pVM->pgm.s.cHandyPages),
-                            VERR_INTERNAL_ERROR);
+                            VERR_PGM_HANDY_PAGE_IPE);
         }
         else
         {
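
A pattern worth noting across this revision: generic VERR_INTERNAL_ERROR (and _2/_3) returns give way to dedicated status codes (VERR_EM_INTERNAL_DISAS_ERROR, VERR_IPE_NOT_REACHED_DEFAULT_CASE, VERR_PGM_HANDY_PAGE_IPE, and later the VERR_PGM_PHYS_PAGE_MAP_IPE_* family), so a status value seen in a log pins down the exact failure site. A minimal self-contained sketch of the idea; the names and values below are illustrative stand-ins, not the VBox/err.h definitions:

    #include <cstdio>

    // Illustrative stand-ins only; the real codes live in VBox/err.h.
    enum { ERR_GENERIC_INTERNAL = -225, ERR_HANDY_PAGE_IPE = -6133 };

    static int checkHandyPages(unsigned cHandyPages, unsigned cMax)
    {
        // One failure site, one unique status: a logged -6133 points
        // straight at this check instead of at "some internal error".
        if (cHandyPages == 0 || cHandyPages > cMax)
            return ERR_HANDY_PAGE_IPE;
        return 0;
    }

    int main()
    {
        std::printf("%d\n", checkHandyPages(0, 128)); // prints -6133
    }
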
@@ -631,8 +631,7 @@
     const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
     pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
 
-    const void *pvSharedPage = NULL;
-
+    void *pvSharedPage = NULL;
     if (PGM_PAGE_IS_SHARED(pPage))
     {
         /* Mark this shared page for freeing/dereferencing. */
@@ -644,8 +643,8 @@
         STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
         pVM->pgm.s.cSharedPages--;
 
-        /* Grab the address of the page so we can make a copy later on. */
-        rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
+        /* Grab the address of the page so we can make a copy later on. (safe) */
+        rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pvSharedPage);
         AssertRC(rc);
     }
     else
@@ -669,13 +668,13 @@
     if (pvSharedPage)
     {
         /* Get the virtual address of the new page. */
-        void *pvNewPage;
-        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage);
-        AssertRC(rc);
-        if (rc == VINF_SUCCESS)
+        PGMPAGEMAPLOCK  PgMpLck;
+        void           *pvNewPage;
+        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
+        if (RT_SUCCESS(rc))
         {
-            /** @todo todo write ASMMemCopyPage */
-            memcpy(pvNewPage, pvSharedPage, PAGE_SIZE);
+            memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo todo write ASMMemCopyPage */
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
         }
     }
 
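
The shared-page replacement path above now maps the destination through the lock-aware pgmPhysGCPhys2CCPtrInternal and releases the PGMPAGEMAPLOCK as soon as the copy is done. A self-contained sketch of that acquire/copy/release shape, using plain C++ stand-ins rather than the PGM types:

    #include <cstring>
    #include <cstdint>

    struct MapLock { void *pvMap; unsigned *pcRefs; };   // stand-in for PGMPAGEMAPLOCK

    // acquire()/release() model pgmPhysGCPhys2CCPtrInternal and
    // pgmPhysReleaseInternalPageMappingLock: the lock pins the mapping
    // so the pointer stays valid for the duration of the copy.
    static void *acquire(void *pvPage, unsigned *pcRefs, MapLock *pLock)
    {
        ++*pcRefs;
        pLock->pvMap  = pvPage;
        pLock->pcRefs = pcRefs;
        return pvPage;
    }

    static void release(MapLock *pLock)
    {
        --*pLock->pcRefs;
        pLock->pvMap = nullptr;
    }

    int main()
    {
        unsigned cRefs = 0;
        alignas(4096) static uint8_t newPage[4096], sharedPage[4096];

        MapLock Lck;
        void *pvNew = acquire(newPage, &cRefs, &Lck);     // map + pin
        std::memcpy(pvNew, sharedPage, sizeof(newPage));  // copy while pinned
        release(&Lck);                                    // unpin right after
    }
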
@@ -921,7 +920,7 @@
  * @param   ppv         Where to store the mapping address.
  *
  * @remarks Called from within the PGM critical section.  The mapping is only
- *          valid while your inside this section.
+ *          valid while you are inside this section.
  */
 int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
 {
@@ -958,7 +957,9 @@
          * Find the chunk, map it if necessary.
          */
         pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
-        if (!pMap)
+        if (pMap)
+            pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
+        else
         {
 # ifdef IN_RING0
             int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
@@ -977,7 +978,6 @@
          */
         pTlbe->idChunk = idChunk;
         pTlbe->pChunk = pMap;
-        pMap->iAge = 0;
     }
 
     *ppv = (uint8_t *)pMap->pv + ((idPage &GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
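
Both chunk-mapping paths drop the pMap->iAge = 0 reset and instead stamp pMap->iLastUsed with the map cache's current tick (ChunkR3Map.iNow) on every hit, so eviction can simply pick the smallest stamp rather than aging every entry. A minimal model of that least-recently-used bookkeeping; all names here are hypothetical stand-ins:

    #include <cstddef>
    #include <cstdio>

    struct Chunk { unsigned id; unsigned iLastUsed; };

    static unsigned g_iNow = 1;            // models pVM->pgm.s.ChunkR3Map.iNow

    static void touch(Chunk &chunk)        // on every TLB/tree hit
    {
        chunk.iLastUsed = g_iNow;
    }

    static Chunk *pickEvictionCandidate(Chunk *paChunks, size_t cChunks)
    {
        Chunk *pBest = nullptr;            // least recently used == smallest stamp
        for (size_t i = 0; i < cChunks; i++)
            if (!pBest || paChunks[i].iLastUsed < pBest->iLastUsed)
                pBest = &paChunks[i];
        return pBest;
    }

    int main()
    {
        Chunk aChunks[3] = { {0, 0}, {1, 0}, {2, 0} };
        g_iNow++; touch(aChunks[1]);
        g_iNow++; touch(aChunks[2]);
        std::printf("evict chunk %u\n", pickEvictionCandidate(aChunks, 3)->id); // chunk 0
    }
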
@@ -1025,12 +1025,12 @@
     const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
     if (idChunk == NIL_GMM_CHUNKID)
     {
-        AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
+        AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_1);
         if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
         {
             /* Lookup the MMIO2 range and use pvR3 to calc the address. */
             PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
-            AssertMsgReturn(pRam || !pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
+            AssertMsgReturn(pRam || !pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_2);
             *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
         }
         else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
@@ -1039,15 +1039,15 @@
              * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
              * them, that would also avoid this mess. It would actually be kind of
              * elegant... */
-            AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
+            AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_PGM_MAP_MMIO2_ALIAS_MMIO);
         }
         else
         {
             /** @todo handle MMIO2 */
-            AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
+            AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_3);
             AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
                             ("pPage=%R[pgmpage]\n", pPage),
-                            VERR_INTERNAL_ERROR_2);
+                            VERR_PGM_PHYS_PAGE_MAP_IPE_4);
             *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
         }
         *ppMap = NULL;
@@ -1063,6 +1063,7 @@
     {
         STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
         pMap = pTlbe->pChunk;
+        AssertPtr(pMap->pv);
     }
     else
     {
@@ -1072,7 +1073,12 @@
          * Find the chunk, map it if necessary.
          */
         pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
-        if (!pMap)
+        if (pMap)
+        {
+            AssertPtr(pMap->pv);
+            pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
+        }
+        else
         {
 #ifdef IN_RING0
             int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
@@ -1084,6 +1090,7 @@
             if (RT_FAILURE(rc))
                 return rc;
 #endif
+            AssertPtr(pMap->pv);
         }
 
         /*
@@ -1091,7 +1098,6 @@
          */
         pTlbe->idChunk = idChunk;
         pTlbe->pChunk = pMap;
-        pMap->iAge = 0;
     }
 
     *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
@@ -1119,7 +1125,7 @@
  *                      offset is masked off!
  *
  * @remarks Called from within the PGM critical section.  The mapping is only
- *          valid while your inside this section.
+ *          valid while you are inside section.
  */
 int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
 {
@@ -1155,7 +1161,7 @@
  *                      offset is masked off!
  *
  * @remarks Called from within the PGM critical section.  The mapping is only
- *          valid while your inside this section.
+ *          valid while you are inside section.
  */
 int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
 {
@@ -1183,7 +1189,7 @@
  *                      offset is masked off!
  *
  * @remarks Called from within the PGM critical section.  The mapping is only
- *          valid while your inside this section.
+ *          valid while you are inside this section.
  */
 int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
 {
@@ -1291,12 +1297,14 @@
  * @param   ppv         Where to store the address corresponding to GCPhys.
  *
  * @internal
+ * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
  */
-int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
+int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
 {
     int rc;
-    AssertReturn(pPage, VERR_INTERNAL_ERROR);
+    AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
     PGM_LOCK_ASSERT_OWNER(pVM);
+    pVM->pgm.s.cDeprecatedPageLocks++;
 
     /*
      * Make sure the page is writable.
@@ -1332,10 +1340,144 @@
     return VINF_SUCCESS;
 }
 
+#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+
+/**
+ * Locks a page mapping for writing.
+ *
+ * @param   pVM                 The VM handle.
+ * @param   pPage               The page.
+ * @param   pTlbe               The mapping TLB entry for the page.
+ * @param   pLock               The lock structure (output).
+ */
+DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
+{
+    PPGMPAGEMAP pMap = pTlbe->pMap;
+    if (pMap)
+        pMap->cRefs++;
+
+    unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
+    if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
+    {
+        if (cLocks == 0)
+            pVM->pgm.s.cWriteLockedPages++;
+        PGM_PAGE_INC_WRITE_LOCKS(pPage);
+    }
+    else if (cLocks != PGM_PAGE_MAX_LOCKS)
+    {
+        PGM_PAGE_INC_WRITE_LOCKS(pPage);
+        AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
+        if (pMap)
+            pMap->cRefs++; /* Extra ref to prevent it from going away. */
+    }
+
+    pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
+    pLock->pvMap = pMap;
+}
+
+/**
+ * Locks a page mapping for reading.
+ *
+ * @param   pVM                 The VM handle.
+ * @param   pPage               The page.
+ * @param   pTlbe               The mapping TLB entry for the page.
+ * @param   pLock               The lock structure (output).
+ */
+DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
+{
+    PPGMPAGEMAP pMap = pTlbe->pMap;
+    if (pMap)
+        pMap->cRefs++;
+
+    unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
+    if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
+    {
+        if (cLocks == 0)
+            pVM->pgm.s.cReadLockedPages++;
+        PGM_PAGE_INC_READ_LOCKS(pPage);
+    }
+    else if (cLocks != PGM_PAGE_MAX_LOCKS)
+    {
+        PGM_PAGE_INC_READ_LOCKS(pPage);
+        AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
+        if (pMap)
+            pMap->cRefs++; /* Extra ref to prevent it from going away. */
+    }
+
+    pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
+    pLock->pvMap = pMap;
+}
+
+#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
+
+
+/**
+ * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
+ * own the PGM lock and have access to the page structure.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
+ * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
+ *
+ * @param   pVM         The VM handle.
+ * @param   GCPhys      The guest physical address of the page that should be mapped.
+ * @param   pPage       Pointer to the PGMPAGE structure for the page.
+ * @param   ppv         Where to store the address corresponding to GCPhys.
+ * @param   pLock       Where to store the lock information that
+ *                      pgmPhysReleaseInternalPageMappingLock needs.
+ *
+ * @internal
+ */
+int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
+{
+    int rc;
+    AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
+    PGM_LOCK_ASSERT_OWNER(pVM);
+
+    /*
+     * Make sure the page is writable.
+     */
+    if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
+    {
+        rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
+        if (RT_FAILURE(rc))
+            return rc;
+        AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
+    }
+    Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
+
+    /*
+     * Do the job.
+     */
+#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+    void *pv;
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    rc = pgmRZDynMapHCPageInlined(pVCpu,
+                                  PGM_PAGE_GET_HCPHYS(pPage),
+                                  &pv
+                                  RTLOG_COMMA_SRC_POS);
+    if (RT_FAILURE(rc))
+        return rc;
+    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
+    pLock->pvPage = pv;
+    pLock->pVCpu  = pVCpu;
+
+#else
+    PPGMPAGEMAPTLBE pTlbe;
+    rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
+    if (RT_FAILURE(rc))
+        return rc;
+    pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
+    *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
+#endif
+    return VINF_SUCCESS;
+}
+
 
 /**
  * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
- * own the PGM lock and therefore not need to lock the mapped page.
+ * own the PGM lock and have access to the page structure.
  *
  * @returns VBox status code.
  * @retval  VINF_SUCCESS on success.
@@ -1346,32 +1488,39 @@
  * @param   GCPhys      The guest physical address of the page that should be mapped.
  * @param   pPage       Pointer to the PGMPAGE structure for the page.
  * @param   ppv         Where to store the address corresponding to GCPhys.
+ * @param   pLock       Where to store the lock information that
+ *                      pgmPhysReleaseInternalPageMappingLock needs.
  *
  * @internal
  */
-int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
+int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
 {
-    AssertReturn(pPage, VERR_INTERNAL_ERROR);
+    AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
     PGM_LOCK_ASSERT_OWNER(pVM);
     Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
 
     /*
-     * Get the mapping address.
+     * Do the job.
      */
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     void *pv;
-    int rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    int rc = pgmRZDynMapHCPageInlined(pVCpu,
                                       PGM_PAGE_GET_HCPHYS(pPage),
                                       &pv
                                       RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
     if (RT_FAILURE(rc))
        return rc;
     *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
+    pLock->pvPage = pv;
+    pLock->pVCpu  = pVCpu;
+
#else
     PPGMPAGEMAPTLBE pTlbe;
     int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
     if (RT_FAILURE(rc))
         return rc;
+    pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
     *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
 #endif
     return VINF_SUCCESS;
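
The new pgmPhysPageMapLockForWriting/pgmPhysPageMapLockForReading helpers centralise the per-page lock counting that was previously open-coded in the public mapping APIs: the count rises normally up to PGM_PAGE_MAX_LOCKS - 1, and on overflow the page is deliberately parked in a permanent locked state (with an extra map reference) rather than risking a later underflow. A self-contained model of that saturating counter, with stand-in names and values:

    #include <cassert>
    #include <cstdio>

    enum { MAX_LOCKS = 254 };   // stand-in for PGM_PAGE_MAX_LOCKS

    struct Page { unsigned cWriteLocks = 0; };

    static void lockForWriting(Page &page)
    {
        unsigned cLocks = page.cWriteLocks;
        if (cLocks < MAX_LOCKS - 1)
            page.cWriteLocks = cLocks + 1;   // normal case
        else if (cLocks != MAX_LOCKS)
            page.cWriteLocks = MAX_LOCKS;    // saturate: permanently locked
        // else: already saturated, leave the counter alone
    }

    static void unlockForWriting(Page &page)
    {
        if (page.cWriteLocks != MAX_LOCKS)   // saturated pages stay locked
        {
            assert(page.cWriteLocks > 0);
            page.cWriteLocks--;
        }
    }

    int main()
    {
        Page page;
        for (unsigned i = 0; i < 1000; i++)  // overflow attempt
            lockForWriting(page);
        unlockForWriting(page);
        std::printf("%u\n", page.cWriteLocks); // stays at 254: saturated
    }
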
@@ -1381,9 +1530,9 @@
 /**
  * Requests the mapping of a guest page into the current context.
  *
- * This API should only be used for very short term, as it will consume
- * scarse resources (R0 and GC) in the mapping cache. When you're done
- * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
+ * This API should only be used for very short term, as it will consume scarse
+ * resources (R0 and GC) in the mapping cache. When you're done with the page,
+ * call PGMPhysReleasePageMappingLock() ASAP to release it.
 *
  * This API will assume your intention is to write to the page, and will
  * therefore replace shared and zero pages. If you do not intend to modify
@@ -1395,16 +1544,20 @@
  * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
  *
  * @param   pVM         The VM handle.
- * @param   GCPhys      The guest physical address of the page that should be mapped.
+ * @param   GCPhys      The guest physical address of the page that should be
+ *                      mapped.
  * @param   ppv         Where to store the address corresponding to GCPhys.
- * @param   pLock       Where to store the lock information that PGMPhysReleasePageMappingLock needs.
+ * @param   pLock       Where to store the lock information that
+ *                      PGMPhysReleasePageMappingLock needs.
  *
  * @remarks The caller is responsible for dealing with access handlers.
 * @todo    Add an informational return code for pages with access handlers?
  *
- * @remark  Avoid calling this API from within critical sections (other than the
- *          PGM one) because of the deadlock risk. External threads may need to
- *          delegate jobs to the EMTs.
+ * @remark  Avoid calling this API from within critical sections (other than
+ *          the PGM one) because of the deadlock risk. External threads may
+ *          need to delegate jobs to the EMTs.
+ * @remarks Only one page is mapped!  Make no assumption about what's after or
+ *          before the returned page!
 * @thread  Any thread.
  */
 VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
@@ -1445,10 +1598,6 @@
     }
 
 #else  /* IN_RING3 || IN_RING0 */
-    /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-    /** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary.   */
-    /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-
     /*
      * Query the Physical TLB entry for the page (may fail).
      */
1624
            /*
1476
1625
             * Now, just perform the locking and calculate the return address.
1477
1626
             */
1478
 
            PPGMPAGEMAP pMap = pTlbe->pMap;
1479
 
            if (pMap)
1480
 
                pMap->cRefs++;
1481
 
 
1482
 
            unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1483
 
            if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1484
 
            {
1485
 
                if (cLocks == 0)
1486
 
                    pVM->pgm.s.cWriteLockedPages++;
1487
 
                PGM_PAGE_INC_WRITE_LOCKS(pPage);
1488
 
            }
1489
 
            else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1490
 
            {
1491
 
                PGM_PAGE_INC_WRITE_LOCKS(pPage);
1492
 
                AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1493
 
                if (pMap)
1494
 
                    pMap->cRefs++; /* Extra ref to prevent it from going away. */
1495
 
            }
1496
 
 
 
1627
            pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1497
1628
            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1498
 
            pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1499
 
            pLock->pvMap = pMap;
1500
1629
        }
1501
1630
    }
1502
1631
 
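
With the helper in place, PGMPhysGCPhys2CCPtr reduces to the TLB query, pgmPhysPageMapLockForWriting and the address arithmetic. A hypothetical ring-3 caller following the map/use/release-ASAP discipline the doc comment prescribes might look like this (a sketch only; it assumes the VirtualBox VMM headers, a valid pVM handle and page-aligned guest RAM at GCPhys):

    #include <VBox/vmm/pgm.h>   /* assumption: building against the VBox VMM headers */
    #include <cstring>

    /* Hypothetical helper: write a byte pattern into one guest page from ring-3. */
    static int writePattern(PVM pVM, RTGCPHYS GCPhys)
    {
        void           *pv;
        PGMPAGEMAPLOCK  Lock;
        int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);  /* maps + locks */
        if (RT_SUCCESS(rc))
        {
            memset(pv, 0x42, PAGE_SIZE);  /* only this one page is mapped! */
            PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP */
        }
        return rc;
    }
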
@@ -1509,9 +1638,9 @@
 /**
  * Requests the mapping of a guest page into the current context.
  *
- * This API should only be used for very short term, as it will consume
- * scarse resources (R0 and GC) in the mapping cache. When you're done
- * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
+ * This API should only be used for very short term, as it will consume scarse
+ * resources (R0 and GC) in the mapping cache.  When you're done with the page,
+ * call PGMPhysReleasePageMappingLock() ASAP to release it.
 *
  * @returns VBox status code.
  * @retval  VINF_SUCCESS on success.
1648
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1520
1649
 *
1521
1650
 * @param   pVM         The VM handle.
1522
 
 * @param   GCPhys      The guest physical address of the page that should be mapped.
 
1651
 * @param   GCPhys      The guest physical address of the page that should be
 
1652
 *                      mapped.
1523
1653
 * @param   ppv         Where to store the address corresponding to GCPhys.
1524
 
 * @param   pLock       Where to store the lock information that PGMPhysReleasePageMappingLock needs.
 
1654
 * @param   pLock       Where to store the lock information that
 
1655
 *                      PGMPhysReleasePageMappingLock needs.
1525
1656
 *
1526
1657
 * @remarks The caller is responsible for dealing with access handlers.
1527
1658
 * @todo    Add an informational return code for pages with access handlers?
1528
1659
 *
1529
 
 * @remark  Avoid calling this API from within critical sections (other than
 
1660
 * @remarks Avoid calling this API from within critical sections (other than
1530
1661
 *          the PGM one) because of the deadlock risk.
 
1662
 * @remarks Only one page is mapped!  Make no assumption about what's after or
 
1663
 *          before the returned page!
1531
1664
 * @thread  Any thread.
1532
1665
 */
1533
1666
VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
@@ -1566,11 +1699,6 @@
     }
 
 #else  /* IN_RING3 || IN_RING0 */
-
-    /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-    /** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary.   */
-    /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-
     /*
      * Query the Physical TLB entry for the page (may fail).
      */
@@ -1587,28 +1715,8 @@
             /*
              * Now, just perform the locking and calculate the return address.
              */
-            PPGMPAGEMAP pMap = pTlbe->pMap;
-            if (pMap)
-                pMap->cRefs++;
-
-            unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
-            if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
-            {
-                if (cLocks == 0)
-                    pVM->pgm.s.cReadLockedPages++;
-                PGM_PAGE_INC_READ_LOCKS(pPage);
-            }
-            else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
-            {
-                PGM_PAGE_INC_READ_LOCKS(pPage);
-                AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
-                if (pMap)
-                    pMap->cRefs++; /* Extra ref to prevent it from going away. */
-            }
-
+            pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
             *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-            pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
-            pLock->pvMap = pMap;
         }
     }
 
@@ -1759,7 +1867,6 @@
     {
         Assert(pMap->cRefs >= 1);
         pMap->cRefs--;
-        pMap->iAge = 0;
     }
     pgmUnlock(pVM);
 #endif /* IN_RING3 */
@@ -1767,6 +1874,24 @@
 
 
 /**
+ * Release the internal mapping of a guest page.
+ *
+ * This is the counter part of pgmPhysGCPhys2CCPtrInternalEx and
+ * pgmPhysGCPhys2CCPtrInternalReadOnly.
+ *
+ * @param   pVM         The VM handle.
+ * @param   pLock       The lock structure initialized by the mapping function.
+ *
+ * @remarks Caller must hold the PGM lock.
+ */
+void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
+{
+    PGM_LOCK_ASSERT_OWNER(pVM);
+    PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
+}
+
+
+/**
  * Converts a GC physical address to a HC ring-3 pointer.
  *
  * @returns VINF_SUCCESS on success.
@@ -1779,19 +1904,18 @@
  *
  * @param   pVM         The VM handle.
  * @param   GCPhys      The GC physical address to convert.
- * @param   cbRange     Physical range
  * @param   pR3Ptr      Where to store the R3 pointer on success.
  *
  * @deprecated  Avoid when possible!
  */
-VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
+int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
 {
 /** @todo this is kind of hacky and needs some more work. */
 #ifndef DEBUG_sandervl
     VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
 #endif
 
-    Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
+    Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     AssertFailedReturn(VERR_NOT_IMPLEMENTED);
 #else
@@ -1801,7 +1925,7 @@
     PPGMPAGE pPage;
     int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
     if (RT_SUCCESS(rc))
-        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
+        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
 
     pgmUnlock(pVM);
     Assert(rc <= VINF_SUCCESS);
1934
}
1811
1935
 
1812
1936
 
1813
 
#ifdef VBOX_STRICT
1814
 
/**
1815
 
 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1816
 
 *
1817
 
 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1818
 
 * @param   pVM         The VM handle.
1819
 
 * @param   GCPhys      The GC Physical address.
1820
 
 * @param   cbRange     Physical range.
1821
 
 *
1822
 
 * @deprecated  Avoid when possible.
1823
 
 */
1824
 
VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1825
 
{
1826
 
    RTR3PTR R3Ptr;
1827
 
    int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1828
 
    if (RT_SUCCESS(rc))
1829
 
        return R3Ptr;
1830
 
    return NIL_RTR3PTR;
1831
 
}
1832
 
#endif /* VBOX_STRICT */
1833
 
 
1834
 
 
1835
1937
/**
1836
1938
 * Converts a guest pointer to a GC physical address.
1837
1939
 *
@@ -1930,8 +2032,9 @@
     /*
      * Whatever we do we need the source page, map it first.
     */
-    const void *pvSrc = NULL;
-    int         rc    = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
+    PGMPAGEMAPLOCK PgMpLck;
+    const void    *pvSrc = NULL;
+    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
     if (RT_FAILURE(rc))
     {
         AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
@@ -1976,6 +2079,7 @@
 #else
         /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
         //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
+        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
         return VERR_PGM_PHYS_WR_HIT_HANDLER;
 #endif
     }
@@ -2017,6 +2121,7 @@
 #else
         /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
         //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
+        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
         return VERR_PGM_PHYS_WR_HIT_HANDLER;
 #endif
     }
@@ -2026,6 +2131,7 @@
      */
     if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
         memcpy(pvBuf, pvSrc, cb);
+    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
     return rc;
 }
 
@@ -2093,10 +2199,14 @@
                     /*
                      * Get the pointer to the page.
                      */
-                    const void *pvSrc;
-                    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
+                    PGMPAGEMAPLOCK PgMpLck;
+                    const void    *pvSrc;
+                    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
                     if (RT_SUCCESS(rc))
+                    {
                         memcpy(pvBuf, pvSrc, cb);
+                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+                    }
                     else
                     {
                         AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
@@ -2163,8 +2273,9 @@
  */
 static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
 {
-    void *pvDst = NULL;
-    int rc;
+    PGMPAGEMAPLOCK  PgMpLck;
+    void           *pvDst = NULL;
+    int             rc;
 
     /*
      * Give priority to physical handlers (like #PF does).
@@ -2195,7 +2306,7 @@
 #else  /* IN_RING3 */
             Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
             if (!PGM_PAGE_IS_MMIO(pPage))
-                rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
+                rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
             else
                 rc = VINF_SUCCESS;
             if (RT_SUCCESS(rc))
@@ -2216,8 +2327,11 @@
 # else
                 pCur = NULL; /* might not be valid anymore. */
 # endif
-                if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
-                    memcpy(pvDst, pvBuf, cbRange);
+                if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
+                {
+                    if (pvDst)
+                        memcpy(pvDst, pvBuf, cbRange);
+                }
                 else
                     AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
             }
@@ -2225,7 +2339,11 @@
                 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                              GCPhys, pPage, rc), rc);
             if (RT_LIKELY(cbRange == cbWrite))
+            {
+                if (pvDst)
+                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                 return VINF_SUCCESS;
+            }
 
             /* more fun to be had below */
             cbWrite -= cbRange;
@@ -2239,7 +2357,7 @@
     }
     /*
      * A virtual handler without any interfering physical handlers.
-     * Hopefully it'll convert the whole write.
+     * Hopefully it'll cover the whole write.
      */
     else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
     {
@@ -2261,7 +2379,7 @@
 #else  /* IN_RING3 */
 
             Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
-            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
+            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
             if (RT_SUCCESS(rc))
             {
                 rc = VINF_PGM_HANDLER_DO_DEFAULT;
@@ -2284,7 +2402,10 @@
                 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                              GCPhys, pPage, rc), rc);
             if (RT_LIKELY(cbRange == cbWrite))
+            {
+                pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                 return VINF_SUCCESS;
+            }
 
             /* more fun to be had below */
             cbWrite -= cbRange;
@@ -2303,7 +2424,7 @@
     /* We need a writable destination page. */
     if (!pvDst)
     {
-        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
+        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
         AssertLogRelMsgReturn(RT_SUCCESS(rc),
                               ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                GCPhys, pPage, rc), rc);
@@ -2433,6 +2554,7 @@
             /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
             NOREF(cbRange);
             //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
             return VERR_PGM_PHYS_WR_HIT_HANDLER;
 #endif
         }
@@ -2462,6 +2584,7 @@
             /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
             NOREF(cbRange);
             //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
             return VERR_PGM_PHYS_WR_HIT_HANDLER;
 #endif
         }
@@ -2518,6 +2641,7 @@
             /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
             NOREF(cbRange);
             //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
             return VERR_PGM_PHYS_WR_HIT_HANDLER;
 #endif
         }
@@ -2528,7 +2652,10 @@
          * Advance if we've got more stuff to do.
          */
         if (cbRange >= cbWrite)
+        {
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
             return VINF_SUCCESS;
+        }
 
         cbWrite         -= cbRange;
         GCPhys          += cbRange;
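
pgmPhysWriteHandler now threads a PgMpLck local through the whole function, and every exit path (the R0/RC handler bail-outs, the cbRange == cbWrite completions) releases the mapping lock first. In C this must be spelled out before each return; the following self-contained C++ illustration expresses the same discipline as a scope guard, purely to clarify the invariant, since the VMM code itself stays C-style:

    #include <cstdio>

    struct MapLock { bool fHeld = false; };

    static void releaseLock(MapLock &lock) { lock.fHeld = false; std::puts("released"); }

    // RAII guard: whichever path leaves the scope, the lock is released --
    // this is what the hand-written release-before-every-return achieves in C.
    struct LockGuard
    {
        MapLock &m_lock;
        explicit LockGuard(MapLock &lock) : m_lock(lock) { m_lock.fHeld = true; }
        ~LockGuard() { if (m_lock.fHeld) releaseLock(m_lock); }
    };

    static int writeHandler(bool fHandlerFails)
    {
        MapLock lock;
        LockGuard guard(lock);          // acquired
        if (fHandlerFails)
            return -1;                  // released by ~LockGuard
        return 0;                       // released by ~LockGuard
    }

    int main()
    {
        writeHandler(true);
        writeHandler(false);
    }
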
@@ -2607,16 +2734,17 @@
                     /*
                      * Get the pointer to the page.
                      */
-                    void *pvDst;
-                    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
+                    PGMPAGEMAPLOCK PgMpLck;
+                    void          *pvDst;
+                    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
                     if (RT_SUCCESS(rc))
                     {
                         Assert(!PGM_PAGE_IS_BALLOONED(pPage));
                         memcpy(pvDst, pvBuf, cb);
+                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                     }
-                    else
                     /* Ignore writes to ballooned pages. */
-                    if (!PGM_PAGE_IS_BALLOONED(pPage))
+                    else if (!PGM_PAGE_IS_BALLOONED(pPage))
                         AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                                 pRam->GCPhys + off, pPage, rc));
                 }
@@ -3278,19 +3406,22 @@
         if (RT_SUCCESS(rc))
         {
             /** @todo we should check reserved bits ... */
-            void *pvSrc;
-            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, &pvSrc);
+            PGMPAGEMAPLOCK PgMpLck;
+            void const    *pvSrc;
+            rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
             switch (rc)
             {
                 case VINF_SUCCESS:
                     Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
                     memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
+                    PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
                     break;
                 case VERR_PGM_PHYS_PAGE_RESERVED:
                 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
-                    memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
+                    memset(pvDst, 0xff, cb);
                     break;
                 default:
+                    Assert(RT_FAILURE_NP(rc));
                     return rc;
             }
 
@@ -3320,30 +3451,35 @@
         {
             /** @todo we should check reserved bits ... */
             AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
-            void *pvSrc1;
-            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys1, &pvSrc1);
+            PGMPAGEMAPLOCK PgMpLck;
+            void const *pvSrc1;
+            rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
             switch (rc)
             {
                 case VINF_SUCCESS:
                     memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
+                    PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
                     break;
                 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
-                    memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
+                    memset(pvDst, 0xff, cb1);
                     break;
                 default:
+                    Assert(RT_FAILURE_NP(rc));
                     return rc;
             }
 
-            void *pvSrc2;
-            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys2, &pvSrc2);
+            void const *pvSrc2;
+            rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
             switch (rc)
             {
                 case VINF_SUCCESS:
                     memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
+                    PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
                     break;
                 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
-                    memset((uint8_t *)pvDst + cb1, 0, cb2);  /** @todo this is wrong, it should be 0xff */
+                    memset((uint8_t *)pvDst + cb1, 0xff, cb2);
                     break;
                 default:
+                    Assert(RT_FAILURE_NP(rc));
                     return rc;
             }
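
Finally, PGMPhysInterpretedRead resolves the old "@todo this is wrong, it should be 0xff" remarks: reads from reserved or invalid guest physical addresses now fill the destination with 0xff rather than 0, which matches the all-ones value typically seen on real hardware for unbacked (open-bus) reads. The fallback pattern in isolation:

    #include <cstring>
    #include <cstdio>
    #include <cstdint>

    // Model of the fixed fallback: reads from unbacked physical addresses
    // return all-ones, matching typical open-bus behaviour, not zeroes.
    static void readPhys(bool fBacked, const uint8_t *pbSrc, uint8_t *pbDst, size_t cb)
    {
        if (fBacked)
            std::memcpy(pbDst, pbSrc, cb);
        else
            std::memset(pbDst, 0xff, cb);   // was 0 before this revision
    }

    int main()
    {
        uint8_t src[4] = {1, 2, 3, 4}, dst[4];
        readPhys(false, src, dst, sizeof(dst));
        std::printf("%02x\n", dst[0]);      // ff
    }
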