~ubuntu-branches/ubuntu/trusty/virtualbox/trusty-proposed

« back to all changes in this revision

Viewing changes to src/VBox/VMM/VMMR3/PDMBlkCache.cpp

  • Committer: Package Import Robot
  • Author(s): Felix Geyer
  • Date: 2013-03-07 16:38:36 UTC
  • mfrom: (1.1.13) (3.1.20 experimental)
  • Revision ID: package-import@ubuntu.com-20130307163836-p93jpbgx39tp3gb4
Tags: 4.2.8-dfsg-0ubuntu1
* New upstream release. (Closes: #691148)
  - Fixes compatibility with kernel 3.8. (Closes: #700823; LP: #1101867)
* Switch to my @debian.org email address.
* Move package to contrib as virtualbox 4.2 needs a non-free compiler to
  build the BIOS.
* Build-depend on libdevmapper-dev.
* Refresh patches.
  - Drop 36-fix-ftbfs-xserver-112.patch, cve-2012-3221.patch,
    CVE-2013-0420.patch, 37-kcompat-3.6.patch and 38-kcompat-3.7.patch.
* Drop all virtualbox-ose transitional packages.
* Drop the virtualbox-fuse package as vdfuse fails to build with
  virtualbox 4.2.
* Update install files and VBox.sh.
* Bump required kbuild version to 0.1.9998svn2577.
* Fix path to VBoxCreateUSBNode.sh in virtualbox.postinst. (Closes: #700479)
* Add an init script to virtualbox-guest-x11 which loads the vboxvideo
  kernel module. The X Server 1.13 doesn't load it anymore. (Closes: #686994)
* Update man pages. (Closes: #680053)
* Add 36-python-multiarch.patch from Rico Tzschichholz to fix detection of
  python in multiarch paths using pkg-config.

Show diffs side-by-side

added added

removed removed

Lines of Context:
4
4
 */
5
5
 
6
6
/*
7
 
 * Copyright (C) 2006-2008 Oracle Corporation
 
7
 * Copyright (C) 2006-2012 Oracle Corporation
8
8
 *
9
9
 * This file is part of VirtualBox Open Source Edition (OSE), as
10
10
 * available from http://www.virtualbox.org. This file is free software;
360
360
            {
361
361
                LogFlow(("Evicting entry %#p (%u bytes)\n", pCurr, pCurr->cbData));
362
362
 
363
 
                if (fReuseBuffer && (pCurr->cbData == cbData))
 
363
                if (fReuseBuffer && pCurr->cbData == cbData)
364
364
                {
365
365
                    STAM_COUNTER_INC(&pCache->StatBuffersReused);
366
366
                    *ppbBuffer = pCurr->pbData;
381
381
                    PPDMBLKCACHEENTRY pGhostEntFree = pGhostListDst->pTail;
382
382
 
383
383
                    /* We have to remove the last entries from the paged out list. */
384
 
                    while (   ((pGhostListDst->cbCached + pCurr->cbData) > pCache->cbRecentlyUsedOutMax)
 
384
                    while (   pGhostListDst->cbCached + pCurr->cbData > pCache->cbRecentlyUsedOutMax
385
385
                           && pGhostEntFree)
386
386
                    {
387
387
                        PPDMBLKCACHEENTRY pFree = pGhostEntFree;
655
655
    RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
656
656
 
657
657
    /* The list is moved to a new header to reduce locking overhead. */
658
 
    RTLISTNODE ListDirtyNotCommitted;
659
 
    RTSPINLOCKTMP Tmp;
 
658
    RTLISTANCHOR ListDirtyNotCommitted;
660
659
 
661
660
    RTListInit(&ListDirtyNotCommitted);
662
 
    RTSpinlockAcquire(pBlkCache->LockList, &Tmp);
 
661
    RTSpinlockAcquire(pBlkCache->LockList);
663
662
    RTListMove(&ListDirtyNotCommitted, &pBlkCache->ListDirtyNotCommitted);
664
 
    RTSpinlockRelease(pBlkCache->LockList, &Tmp);
 
663
    RTSpinlockRelease(pBlkCache->LockList);
665
664
 
666
665
    if (!RTListIsEmpty(&ListDirtyNotCommitted))
667
666
    {
680
679
        /* Commit the last endpoint */
681
680
        Assert(RTListNodeIsLast(&ListDirtyNotCommitted, &pEntry->NodeNotCommitted));
682
681
        pdmBlkCacheEntryCommit(pEntry);
 
682
        cbCommitted += pEntry->cbData;
683
683
        RTListNodeRemove(&pEntry->NodeNotCommitted);
684
684
        AssertMsg(RTListIsEmpty(&ListDirtyNotCommitted),
685
685
                  ("Committed all entries but list is not empty\n"));
753
753
    {
754
754
        pEntry->fFlags |= PDMBLKCACHE_ENTRY_IS_DIRTY;
755
755
 
756
 
        RTSPINLOCKTMP Tmp;
757
 
        RTSpinlockAcquire(pBlkCache->LockList, &Tmp);
 
756
        RTSpinlockAcquire(pBlkCache->LockList);
758
757
        RTListAppend(&pBlkCache->ListDirtyNotCommitted, &pEntry->NodeNotCommitted);
759
 
        RTSpinlockRelease(pBlkCache->LockList, &Tmp);
 
758
        RTSpinlockRelease(pBlkCache->LockList);
760
759
 
761
760
        uint32_t cbDirty = ASMAtomicAddU32(&pCache->cbDirty, pEntry->cbData);
762
761
 
793
792
/**
794
793
 * Commit timer callback.
795
794
 */
796
 
static void pdmBlkCacheCommitTimerCallback(PVM pVM, PTMTIMER pTimer, void *pvUser)
 
795
static DECLCALLBACK(void) pdmBlkCacheCommitTimerCallback(PVM pVM, PTMTIMER pTimer, void *pvUser)
797
796
{
798
797
    PPDMBLKCACHEGLOBAL pCache = (PPDMBLKCACHEGLOBAL)pvUser;
 
798
    NOREF(pVM); NOREF(pTimer);
799
799
 
800
800
    LogFlowFunc(("Commit interval expired, commiting dirty entries\n"));
801
801
 
824
824
        PPDMBLKCACHEENTRY pEntry;
825
825
 
826
826
        RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
827
 
        SSMR3PutU32(pSSM, strlen(pBlkCache->pszId));
 
827
        SSMR3PutU32(pSSM, (uint32_t)strlen(pBlkCache->pszId));
828
828
        SSMR3PutStrZ(pSSM, pBlkCache->pszId);
829
829
 
830
830
        /* Count the number of entries to safe. */
866
866
 
867
867
static DECLCALLBACK(int) pdmR3BlkCacheLoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
868
868
{
869
 
    int rc = VINF_SUCCESS;
870
 
    uint32_t cRefs;
871
869
    PPDMBLKCACHEGLOBAL pBlkCacheGlobal = pVM->pUVM->pdm.s.pBlkCacheGlobal;
 
870
    uint32_t           cRefs;
872
871
 
 
872
    NOREF(uPass);
873
873
    AssertPtr(pBlkCacheGlobal);
874
874
 
875
875
    pdmBlkCacheLockEnter(pBlkCacheGlobal);
883
883
     * Fewer users in the saved state than in the current VM are allowed
884
884
     * because that means that there are only new ones which don't have any saved state
885
885
     * which can get lost.
886
 
     * More saved entries that current ones are not allowed because this could result in
887
 
     * lost data.
 
886
     * More saved state entries than registered cache users are only allowed if the
 
887
     * missing users don't have any data saved in the cache.
888
888
     */
889
 
    if (cRefs <= pBlkCacheGlobal->cRefs)
 
889
    int rc = VINF_SUCCESS;
 
890
    char *pszId = NULL;
 
891
 
 
892
    while (   cRefs > 0
 
893
           && RT_SUCCESS(rc))
890
894
    {
891
 
        char *pszId = NULL;
892
 
 
893
 
        while (   cRefs > 0
894
 
               && RT_SUCCESS(rc))
895
 
        {
896
 
            PPDMBLKCACHE pBlkCache = NULL;
897
 
            uint32_t cbId = 0;
898
 
 
899
 
            SSMR3GetU32(pSSM, &cbId);
900
 
            Assert(cbId > 0);
901
 
 
902
 
            cbId++; /* Include terminator */
903
 
            pszId = (char *)RTMemAllocZ(cbId * sizeof(char));
904
 
            if (!pszId)
 
895
        PPDMBLKCACHE pBlkCache = NULL;
 
896
        uint32_t cbId = 0;
 
897
 
 
898
        SSMR3GetU32(pSSM, &cbId);
 
899
        Assert(cbId > 0);
 
900
 
 
901
        cbId++; /* Include terminator */
 
902
        pszId = (char *)RTMemAllocZ(cbId * sizeof(char));
 
903
        if (!pszId)
 
904
        {
 
905
            rc = VERR_NO_MEMORY;
 
906
            break;
 
907
        }
 
908
 
 
909
        rc = SSMR3GetStrZ(pSSM, pszId, cbId);
 
910
        AssertRC(rc);
 
911
 
 
912
        /* Search for the block cache with the provided id. */
 
913
        pBlkCache = pdmR3BlkCacheFindById(pBlkCacheGlobal, pszId);
 
914
 
 
915
        /* Get the entries */
 
916
        uint32_t cEntries;
 
917
        SSMR3GetU32(pSSM, &cEntries);
 
918
 
 
919
        if (!pBlkCache && (cEntries > 0))
 
920
        {
 
921
            rc = SSMR3SetCfgError(pSSM, RT_SRC_POS,
 
922
                                  N_("The VM is missing a block device and there is data in the cache. Please make sure the source and target VMs have compatible storage configurations"));
 
923
            break;
 
924
        }
 
925
 
 
926
        RTStrFree(pszId);
 
927
        pszId = NULL;
 
928
 
 
929
        while (cEntries > 0)
 
930
        {
 
931
            PPDMBLKCACHEENTRY pEntry;
 
932
            uint64_t off;
 
933
            uint32_t cbEntry;
 
934
 
 
935
            SSMR3GetU64(pSSM, &off);
 
936
            SSMR3GetU32(pSSM, &cbEntry);
 
937
 
 
938
            pEntry = pdmBlkCacheEntryAlloc(pBlkCache, off, cbEntry, NULL);
 
939
            if (!pEntry)
905
940
            {
906
941
                rc = VERR_NO_MEMORY;
907
942
                break;
908
943
            }
909
944
 
910
 
            rc = SSMR3GetStrZ(pSSM, pszId, cbId);
911
 
            AssertRC(rc);
912
 
 
913
 
            /* Search for the block cache with the provided id. */
914
 
            pBlkCache = pdmR3BlkCacheFindById(pBlkCacheGlobal, pszId);
915
 
            if (!pBlkCache)
 
945
            rc = SSMR3GetMem(pSSM, pEntry->pbData, cbEntry);
 
946
            if (RT_FAILURE(rc))
916
947
            {
917
 
                rc = SSMR3SetCfgError(pSSM, RT_SRC_POS,
918
 
                                      N_("The VM is missing a block device. Please make sure the source and target VMs have compatible storage configurations"));
 
948
                RTMemFree(pEntry->pbData);
 
949
                RTMemFree(pEntry);
919
950
                break;
920
951
            }
921
952
 
922
 
            RTStrFree(pszId);
923
 
            pszId = NULL;
924
 
 
925
 
            /* Get the entries */
926
 
            uint32_t cEntries;
927
 
            SSMR3GetU32(pSSM, &cEntries);
928
 
 
929
 
            while (cEntries > 0)
930
 
            {
931
 
                PPDMBLKCACHEENTRY pEntry;
932
 
                uint64_t off;
933
 
                uint32_t cbEntry;
934
 
 
935
 
                SSMR3GetU64(pSSM, &off);
936
 
                SSMR3GetU32(pSSM, &cbEntry);
937
 
 
938
 
                pEntry = pdmBlkCacheEntryAlloc(pBlkCache, off, cbEntry, NULL);
939
 
                if (!pEntry)
940
 
                {
941
 
                    rc = VERR_NO_MEMORY;
942
 
                    break;
943
 
                }
944
 
 
945
 
                rc = SSMR3GetMem(pSSM, pEntry->pbData, cbEntry);
946
 
                if (RT_FAILURE(rc))
947
 
                {
948
 
                    RTMemFree(pEntry->pbData);
949
 
                    RTMemFree(pEntry);
950
 
                    break;
951
 
                }
952
 
 
953
 
                /* Insert into the tree. */
954
 
                bool fInserted = RTAvlrU64Insert(pBlkCache->pTree, &pEntry->Core);
955
 
                Assert(fInserted);
956
 
 
957
 
                /* Add to the dirty list. */
958
 
                pdmBlkCacheAddDirtyEntry(pBlkCache, pEntry);
959
 
                pdmBlkCacheEntryAddToList(&pBlkCacheGlobal->LruRecentlyUsedIn, pEntry);
960
 
                pdmBlkCacheAdd(pBlkCacheGlobal, cbEntry);
961
 
                pdmBlkCacheEntryRelease(pEntry);
962
 
                cEntries--;
963
 
            }
964
 
 
965
 
            cRefs--;
 
953
            /* Insert into the tree. */
 
954
            bool fInserted = RTAvlrU64Insert(pBlkCache->pTree, &pEntry->Core);
 
955
            Assert(fInserted); NOREF(fInserted);
 
956
 
 
957
            /* Add to the dirty list. */
 
958
            pdmBlkCacheAddDirtyEntry(pBlkCache, pEntry);
 
959
            pdmBlkCacheEntryAddToList(&pBlkCacheGlobal->LruRecentlyUsedIn, pEntry);
 
960
            pdmBlkCacheAdd(pBlkCacheGlobal, cbEntry);
 
961
            pdmBlkCacheEntryRelease(pEntry);
 
962
            cEntries--;
966
963
        }
967
964
 
968
 
        if (pszId)
969
 
            RTStrFree(pszId);
 
965
        cRefs--;
970
966
    }
971
 
    else
 
967
 
 
968
    if (pszId)
 
969
        RTStrFree(pszId);
 
970
 
 
971
    if (cRefs && RT_SUCCESS(rc))
972
972
        rc = SSMR3SetCfgError(pSSM, RT_SRC_POS,
973
 
                              N_("The VM is missing a block device. Please make sure the source and target VMs have compatible storage configurations"));
 
973
                              N_("Unexpected error while restoring state. Please make sure the source and target VMs have compatible storage configurations"));
974
974
 
975
975
    pdmBlkCacheLockLeave(pBlkCacheGlobal);
976
976
 
1216
1216
            pBlkCache->pCache = pBlkCacheGlobal;
1217
1217
            RTListInit(&pBlkCache->ListDirtyNotCommitted);
1218
1218
 
1219
 
            rc = RTSpinlockCreate(&pBlkCache->LockList);
 
1219
            rc = RTSpinlockCreate(&pBlkCache->LockList, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "pdmR3BlkCacheRetain");
1220
1220
            if (RT_SUCCESS(rc))
1221
1221
            {
1222
1222
                rc = RTSemRWCreate(&pBlkCache->SemRWEntries);
1270
1270
VMMR3DECL(int) PDMR3BlkCacheRetainDriver(PVM pVM, PPDMDRVINS pDrvIns, PPPDMBLKCACHE ppBlkCache,
1271
1271
                                         PFNPDMBLKCACHEXFERCOMPLETEDRV pfnXferComplete,
1272
1272
                                         PFNPDMBLKCACHEXFERENQUEUEDRV pfnXferEnqueue,
 
1273
                                         PFNPDMBLKCACHEXFERENQUEUEDISCARDDRV pfnXferEnqueueDiscard,
1273
1274
                                         const char *pcszId)
1274
1275
{
1275
1276
    int rc = VINF_SUCCESS;
1278
1279
    rc = pdmR3BlkCacheRetain(pVM, &pBlkCache, pcszId);
1279
1280
    if (RT_SUCCESS(rc))
1280
1281
    {
1281
 
        pBlkCache->enmType = PDMBLKCACHETYPE_DRV;
1282
 
        pBlkCache->u.Drv.pfnXferComplete = pfnXferComplete;
1283
 
        pBlkCache->u.Drv.pfnXferEnqueue  = pfnXferEnqueue;
1284
 
        pBlkCache->u.Drv.pDrvIns         = pDrvIns;
 
1282
        pBlkCache->enmType                      = PDMBLKCACHETYPE_DRV;
 
1283
        pBlkCache->u.Drv.pfnXferComplete        = pfnXferComplete;
 
1284
        pBlkCache->u.Drv.pfnXferEnqueue         = pfnXferEnqueue;
 
1285
        pBlkCache->u.Drv.pfnXferEnqueueDiscard  = pfnXferEnqueueDiscard;
 
1286
        pBlkCache->u.Drv.pDrvIns                = pDrvIns;
1285
1287
        *ppBlkCache = pBlkCache;
1286
1288
    }
1287
1289
 
1292
1294
VMMR3DECL(int) PDMR3BlkCacheRetainDevice(PVM pVM, PPDMDEVINS pDevIns, PPPDMBLKCACHE ppBlkCache,
1293
1295
                                         PFNPDMBLKCACHEXFERCOMPLETEDEV pfnXferComplete,
1294
1296
                                         PFNPDMBLKCACHEXFERENQUEUEDEV pfnXferEnqueue,
 
1297
                                         PFNPDMBLKCACHEXFERENQUEUEDISCARDDEV pfnXferEnqueueDiscard,
1295
1298
                                         const char *pcszId)
1296
1299
{
1297
1300
    int rc = VINF_SUCCESS;
1300
1303
    rc = pdmR3BlkCacheRetain(pVM, &pBlkCache, pcszId);
1301
1304
    if (RT_SUCCESS(rc))
1302
1305
    {
1303
 
        pBlkCache->enmType = PDMBLKCACHETYPE_DEV;
1304
 
        pBlkCache->u.Dev.pfnXferComplete = pfnXferComplete;
1305
 
        pBlkCache->u.Dev.pfnXferEnqueue  = pfnXferEnqueue;
1306
 
        pBlkCache->u.Dev.pDevIns         = pDevIns;
 
1306
        pBlkCache->enmType                      = PDMBLKCACHETYPE_DEV;
 
1307
        pBlkCache->u.Dev.pfnXferComplete        = pfnXferComplete;
 
1308
        pBlkCache->u.Dev.pfnXferEnqueue         = pfnXferEnqueue;
 
1309
        pBlkCache->u.Dev.pfnXferEnqueueDiscard  = pfnXferEnqueueDiscard;
 
1310
        pBlkCache->u.Dev.pDevIns                = pDevIns;
1307
1311
        *ppBlkCache = pBlkCache;
1308
1312
    }
1309
1313
 
1315
1319
VMMR3DECL(int) PDMR3BlkCacheRetainUsb(PVM pVM, PPDMUSBINS pUsbIns, PPPDMBLKCACHE ppBlkCache,
1316
1320
                                      PFNPDMBLKCACHEXFERCOMPLETEUSB pfnXferComplete,
1317
1321
                                      PFNPDMBLKCACHEXFERENQUEUEUSB pfnXferEnqueue,
 
1322
                                      PFNPDMBLKCACHEXFERENQUEUEDISCARDUSB pfnXferEnqueueDiscard,
1318
1323
                                      const char *pcszId)
1319
1324
{
1320
1325
    int rc = VINF_SUCCESS;
1323
1328
    rc = pdmR3BlkCacheRetain(pVM, &pBlkCache, pcszId);
1324
1329
    if (RT_SUCCESS(rc))
1325
1330
    {
1326
 
        pBlkCache->enmType = PDMBLKCACHETYPE_USB;
1327
 
        pBlkCache->u.Usb.pfnXferComplete = pfnXferComplete;
1328
 
        pBlkCache->u.Usb.pfnXferEnqueue  = pfnXferEnqueue;
1329
 
        pBlkCache->u.Usb.pUsbIns         = pUsbIns;
 
1331
        pBlkCache->enmType                      = PDMBLKCACHETYPE_USB;
 
1332
        pBlkCache->u.Usb.pfnXferComplete        = pfnXferComplete;
 
1333
        pBlkCache->u.Usb.pfnXferEnqueue         = pfnXferEnqueue;
 
1334
        pBlkCache->u.Usb.pfnXferEnqueueDiscard  = pfnXferEnqueueDiscard;
 
1335
        pBlkCache->u.Usb.pUsbIns                = pUsbIns;
1330
1336
        *ppBlkCache = pBlkCache;
1331
1337
    }
1332
1338
 
1338
1344
VMMR3DECL(int) PDMR3BlkCacheRetainInt(PVM pVM, void *pvUser, PPPDMBLKCACHE ppBlkCache,
1339
1345
                                      PFNPDMBLKCACHEXFERCOMPLETEINT pfnXferComplete,
1340
1346
                                      PFNPDMBLKCACHEXFERENQUEUEINT pfnXferEnqueue,
 
1347
                                      PFNPDMBLKCACHEXFERENQUEUEDISCARDINT pfnXferEnqueueDiscard,
1341
1348
                                      const char *pcszId)
1342
1349
{
1343
1350
    int rc = VINF_SUCCESS;
1346
1353
    rc = pdmR3BlkCacheRetain(pVM, &pBlkCache, pcszId);
1347
1354
    if (RT_SUCCESS(rc))
1348
1355
    {
1349
 
        pBlkCache->enmType = PDMBLKCACHETYPE_INTERNAL;
1350
 
        pBlkCache->u.Int.pfnXferComplete = pfnXferComplete;
1351
 
        pBlkCache->u.Int.pfnXferEnqueue  = pfnXferEnqueue;
1352
 
        pBlkCache->u.Int.pvUser          = pvUser;
 
1356
        pBlkCache->enmType                      = PDMBLKCACHETYPE_INTERNAL;
 
1357
        pBlkCache->u.Int.pfnXferComplete        = pfnXferComplete;
 
1358
        pBlkCache->u.Int.pfnXferEnqueue         = pfnXferEnqueue;
 
1359
        pBlkCache->u.Int.pfnXferEnqueueDiscard  = pfnXferEnqueueDiscard;
 
1360
        pBlkCache->u.Int.pvUser                 = pvUser;
1353
1361
        *ppBlkCache = pBlkCache;
1354
1362
    }
1355
1363
 
1536
1544
 
1537
1545
static PPDMBLKCACHEENTRY pdmBlkCacheGetCacheEntryByOffset(PPDMBLKCACHE pBlkCache, uint64_t off)
1538
1546
{
1539
 
    PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
1540
 
    PPDMBLKCACHEENTRY pEntry = NULL;
1541
 
 
1542
 
    STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
 
1547
    STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeGet, Cache);
1543
1548
 
1544
1549
    RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
1545
 
    pEntry = (PPDMBLKCACHEENTRY)RTAvlrU64RangeGet(pBlkCache->pTree, off);
 
1550
    PPDMBLKCACHEENTRY pEntry = (PPDMBLKCACHEENTRY)RTAvlrU64RangeGet(pBlkCache->pTree, off);
1546
1551
    if (pEntry)
1547
1552
        pdmBlkCacheEntryRef(pEntry);
1548
1553
    RTSemRWReleaseRead(pBlkCache->SemRWEntries);
1549
1554
 
1550
 
    STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
 
1555
    STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeGet, Cache);
1551
1556
 
1552
1557
    return pEntry;
1553
1558
}
1564
1569
static void pdmBlkCacheGetCacheBestFitEntryByOffset(PPDMBLKCACHE pBlkCache, uint64_t off,
1565
1570
                                                    PPDMBLKCACHEENTRY *ppEntryAbove)
1566
1571
{
1567
 
    PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
1568
 
 
1569
 
    STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
 
1572
    STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeGet, Cache);
1570
1573
 
1571
1574
    RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
1572
1575
    if (ppEntryAbove)
1578
1581
 
1579
1582
    RTSemRWReleaseRead(pBlkCache->SemRWEntries);
1580
1583
 
1581
 
    STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
 
1584
    STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeGet, Cache);
1582
1585
}
1583
1586
 
1584
1587
static void pdmBlkCacheInsertEntry(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEENTRY pEntry)
1585
1588
{
1586
 
    PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
1587
 
 
1588
 
    STAM_PROFILE_ADV_START(&pCache->StatTreeInsert, Cache);
 
1589
    STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeInsert, Cache);
1589
1590
    RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
1590
1591
    bool fInserted = RTAvlrU64Insert(pBlkCache->pTree, &pEntry->Core);
1591
 
    AssertMsg(fInserted, ("Node was not inserted into tree\n"));
1592
 
    STAM_PROFILE_ADV_STOP(&pCache->StatTreeInsert, Cache);
 
1592
    AssertMsg(fInserted, ("Node was not inserted into tree\n")); NOREF(fInserted);
 
1593
    STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeInsert, Cache);
1593
1594
    RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
1594
1595
}
1595
1596
 
1608
1609
static PPDMBLKCACHEENTRY pdmBlkCacheEntryAlloc(PPDMBLKCACHE pBlkCache,
1609
1610
                                               uint64_t off, size_t cbData, uint8_t *pbBuffer)
1610
1611
{
 
1612
    AssertReturn(cbData <= UINT32_MAX, NULL);
1611
1613
    PPDMBLKCACHEENTRY pEntryNew = (PPDMBLKCACHEENTRY)RTMemAllocZ(sizeof(PDMBLKCACHEENTRY));
1612
1614
 
1613
1615
    if (RT_UNLIKELY(!pEntryNew))
1619
1621
    pEntryNew->fFlags        = 0;
1620
1622
    pEntryNew->cRefs         = 1; /* We are using it now. */
1621
1623
    pEntryNew->pList         = NULL;
1622
 
    pEntryNew->cbData        = cbData;
 
1624
    pEntryNew->cbData        = (uint32_t)cbData;
1623
1625
    pEntryNew->pWaitingHead  = NULL;
1624
1626
    pEntryNew->pWaitingTail  = NULL;
1625
1627
    if (pbBuffer)
1738
1740
}
1739
1741
 
1740
1742
/**
1741
 
 * Calculate aligned offset and size for a new cache entry
1742
 
 * which do not intersect with an already existing entry and the
1743
 
 * file end.
 
1743
 * Calculate aligned offset and size for a new cache entry which do not
 
1744
 * intersect with an already existing entry and the file end.
1744
1745
 *
1745
1746
 * @returns The number of bytes the entry can hold of the requested amount
1746
 
 *          of byte.
1747
 
 * @param   pEndpoint        The endpoint.
1748
 
 * @param   pBlkCache   The endpoint cache.
1749
 
 * @param   off              The start offset.
1750
 
 * @param   cb               The number of bytes the entry needs to hold at least.
1751
 
 * @param   uAlignment       Alignment of the boundary sizes.
1752
 
 * @param   poffAligned      Where to store the aligned offset.
1753
 
 * @param   pcbAligned       Where to store the aligned size of the entry.
 
1747
 *          of bytes.
 
1748
 * @param   pEndpoint       The endpoint.
 
1749
 * @param   pBlkCache       The endpoint cache.
 
1750
 * @param   off             The start offset.
 
1751
 * @param   cb              The number of bytes the entry needs to hold at
 
1752
 *                          least.
 
1753
 * @param   pcbEntry        Where to store the number of bytes the entry can hold.
 
1754
 *                          Can be less than given because of other entries.
1754
1755
 */
1755
 
static size_t pdmBlkCacheEntryBoundariesCalc(PPDMBLKCACHE pBlkCache,
1756
 
                                             uint64_t off, size_t cb,
1757
 
                                             unsigned uAlignment,
1758
 
                                             uint64_t *poffAligned, size_t *pcbAligned)
 
1756
static uint32_t pdmBlkCacheEntryBoundariesCalc(PPDMBLKCACHE pBlkCache,
 
1757
                                               uint64_t off, uint32_t cb,
 
1758
                                               uint32_t *pcbEntry)
1759
1759
{
1760
 
    size_t cbAligned;
1761
 
    size_t cbInEntry = 0;
1762
 
    uint64_t offAligned;
 
1760
    /* Get the best fit entries around the offset */
1763
1761
    PPDMBLKCACHEENTRY pEntryAbove = NULL;
1764
 
 
1765
 
    /* Get the best fit entries around the offset */
1766
1762
    pdmBlkCacheGetCacheBestFitEntryByOffset(pBlkCache, off, &pEntryAbove);
1767
1763
 
1768
1764
    /* Log the info */
1773
1769
             pEntryAbove ? pEntryAbove->Core.KeyLast : 0,
1774
1770
             pEntryAbove ? pEntryAbove->cbData : 0));
1775
1771
 
1776
 
    offAligned = off;
1777
 
 
 
1772
    uint32_t cbNext;
 
1773
    uint32_t cbInEntry;
1778
1774
    if (    pEntryAbove
1779
1775
        &&  off + cb > pEntryAbove->Core.Key)
1780
1776
    {
1781
 
        cbInEntry = pEntryAbove->Core.Key - off;
1782
 
        cbAligned = pEntryAbove->Core.Key - offAligned;
 
1777
        cbInEntry = (uint32_t)(pEntryAbove->Core.Key - off);
 
1778
        cbNext = (uint32_t)(pEntryAbove->Core.Key - off);
1783
1779
    }
1784
1780
    else
1785
1781
    {
1786
 
        cbAligned = cb;
1787
1782
        cbInEntry = cb;
 
1783
        cbNext    = cb;
1788
1784
    }
1789
1785
 
1790
1786
    /* A few sanity checks */
1791
 
    AssertMsg(!pEntryAbove || (offAligned + cbAligned) <= pEntryAbove->Core.Key,
 
1787
    AssertMsg(!pEntryAbove || off + cbNext <= pEntryAbove->Core.Key,
1792
1788
              ("Aligned size intersects with another cache entry\n"));
1793
 
    Assert(cbInEntry <= cbAligned);
 
1789
    Assert(cbInEntry <= cbNext);
1794
1790
 
1795
1791
    if (pEntryAbove)
1796
1792
        pdmBlkCacheEntryRelease(pEntryAbove);
1797
1793
 
1798
 
    LogFlow(("offAligned=%llu cbAligned=%u\n", offAligned, cbAligned));
 
1794
    LogFlow(("off=%llu cbNext=%u\n", off, cbNext));
1799
1795
 
1800
 
    *poffAligned = offAligned;
1801
 
    *pcbAligned  = cbAligned;
 
1796
    *pcbEntry  = cbNext;
1802
1797
 
1803
1798
    return cbInEntry;
1804
1799
}
1812
1807
 * @param   pBlkCache    The endpoint cache.
1813
1808
 * @param   off               The offset.
1814
1809
 * @param   cb                Number of bytes the cache entry should have.
1815
 
 * @param   uAlignment        Alignment the size of the entry should have.
1816
1810
 * @param   pcbData           Where to store the number of bytes the new
1817
1811
 *                            entry can hold. May be lower than actually requested
1818
1812
 *                            due to another entry intersecting the access range.
1819
1813
 */
1820
1814
static PPDMBLKCACHEENTRY pdmBlkCacheEntryCreate(PPDMBLKCACHE pBlkCache,
1821
1815
                                                uint64_t off, size_t cb,
1822
 
                                                unsigned uAlignment,
1823
1816
                                                size_t *pcbData)
1824
1817
{
1825
 
    uint64_t offStart = 0;
1826
 
    size_t cbEntry = 0;
 
1818
    uint32_t cbEntry  = 0;
 
1819
 
 
1820
    *pcbData = pdmBlkCacheEntryBoundariesCalc(pBlkCache, off, (uint32_t)cb, &cbEntry);
 
1821
    AssertReturn(cb <= UINT32_MAX, NULL);
 
1822
 
 
1823
    PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
 
1824
    pdmBlkCacheLockEnter(pCache);
 
1825
 
1827
1826
    PPDMBLKCACHEENTRY pEntryNew = NULL;
1828
 
    PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
1829
 
    uint8_t *pbBuffer = NULL;
1830
 
 
1831
 
    *pcbData = pdmBlkCacheEntryBoundariesCalc(pBlkCache, off, cb, uAlignment,
1832
 
                                              &offStart, &cbEntry);
1833
 
 
1834
 
    pdmBlkCacheLockEnter(pCache);
 
1827
    uint8_t          *pbBuffer  = NULL;
1835
1828
    bool fEnough = pdmBlkCacheReclaim(pCache, cbEntry, true, &pbBuffer);
1836
 
 
1837
1829
    if (fEnough)
1838
1830
    {
1839
1831
        LogFlow(("Evicted enough bytes (%u requested). Creating new cache entry\n", cbEntry));
1840
1832
 
1841
 
        pEntryNew = pdmBlkCacheEntryAlloc(pBlkCache, offStart, cbEntry, pbBuffer);
 
1833
        pEntryNew = pdmBlkCacheEntryAlloc(pBlkCache, off, cbEntry, pbBuffer);
1842
1834
        if (RT_LIKELY(pEntryNew))
1843
1835
        {
1844
1836
            pdmBlkCacheEntryAddToList(&pCache->LruRecentlyUsedIn, pEntryNew);
1849
1841
 
1850
1842
            AssertMsg(   (off >= pEntryNew->Core.Key)
1851
1843
                      && (off + *pcbData <= pEntryNew->Core.KeyLast + 1),
1852
 
                      ("Overflow in calculation off=%llu OffsetAligned=%llu\n",
1853
 
                       off, pEntryNew->Core.Key));
 
1844
                      ("Overflow in calculation off=%llu\n", off));
1854
1845
        }
1855
1846
        else
1856
1847
            pdmBlkCacheLockLeave(pCache);
2083
2074
            /* No entry found for this offset. Create a new entry and fetch the data to the cache. */
2084
2075
            PPDMBLKCACHEENTRY pEntryNew = pdmBlkCacheEntryCreate(pBlkCache,
2085
2076
                                                                 off, cbRead,
2086
 
                                                                 PAGE_SIZE,
2087
2077
                                                                 &cbToRead);
2088
2078
 
2089
2079
            cbRead -= cbToRead;
2179
2169
        size_t cbToWrite;
2180
2170
 
2181
2171
        pEntry = pdmBlkCacheGetCacheEntryByOffset(pBlkCache, off);
2182
 
 
2183
2172
        if (pEntry)
2184
2173
        {
2185
2174
            /* Write the data into the entry and mark it as dirty */
2212
2201
                {
2213
2202
                    /* If it is already dirty but not in progress just update the data. */
2214
2203
                    if (!(pEntry->fFlags & PDMBLKCACHE_ENTRY_IO_IN_PROGRESS))
2215
 
                    {
2216
 
                        RTSgBufCopyToBuf(&SgBuf, pEntry->pbData + offDiff,
2217
 
                                         cbToWrite);
2218
 
                    }
 
2204
                        RTSgBufCopyToBuf(&SgBuf, pEntry->pbData + offDiff, cbToWrite);
2219
2205
                    else
2220
2206
                    {
2221
2207
                        /* The data isn't written to the file yet */
2233
2219
                     * Check if a read is in progress for this entry.
2234
2220
                     * We have to defer processing in that case.
2235
2221
                     */
2236
 
                    if(pdmBlkCacheEntryFlagIsSetClearAcquireLock(pBlkCache, pEntry,
2237
 
                                                                 PDMBLKCACHE_ENTRY_IO_IN_PROGRESS,
2238
 
                                                                 0))
 
2222
                    if (pdmBlkCacheEntryFlagIsSetClearAcquireLock(pBlkCache, pEntry,
 
2223
                                                                  PDMBLKCACHE_ENTRY_IO_IN_PROGRESS,
 
2224
                                                                  0))
2239
2225
                    {
2240
2226
                        pdmBlkCacheEntryWaitersAdd(pEntry, pReq,
2241
2227
                                                   &SgBuf, offDiff, cbToWrite,
2319
2305
             */
2320
2306
            PPDMBLKCACHEENTRY pEntryNew = pdmBlkCacheEntryCreate(pBlkCache,
2321
2307
                                                                 off, cbWrite,
2322
 
                                                                 512, &cbToWrite);
 
2308
                                                                 &cbToWrite);
2323
2309
 
2324
2310
            cbWrite -= cbToWrite;
2325
2311
 
2407
2393
    return VINF_AIO_TASK_PENDING;
2408
2394
}
2409
2395
 
 
2396
VMMR3DECL(int) PDMR3BlkCacheDiscard(PPDMBLKCACHE pBlkCache, PCRTRANGE paRanges,
 
2397
                                    unsigned cRanges, void *pvUser)
 
2398
{
 
2399
    int rc = VINF_SUCCESS;
 
2400
    PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
 
2401
    PPDMBLKCACHEENTRY pEntry;
 
2402
    PPDMBLKCACHEREQ pReq;
 
2403
 
 
2404
    LogFlowFunc((": pBlkCache=%#p{%s} paRanges=%#p cRanges=%u pvUser=%#p\n",
 
2405
                 pBlkCache, pBlkCache->pszId, paRanges, cRanges, pvUser));
 
2406
 
 
2407
    AssertPtrReturn(pBlkCache, VERR_INVALID_POINTER);
 
2408
    AssertReturn(!pBlkCache->fSuspended, VERR_INVALID_STATE);
 
2409
 
 
2410
    /* Allocate new request structure. */
 
2411
    pReq = pdmBlkCacheReqAlloc(pvUser);
 
2412
    if (RT_UNLIKELY(!pReq))
 
2413
        return VERR_NO_MEMORY;
 
2414
 
 
2415
    /* Increment data transfer counter to keep the request valid while we access it. */
 
2416
    ASMAtomicIncU32(&pReq->cXfersPending);
 
2417
 
 
2418
    for (unsigned i = 0; i < cRanges; i++)
 
2419
    {
 
2420
        uint64_t offCur = paRanges[i].offStart;
 
2421
        size_t cbLeft = paRanges[i].cbRange;
 
2422
 
 
2423
        while (cbLeft)
 
2424
        {
 
2425
            size_t cbThisDiscard = 0;
 
2426
 
 
2427
            pEntry = pdmBlkCacheGetCacheEntryByOffset(pBlkCache, offCur);
 
2428
 
 
2429
            if (pEntry)
 
2430
            {
 
2431
                /* Write the data into the entry and mark it as dirty */
 
2432
                AssertPtr(pEntry->pList);
 
2433
 
 
2434
                uint64_t offDiff = offCur - pEntry->Core.Key;
 
2435
 
 
2436
                AssertMsg(offCur >= pEntry->Core.Key,
 
2437
                          ("Overflow in calculation offCur=%llu OffsetAligned=%llu\n",
 
2438
                          offCur, pEntry->Core.Key));
 
2439
 
 
2440
                cbThisDiscard = RT_MIN(pEntry->cbData - offDiff, cbLeft);
 
2441
 
 
2442
                /* Ghost lists contain no data. */
 
2443
                if (   (pEntry->pList == &pCache->LruRecentlyUsedIn)
 
2444
                    || (pEntry->pList == &pCache->LruFrequentlyUsed))
 
2445
                {
 
2446
                    /* Check if the entry is dirty. */
 
2447
                    if (pdmBlkCacheEntryFlagIsSetClearAcquireLock(pBlkCache, pEntry,
 
2448
                                                                  PDMBLKCACHE_ENTRY_IS_DIRTY,
 
2449
                                                                  0))
 
2450
                    {
 
2451
                        /* If it is dirty but not yet in progress remove it. */
 
2452
                        if (!(pEntry->fFlags & PDMBLKCACHE_ENTRY_IO_IN_PROGRESS))
 
2453
                        {
 
2454
                            pdmBlkCacheLockEnter(pCache);
 
2455
                            pdmBlkCacheEntryRemoveFromList(pEntry);
 
2456
 
 
2457
                            STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
 
2458
                            RTAvlrU64Remove(pBlkCache->pTree, pEntry->Core.Key);
 
2459
                            STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
 
2460
 
 
2461
                            pdmBlkCacheLockLeave(pCache);
 
2462
 
 
2463
                            RTMemFree(pEntry);
 
2464
                        }
 
2465
                        else
 
2466
                        {
 
2467
#if 0
 
2468
                            /* The data isn't written to the file yet */
 
2469
                            pdmBlkCacheEntryWaitersAdd(pEntry, pReq,
 
2470
                                                       &SgBuf, offDiff, cbToWrite,
 
2471
                                                       true /* fWrite */);
 
2472
                            STAM_COUNTER_INC(&pBlkCache->StatWriteDeferred);
 
2473
#endif
 
2474
                        }
 
2475
 
 
2476
                        RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
 
2477
                        pdmBlkCacheEntryRelease(pEntry);
 
2478
                    }
 
2479
                    else /* Dirty bit not set */
 
2480
                    {
 
2481
                        /*
 
2482
                         * Check if a read is in progress for this entry.
 
2483
                         * We have to defer processing in that case.
 
2484
                         */
 
2485
                        if(pdmBlkCacheEntryFlagIsSetClearAcquireLock(pBlkCache, pEntry,
 
2486
                                                                     PDMBLKCACHE_ENTRY_IO_IN_PROGRESS,
 
2487
                                                                     0))
 
2488
                        {
 
2489
#if 0
 
2490
                            pdmBlkCacheEntryWaitersAdd(pEntry, pReq,
 
2491
                                                       &SgBuf, offDiff, cbToWrite,
 
2492
                                                       true /* fWrite */);
 
2493
#endif
 
2494
                            STAM_COUNTER_INC(&pBlkCache->StatWriteDeferred);
 
2495
                            RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
 
2496
                            pdmBlkCacheEntryRelease(pEntry);
 
2497
                        }
 
2498
                        else /* I/O in progress flag not set */
 
2499
                        {
 
2500
                            pdmBlkCacheLockEnter(pCache);
 
2501
                            pdmBlkCacheEntryRemoveFromList(pEntry);
 
2502
 
 
2503
                            RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
 
2504
                            STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
 
2505
                            RTAvlrU64Remove(pBlkCache->pTree, pEntry->Core.Key);
 
2506
                            STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
 
2507
                            RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
 
2508
 
 
2509
                            pdmBlkCacheLockLeave(pCache);
 
2510
 
 
2511
                            RTMemFree(pEntry);
 
2512
                        }
 
2513
                    } /* Dirty bit not set */
 
2514
                }
 
2515
                else /* Entry is on the ghost list just remove cache entry. */
 
2516
                {
 
2517
                    pdmBlkCacheLockEnter(pCache);
 
2518
                    pdmBlkCacheEntryRemoveFromList(pEntry);
 
2519
 
 
2520
                    RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
 
2521
                    STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
 
2522
                    RTAvlrU64Remove(pBlkCache->pTree, pEntry->Core.Key);
 
2523
                    STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
 
2524
                    RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
 
2525
 
 
2526
                    pdmBlkCacheLockLeave(pCache);
 
2527
 
 
2528
                    RTMemFree(pEntry);
 
2529
                }
 
2530
            }
 
2531
            /* else: no entry found. */
 
2532
 
 
2533
            offCur += cbThisDiscard;
 
2534
            cbLeft -= cbThisDiscard;
 
2535
        }
 
2536
    }
 
2537
 
 
2538
    if (!pdmBlkCacheReqUpdate(pBlkCache, pReq, rc, false))
 
2539
        rc = VINF_AIO_TASK_PENDING;
 
2540
 
 
2541
    LogFlowFunc((": Leave rc=%Rrc\n", rc));
 
2542
 
 
2543
    return rc;
 
2544
}
 
2545
 
2410
2546
/**
2411
2547
 * Completes a task segment freeing all resources and completes the task handle
2412
2548
 * if everything was transferred.
2422
2558
    PPDMBLKCACHEWAITER pNext = pWaiter->pNext;
2423
2559
    PPDMBLKCACHEREQ pReq = pWaiter->pReq;
2424
2560
 
2425
 
    pdmBlkCacheReqUpdate(pBlkCache, pWaiter->pReq, rc, true);
 
2561
    pdmBlkCacheReqUpdate(pBlkCache, pReq, rc, true);
2426
2562
 
2427
2563
    RTMemFree(pWaiter);
2428
2564
 
2546
2682
 */
2547
2683
static int pdmBlkCacheEntryQuiesce(PAVLRU64NODECORE pNode, void *pvUser)
2548
2684
{
2549
 
    PPDMBLKCACHEENTRY  pEntry = (PPDMBLKCACHEENTRY)pNode;
2550
 
    PPDMBLKCACHE pBlkCache = pEntry->pBlkCache;
 
2685
    PPDMBLKCACHEENTRY   pEntry    = (PPDMBLKCACHEENTRY)pNode;
 
2686
    PPDMBLKCACHE        pBlkCache = pEntry->pBlkCache;
 
2687
    NOREF(pvUser);
2551
2688
 
2552
2689
    while (ASMAtomicReadU32(&pEntry->fFlags) & PDMBLKCACHE_ENTRY_IO_IN_PROGRESS)
2553
2690
    {