~ubuntu-branches/ubuntu/karmic/virtualbox-ose/karmic-updates


Viewing changes to src/VBox/VMM/HWACCM.cpp

  • Committer: Bazaar Package Importer
  • Author(s): Felix Geyer
  • Date: 2009-09-14 18:25:07 UTC
  • mfrom: (0.4.1 squeeze)
  • Revision ID: james.westby@ubuntu.com-20090914182507-c98g07mq16hjmn6d
Tags: 3.0.6-dfsg-1ubuntu1
* Merge from debian unstable (LP: #429697), remaining changes:
  - Enable DKMS support on virtualbox host and guest modules (LP: #267097)
    - Drop virtualbox-ose{-guest,}-modules-* package templates
    - Recommend *-source instead of *-modules packages
    - Replace error messages related to missing/mismatched
      kernel module accordingly
  - Autoload kernel module
    - LOAD_VBOXDRV_MODULE=1 in virtualbox-ose.default
  - Disable update action
    - patches/u01-disable-update-action.dpatch
  - Virtualbox should go in Accessories, not in System tools (LP: #288590)
    - virtualbox-ose-qt.files/virtualbox-ose.desktop
  - Add apport hook
    - virtualbox-ose.files/source_virtualbox-ose.py
    - virtualbox-ose.install
  - Add launchpad integration
    - control
    - lpi-bug.xpm
    - patches/u02-lp-integration.dpatch
  - virtualbox, virtualbox-* (names of the upstream proprietary packages)
    conflict with virtualbox-ose (LP: #379878)
* Make debug package depend on normal or guest utils package
* Drop patches/22-pulseaudio-stubs.dpatch (applied upstream)
* Rename Ubuntu specific patches to uXX-*.dpatch
* Fix lintian warnings in maintainer scripts
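A note on the "Autoload kernel module" item above: the LOAD_VBOXDRV_MODULE=1 setting is shipped via the packaging file virtualbox-ose.default. A minimal sketch of the resulting defaults file, assuming it is installed as /etc/default/virtualbox-ose and sourced by the package's init script (both assumptions; only the variable itself is taken from the changelog entry):

    # /etc/default/virtualbox-ose  (sketch only; path is an assumption)
    # When set to 1, the init script loads the vboxdrv kernel module at boot.
    LOAD_VBOXDRV_MODULE=1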

@@ -258,7 +258,7 @@
     EXIT_REASON(SVM_EXIT_TASK_MONITOR               ,138, "MONITOR instruction."),
     EXIT_REASON(SVM_EXIT_MWAIT_UNCOND               ,139, "MWAIT instruction unconditional."),
     EXIT_REASON(SVM_EXIT_MWAIT_ARMED                ,140, "MWAIT instruction when armed."),
-    EXIT_REASON(SVM_EXIT_MWAIT_NPF                  ,1024, "Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault)."),
+    EXIT_REASON(SVM_EXIT_NPF                        ,1024, "Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault)."),
     EXIT_REASON_NIL()
 };
 # undef EXIT_REASON
@@ -326,7 +326,7 @@
     /*
      * Check CFGM options.
      */
-    PCFGMNODE pRoot      = CFGMR3GetRoot(pVM);
+    PCFGMNODE pRoot      = CFGMR3GetRoot(pVM); 
     PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
     /* Nested paging: disabled by default. */
     rc = CFGMR3QueryBoolDef(pRoot, "EnableNestedPaging", &pVM->hwaccm.s.fAllowNestedPaging, false);
@@ -340,6 +340,10 @@
     rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hwaccm.s.fAllowed, false);
     AssertRC(rc);
 
+    /* TPR patching for 32 bits (Windows) guests with IO-APIC: disabled by default. */
+    rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hwaccm.s.fTRPPatchingAllowed, false);
+    AssertRC(rc);
+
 #ifdef RT_OS_DARWIN
     if (VMMIsHwVirtExtForced(pVM) != pVM->hwaccm.s.fAllowed)
 #else
@@ -400,6 +404,11 @@
     }
 
 #ifdef VBOX_WITH_STATISTICS
+    STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchSuccess, STAMTYPE_COUNTER,   "/HWACCM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
+    STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchFailure, STAMTYPE_COUNTER,   "/HWACCM/TPR/Patch/Failed",  STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
+    STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
+    STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Failed",  STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
+    
     /*
      * Statistics.
      */
@@ -501,7 +510,8 @@
 
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset,              "/HWACCM/CPU%d/TSC/Offset");
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept,           "/HWACCM/CPU%d/TSC/Intercept");
-
+        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow,   "/HWACCM/CPU%d/TSC/InterceptOverflow");
+        
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxArmed,               "/HWACCM/CPU%d/Debug/Armed");
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxContextSwitch,       "/HWACCM/CPU%d/Debug/ContextSwitch");
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxIOCheck,             "/HWACCM/CPU%d/Debug/IOCheck");
@@ -527,10 +537,12 @@
             const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
             for (int j=0;j<MAX_EXITREASON_STAT;j++)
             {
-                rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
-                                     papszDesc[j] ? papszDesc[j] : "Exit reason",
-                                     "/HWACCM/CPU%d/Exit/Reason/%02x", i, j);
-                AssertRC(rc);
+                if (papszDesc[j])
+                {
+                    rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+                                        papszDesc[j], "/HWACCM/CPU%d/Exit/Reason/%02x", i, j);
+                    AssertRC(rc);
+                }
             }
             rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HWACCM/CPU%d/Exit/Reason/#NPF", i);
             AssertRC(rc);
@@ -624,6 +636,24 @@
         return VINF_SUCCESS;
     }
 
+    if (pVM->hwaccm.s.vmx.fSupported)
+    {
+        rc = SUPR3QueryVTxSupported();
+        if (RT_FAILURE(rc))
+        {
+#ifdef RT_OS_LINUX
+            LogRel(("HWACCM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));
+#else
+            LogRel(("HWACCM: The host kernel does not support VT-x!\n"));
+#endif
+            if (pVM->cCPUs > 1)
+                return rc;
+
+            /* silently fall back to raw mode */
+            return VINF_SUCCESS;
+        }
+    }
+
     if (!pVM->hwaccm.s.fAllowed)
         return VINF_SUCCESS;    /* nothing to do */
 
@@ -637,6 +667,12 @@
     Assert(!pVM->fHWACCMEnabled || VMMIsHwVirtExtForced(pVM));
 
     pVM->hwaccm.s.fHasIoApic = PDMHasIoApic(pVM);
+    /* No TPR patching is required when the IO-APIC is not enabled for this VM. (Main should have taken care of this already) */
+    if (!pVM->hwaccm.s.fHasIoApic)
+    {
+        Assert(!pVM->hwaccm.s.fTRPPatchingAllowed); /* paranoia */
+        pVM->hwaccm.s.fTRPPatchingAllowed = false;
+    }
 
     if (pVM->hwaccm.s.vmx.fSupported)
     {
@@ -938,10 +974,15 @@
             LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM        = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));
 
             LogRel(("HWACCM: TPR shadow physaddr           = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
-            LogRel(("HWACCM: MSR bitmap physaddr           = %RHp\n", pVM->hwaccm.s.vmx.pMSRBitmapPhys));
+
+            /* Paranoia */
+            AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
 
             for (unsigned i=0;i<pVM->cCPUs;i++)
-                LogRel(("HWACCM: VMCS physaddr VCPU%d           = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
+            {
+                LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr      = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
+                LogRel(("HWACCM: VCPU%d: VMCS physaddr            = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
+            }
 
 #ifdef HWACCM_VTX_WITH_EPT
             if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
@@ -1133,6 +1174,8 @@
                 LogRel((pVM->hwaccm.s.fAllow64BitGuests
                         ? "HWACCM:    32-bit and 64-bit guest supported.\n"
                         : "HWACCM:    32-bit guest supported.\n"));
+
+                LogRel(("HWACCM:    TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
             }
             else
             {
@@ -1377,6 +1420,565 @@
         pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
 #endif
     }
+
+    /* Clear all patch information. */
+    pVM->hwaccm.s.pGuestPatchMem         = 0;
+    pVM->hwaccm.s.pFreeGuestPatchMem     = 0;
+    pVM->hwaccm.s.cbGuestPatchMem        = 0;
+    pVM->hwaccm.s.svm.cPatches           = 0;
+    pVM->hwaccm.s.svm.PatchTree          = 0;
+    pVM->hwaccm.s.svm.fTPRPatchingActive = false;
+    ASMMemZero32(pVM->hwaccm.s.svm.aPatches, sizeof(pVM->hwaccm.s.svm.aPatches));
+}
+
+/**
+ * Callback to patch a TPR instruction (vmmcall or mov cr8)
+ *
+ * @returns VBox status code.
+ * @param   pVM     The VM handle.
+ * @param   pVCpu   The VMCPU for the EMT we're being called on. 
+ * @param   pvUser  Unused
+ *
+ */
+DECLCALLBACK(int) hwaccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+    VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
+
+    /* Only execute the handler on the VCPU the original patch request was issued. */
+    if (pVCpu->idCpu != idCpu)
+        return VINF_SUCCESS;
+
+    Log(("hwaccmR3RemovePatches\n"));
+    for (unsigned i = 0; i < pVM->hwaccm.s.svm.cPatches; i++)
+    {
+        uint8_t         szInstr[15];
+        PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[i];
+        RTGCPTR         pInstrGC = (RTGCPTR)pPatch->Core.Key;
+        int             rc;
+
+#ifdef LOG_ENABLED
+        char            szOutput[256];
+
+        rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, 0, szOutput, sizeof(szOutput), 0);
+        if (VBOX_SUCCESS(rc))
+            Log(("Patched instr: %s\n", szOutput));
+#endif
+
+        /* Check if the instruction is still the same. */
+        rc = PGMPhysSimpleReadGCPtr(pVCpu, szInstr, pInstrGC, pPatch->cbNewOp);
+        if (rc != VINF_SUCCESS)
+        {
+            Log(("Patched code removed? (rc=%Rrc0\n", rc));
+            continue;   /* swapped out or otherwise removed; skip it. */
+        }
+
+        if (memcmp(szInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
+        {
+            Log(("Patched instruction was changed! (rc=%Rrc0\n", rc));
+            continue;   /* skip it. */
+        }
+
+        rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
+        AssertRC(rc);
+
+#ifdef LOG_ENABLED
+        rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, 0, szOutput, sizeof(szOutput), 0);
+        if (VBOX_SUCCESS(rc))
+            Log(("Original instr: %s\n", szOutput));
+#endif
+    }
+    pVM->hwaccm.s.svm.cPatches        = 0;
+    pVM->hwaccm.s.svm.PatchTree       = 0;
+    pVM->hwaccm.s.pFreeGuestPatchMem  = pVM->hwaccm.s.pGuestPatchMem;
+    pVM->hwaccm.s.svm.fTPRPatchingActive = false;
+    return VINF_SUCCESS;
+}
+
+/**
+ * Enable patching in a VT-x/AMD-V guest
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM to operate on.
+ * @param   idCpu       VCPU to execute hwaccmR3RemovePatches on
+ * @param   pPatchMem   Patch memory range
+ * @param   cbPatchMem  Size of the memory range
+ */
+int hwaccmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
+{
+    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)idCpu);
+    AssertRC(rc);
+
+    pVM->hwaccm.s.pGuestPatchMem      = pPatchMem;
+    pVM->hwaccm.s.pFreeGuestPatchMem  = pPatchMem;
+    pVM->hwaccm.s.cbGuestPatchMem     = cbPatchMem;
+    return VINF_SUCCESS;
+}
+
+/**
+ * Enable patching in a VT-x/AMD-V guest
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM to operate on.
+ * @param   pPatchMem   Patch memory range
+ * @param   cbPatchMem  Size of the memory range
+ */
+VMMR3DECL(int)  HWACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
+{
+    Log(("HWACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
+
+    /* Current TPR patching only applies to AMD cpus.
+     * Needs to be extended to Intel CPUs without the APIC TPR hardware optimization.
+     */
+    if (CPUMGetCPUVendor(pVM) != CPUMCPUVENDOR_AMD)
+        return VERR_NOT_SUPPORTED;
+
+    if (pVM->cCPUs > 1)
+    {
+        /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
+        PVMREQ pReq;
+        int rc = VMR3ReqCallU(pVM->pUVM, VMCPUID_ANY_QUEUE, &pReq, 0, VMREQFLAGS_NO_WAIT,
+                              (PFNRT)hwaccmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
+        AssertRC(rc);
+        return rc;
+    }
+    else
+        return hwaccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
+}
+
+/**
+ * Disable patching in a VT-x/AMD-V guest
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM to operate on.
+ * @param   pPatchMem   Patch memory range
+ * @param   cbPatchMem  Size of the memory range
+ */
+VMMR3DECL(int)  HWACMMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
+{
+    Log(("HWACMMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
+
+    Assert(pVM->hwaccm.s.pGuestPatchMem == pPatchMem);
+    Assert(pVM->hwaccm.s.cbGuestPatchMem == cbPatchMem);
+
+    /* @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
+    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)VMMGetCpuId(pVM));
+    AssertRC(rc);
+
+    pVM->hwaccm.s.pGuestPatchMem      = 0;
+    pVM->hwaccm.s.pFreeGuestPatchMem  = 0;
+    pVM->hwaccm.s.cbGuestPatchMem     = 0;
+    pVM->hwaccm.s.svm.fTPRPatchingActive = false;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Callback to patch a TPR instruction (vmmcall or mov cr8)
+ *
+ * @returns VBox status code.
+ * @param   pVM     The VM handle.
+ * @param   pVCpu   The VMCPU for the EMT we're being called on. 
+ * @param   pvUser  User specified CPU context
+ *
+ */
+DECLCALLBACK(int) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+    VMCPUID      idCpu  = (VMCPUID)(uintptr_t)pvUser;
+    PCPUMCTX     pCtx   = CPUMQueryGuestCtxPtr(pVCpu);
+    RTGCPTR      oldrip = pCtx->rip;
+    PDISCPUSTATE pDis   = &pVCpu->hwaccm.s.DisState;
+    unsigned     cbOp;
+
+    /* Only execute the handler on the VCPU the original patch request was issued. (the other CPU(s) might not yet have switched to protected mode) */
+    if (pVCpu->idCpu != idCpu)
+        return VINF_SUCCESS;
+
+    Log(("hwaccmR3ReplaceTprInstr: %RGv\n", pCtx->rip));
+
+    /* Two or more VCPUs were racing to patch this instruction. */
+    PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.svm.PatchTree, (AVLOU32KEY)pCtx->eip);
+    if (pPatch)
+        return VINF_SUCCESS;
+
+    Assert(pVM->hwaccm.s.svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches));
+
+    int rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
+    AssertRC(rc);
+    if (    rc == VINF_SUCCESS
+        &&  pDis->pCurInstr->opcode == OP_MOV
+        &&  cbOp >= 3)
+    {
+        uint8_t         aVMMCall[3] = { 0xf, 0x1, 0xd9};
+        uint32_t        idx = pVM->hwaccm.s.svm.cPatches;
+        PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
+
+        rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
+        AssertRC(rc);
+
+        pPatch->cbOp     = cbOp;
+
+        if (pDis->param1.flags == USE_DISPLACEMENT32)
+        {
+            /* write. */
+            if (pDis->param2.flags == USE_REG_GEN32)
+            {
+                pPatch->enmType     = HWACCMTPRINSTR_WRITE_REG;
+                pPatch->uSrcOperand = pDis->param2.base.reg_gen;
+            }
+            else
+            {
+                Assert(pDis->param2.flags == USE_IMMEDIATE32);
+                pPatch->enmType     = HWACCMTPRINSTR_WRITE_IMM;
+                pPatch->uSrcOperand = pDis->param2.parval;
+            }
+            rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
+            AssertRC(rc);
+
+            memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
+            pPatch->cbNewOp = sizeof(aVMMCall);
+        }
+        else
+        {
+            RTGCPTR  oldrip   = pCtx->rip;
+            uint32_t oldcbOp  = cbOp;
+            uint32_t uMmioReg = pDis->param1.base.reg_gen;
+
+            /* read */
+            Assert(pDis->param1.flags == USE_REG_GEN32);
+
+            /* Found:
+                *   mov eax, dword [fffe0080]        (5 bytes)
+                * Check if next instruction is:
+                *   shr eax, 4
+                */
+            pCtx->rip += cbOp;
+            rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
+            pCtx->rip = oldrip;
+            if (    rc == VINF_SUCCESS
+                &&  pDis->pCurInstr->opcode == OP_SHR
+                &&  pDis->param1.flags == USE_REG_GEN32
+                &&  pDis->param1.base.reg_gen == uMmioReg
+                &&  pDis->param2.flags == USE_IMMEDIATE8
+                &&  pDis->param2.parval == 4
+                &&  oldcbOp + cbOp < sizeof(pVM->hwaccm.s.svm.aPatches[idx].aOpcode))
+            {
+                uint8_t szInstr[15];
+
+                /* Replacing two instructions now. */
+                rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, oldcbOp + cbOp);
+                AssertRC(rc);
+
+                pPatch->cbOp = oldcbOp + cbOp;
+
+                /* 0xF0, 0x0F, 0x20, 0xC0 = mov eax, cr8 */
+                szInstr[0] = 0xF0;
+                szInstr[1] = 0x0F;
+                szInstr[2] = 0x20;
+                szInstr[3] = 0xC0 | pDis->param1.base.reg_gen;
+                for (unsigned i = 4; i < pPatch->cbOp; i++)
+                    szInstr[i] = 0x90;  /* nop */
+
+                rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, szInstr, pPatch->cbOp);
+                AssertRC(rc);
+
+                memcpy(pPatch->aNewOpcode, szInstr, pPatch->cbOp);
+                pPatch->cbNewOp = pPatch->cbOp;
+
+                Log(("Acceptable read/shr candidate!\n"));
+                pPatch->enmType = HWACCMTPRINSTR_READ_SHR4;
+            }
+            else
+            {
+                pPatch->enmType     = HWACCMTPRINSTR_READ;
+                pPatch->uDstOperand = pDis->param1.base.reg_gen;
+
+                rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
+                AssertRC(rc);
+
+                memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
+                pPatch->cbNewOp = sizeof(aVMMCall);
+            }
+        }
+
+        pPatch->Core.Key = pCtx->eip;
+        rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
+        AssertRC(rc);
+
+        pVM->hwaccm.s.svm.cPatches++;
+        STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceSuccess);
+        return VINF_SUCCESS;
+    }
+
+    /* Save invalid patch, so we will not try again. */
+    uint32_t  idx = pVM->hwaccm.s.svm.cPatches;
+
+#ifdef LOG_ENABLED
+    char      szOutput[256];
+    rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
+    if (VBOX_SUCCESS(rc))
+        Log(("Failed to patch instr: %s\n", szOutput));
+#endif
+
+    pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
+    pPatch->Core.Key = pCtx->eip;
+    pPatch->enmType  = HWACCMTPRINSTR_INVALID;
+    rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
+    AssertRC(rc);
+    pVM->hwaccm.s.svm.cPatches++;
+    STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceFailure);
+    return VINF_SUCCESS;
+}
+
+/**
+ * Callback to patch a TPR instruction (jump to generated code)
+ *
+ * @returns VBox status code.
+ * @param   pVM     The VM handle.
+ * @param   pVCpu   The VMCPU for the EMT we're being called on. 
+ * @param   pvUser  User specified CPU context
+ *
+ */
+DECLCALLBACK(int) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+    VMCPUID      idCpu  = (VMCPUID)(uintptr_t)pvUser;
+    PCPUMCTX     pCtx   = CPUMQueryGuestCtxPtr(pVCpu);
+    PDISCPUSTATE pDis   = &pVCpu->hwaccm.s.DisState;
+    unsigned     cbOp;
+    int          rc;
+#ifdef LOG_ENABLED
+    RTGCPTR      pInstr;
+    char         szOutput[256];
+#endif
+
+    /* Only execute the handler on the VCPU the original patch request was issued. (the other CPU(s) might not yet have switched to protected mode) */
+    if (pVCpu->idCpu != idCpu)
+        return VINF_SUCCESS;
+
+    Assert(pVM->hwaccm.s.svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches));
+
+    /* Two or more VCPUs were racing to patch this instruction. */
+    PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.svm.PatchTree, (AVLOU32KEY)pCtx->eip);
+    if (pPatch)
+    {
+        Log(("hwaccmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
+        return VINF_SUCCESS;
+    }
+
+    Log(("hwaccmR3PatchTprInstr %RGv\n", pCtx->rip));
+
+    rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
+    AssertRC(rc);
+    if (    rc == VINF_SUCCESS
+        &&  pDis->pCurInstr->opcode == OP_MOV
+        &&  cbOp >= 5)
+    {
+        uint32_t        idx = pVM->hwaccm.s.svm.cPatches;
+        PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
+        uint8_t         aPatch[64];
+        uint32_t        off = 0;
+
+#ifdef LOG_ENABLED
+        rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
+        if (VBOX_SUCCESS(rc))
+            Log(("Original instr: %s\n", szOutput));
+#endif
+
+        rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
+        AssertRC(rc);
+
+        pPatch->cbOp    = cbOp;
+        pPatch->enmType = HWACCMTPRINSTR_JUMP_REPLACEMENT;
+
+        if (pDis->param1.flags == USE_DISPLACEMENT32)
+        {
+            /*
+                * TPR write:
+                *
+                * push ECX                      [51]
+                * push EDX                      [52]
+                * push EAX                      [50]
+                * xor EDX,EDX                   [31 D2]
+                * mov EAX,EAX                   [89 C0]
+                *  or
+                * mov EAX,0000000CCh            [B8 CC 00 00 00]
+                * mov ECX,0C0000082h            [B9 82 00 00 C0]
+                * wrmsr                         [0F 30]
+                * pop EAX                       [58]
+                * pop EDX                       [5A]
+                * pop ECX                       [59]
+                * jmp return_address            [E9 return_address]
+                *
+                */
+            bool fUsesEax = (pDis->param2.flags == USE_REG_GEN32 && pDis->param2.base.reg_gen == USE_REG_EAX);
+
+            aPatch[off++] = 0x51;    /* push ecx */
+            aPatch[off++] = 0x52;    /* push edx */
+            if (!fUsesEax)
+                aPatch[off++] = 0x50;    /* push eax */
+            aPatch[off++] = 0x31;    /* xor edx, edx */
+            aPatch[off++] = 0xD2;
+            if (pDis->param2.flags == USE_REG_GEN32)
+            {
+                if (!fUsesEax)
+                {
+                    aPatch[off++] = 0x89;    /* mov eax, src_reg */
+                    aPatch[off++] = MAKE_MODRM(3, pDis->param2.base.reg_gen, USE_REG_EAX);
+                }
+            }
+            else
+            {
+                Assert(pDis->param2.flags == USE_IMMEDIATE32);
+                aPatch[off++] = 0xB8;    /* mov eax, immediate */
+                *(uint32_t *)&aPatch[off] = pDis->param2.parval;
+                off += sizeof(uint32_t);
+            }
+            aPatch[off++] = 0xB9;    /* mov ecx, 0xc0000082 */
+            *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
+            off += sizeof(uint32_t);
+
+            aPatch[off++] = 0x0F;    /* wrmsr */
+            aPatch[off++] = 0x30;
+            if (!fUsesEax)
+                aPatch[off++] = 0x58;    /* pop eax */
+            aPatch[off++] = 0x5A;    /* pop edx */
+            aPatch[off++] = 0x59;    /* pop ecx */
+        }
+        else
+        {
+            /*
+                * TPR read:
+                *
+                * push ECX                      [51]
+                * push EDX                      [52]
+                * push EAX                      [50]
+                * mov ECX,0C0000082h            [B9 82 00 00 C0]
+                * rdmsr                         [0F 32]
+                * mov EAX,EAX                   [89 C0]
+                * pop EAX                       [58]
+                * pop EDX                       [5A]
+                * pop ECX                       [59]
+                * jmp return_address            [E9 return_address]
+                *
+                */
+            Assert(pDis->param1.flags == USE_REG_GEN32);
+
+            if (pDis->param1.base.reg_gen != USE_REG_ECX)
+                aPatch[off++] = 0x51;    /* push ecx */
+            if (pDis->param1.base.reg_gen != USE_REG_EDX)
+                aPatch[off++] = 0x52;    /* push edx */
+            if (pDis->param1.base.reg_gen != USE_REG_EAX)
+                aPatch[off++] = 0x50;    /* push eax */
+
+            aPatch[off++] = 0x31;    /* xor edx, edx */
+            aPatch[off++] = 0xD2;
+
+            aPatch[off++] = 0xB9;    /* mov ecx, 0xc0000082 */
+            *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
+            off += sizeof(uint32_t);
+
+            aPatch[off++] = 0x0F;    /* rdmsr */
+            aPatch[off++] = 0x32;
+
+            if (pDis->param1.base.reg_gen != USE_REG_EAX)
+            {
+                aPatch[off++] = 0x89;    /* mov dst_reg, eax */
+                aPatch[off++] = MAKE_MODRM(3, USE_REG_EAX, pDis->param1.base.reg_gen);
+            }
+
+            if (pDis->param1.base.reg_gen != USE_REG_EAX)
+                aPatch[off++] = 0x58;    /* pop eax */
+            if (pDis->param1.base.reg_gen != USE_REG_EDX)
+                aPatch[off++] = 0x5A;    /* pop edx */
+            if (pDis->param1.base.reg_gen != USE_REG_ECX)
+                aPatch[off++] = 0x59;    /* pop ecx */
+        }
+        aPatch[off++] = 0xE9;    /* jmp return_address */
+        *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem + off + 4);
+        off += sizeof(RTRCUINTPTR);
+
+        if (pVM->hwaccm.s.pFreeGuestPatchMem + off <= pVM->hwaccm.s.pGuestPatchMem + pVM->hwaccm.s.cbGuestPatchMem)
+        {
+            /* Write new code to the patch buffer. */
+            rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hwaccm.s.pFreeGuestPatchMem, aPatch, off);
+            AssertRC(rc);
+
+#ifdef LOG_ENABLED
+            pInstr = pVM->hwaccm.s.pFreeGuestPatchMem;
+            while (true)
+            {
+                uint32_t cb;
+
+                rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pInstr, 0, szOutput, sizeof(szOutput), &cb);
+                if (VBOX_SUCCESS(rc))
+                    Log(("Patch instr %s\n", szOutput));
+
+                pInstr += cb;
+
+                if (pInstr >= pVM->hwaccm.s.pFreeGuestPatchMem + off)
+                    break;
+            }
+#endif
+
+            pPatch->aNewOpcode[0] = 0xE9;
+            *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
+
+            /* Overwrite the TPR instruction with a jump. */
+            rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
+            AssertRC(rc);
+
+#ifdef LOG_ENABLED
+            rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
+            if (VBOX_SUCCESS(rc))
+                Log(("Jump: %s\n", szOutput));
+#endif
+            pVM->hwaccm.s.pFreeGuestPatchMem += off;
+            pPatch->cbNewOp = 5;
+
+            pPatch->Core.Key = pCtx->eip;
+            rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
+            AssertRC(rc);
+
+            pVM->hwaccm.s.svm.cPatches++;
+            pVM->hwaccm.s.svm.fTPRPatchingActive = true;
+            STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchSuccess);
+            return VINF_SUCCESS;
+        }
+        else
+            Log(("Ran out of space in our patch buffer!\n"));
+    }
+
+    /* Save invalid patch, so we will not try again. */
+    uint32_t  idx = pVM->hwaccm.s.svm.cPatches;
+
+#ifdef LOG_ENABLED
+    rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
+    if (VBOX_SUCCESS(rc))
+        Log(("Failed to patch instr: %s\n", szOutput));
+#endif
+
+    pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
+    pPatch->Core.Key = pCtx->eip;
+    pPatch->enmType  = HWACCMTPRINSTR_INVALID;
+    rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
+    AssertRC(rc);
+    pVM->hwaccm.s.svm.cPatches++;
+    STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchFailure);
+    return VINF_SUCCESS;
+}
+
+/**
+ * Attempt to patch TPR mmio instructions
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
+ * @param   pCtx        CPU context
+ */
+VMMR3DECL(int) HWACCMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, (pVM->hwaccm.s.pGuestPatchMem) ? hwaccmR3PatchTprInstr : hwaccmR3ReplaceTprInstr, (void *)pVCpu->idCpu);
+    AssertRC(rc);
+    return rc;
 }
 
 /**
@@ -1632,6 +2234,65 @@
     return HWACCMIsEnabled(pVCpu->pVMR3) && pVCpu->hwaccm.s.Event.fPending;
 }
 
+/**
+ * Restart an I/O instruction that was refused in ring-0
+ *
+ * @returns Strict VBox status code. Informational status codes other than the one documented
+ *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
+ * @retval  VINF_SUCCESS                Success.
+ * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
+ *                                      status code must be passed on to EM.
+ * @retval  VERR_NOT_FOUND if no pending I/O instruction.
+ *
+ * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VMCPU to operate on.
+ * @param   pCtx        VCPU register context
+ */
+VMMR3DECL(int)  HWACCMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    HWACCMPENDINGIO enmType = pVCpu->hwaccm.s.PendingIO.enmType;
+    int rc;
+
+    pVCpu->hwaccm.s.PendingIO.enmType = HWACCMPENDINGIO_INVALID;
+
+    if (    pVCpu->hwaccm.s.PendingIO.GCPtrRip != pCtx->rip
+        ||  enmType  == HWACCMPENDINGIO_INVALID)
+        return VERR_NOT_FOUND;
+
+    switch (enmType)
+    {
+    case HWACCMPENDINGIO_PORT_READ:
+    {
+        uint32_t uAndVal = pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal;
+        uint32_t u32Val  = 0;
+
+        rc = IOMIOPortRead(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort, 
+                           &u32Val, 
+                           pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
+        if (IOM_SUCCESS(rc))
+        {
+            /* Write back to the EAX register. */
+            pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
+            pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
+        }
+        break;
+    }
+
+    case HWACCMPENDINGIO_PORT_WRITE:
+        rc = IOMIOPortWrite(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort, 
+                            pCtx->eax & pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal, 
+                            pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
+        if (IOM_SUCCESS(rc))
+            pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
+        break;
+
+    default:
+        AssertFailed();
+        return VERR_INTERNAL_ERROR;
+    }
+
+    return rc;
+}
 
 /**
  * Inject an NMI into a running VM (only VCPU 0!)
@@ -1722,7 +2383,54 @@
         rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode);
         AssertRCReturn(rc, rc);
     }
-
+#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
+    rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pGuestPatchMem);
+    AssertRCReturn(rc, rc);
+    rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pFreeGuestPatchMem);
+    AssertRCReturn(rc, rc);
+    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cbGuestPatchMem);
+    AssertRCReturn(rc, rc);
+
+    /* Store all the guest patch records too. */
+    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.svm.cPatches);
+    AssertRCReturn(rc, rc);
+
+    for (unsigned i = 0; i < pVM->hwaccm.s.svm.cPatches; i++)
+    {
+        PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[i];
+
+        rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
+        AssertRCReturn(rc, rc);
+
+        rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
+        AssertRCReturn(rc, rc);
+
+        rc = SSMR3PutU32(pSSM, pPatch->cbOp);
+        AssertRCReturn(rc, rc);
+
+        rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
+        AssertRCReturn(rc, rc);
+
+        rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
+        AssertRCReturn(rc, rc);
+
+        AssertCompileSize(HWACCMTPRINSTR, 4);
+        rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
+        AssertRCReturn(rc, rc);
+
+        rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
+        AssertRCReturn(rc, rc);
+
+        rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
+        AssertRCReturn(rc, rc);
+
+        rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
+        AssertRCReturn(rc, rc);
+
+        rc = SSMR3PutU32(pSSM, pPatch->cFaults);
+        AssertRCReturn(rc, rc);
+    }
+#endif
     return VINF_SUCCESS;
 }
 
@@ -1744,6 +2452,7 @@
      * Validate version.
      */
     if (   u32Version != HWACCM_SSM_VERSION
+        && u32Version != HWACCM_SSM_VERSION_NO_PATCHING
         && u32Version != HWACCM_SSM_VERSION_2_0_X)
     {
         AssertMsgFailed(("hwaccmR3Load: Invalid version u32Version=%d!\n", u32Version));
@@ -1758,7 +2467,7 @@
         rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
         AssertRCReturn(rc, rc);
 
-        if (u32Version >= HWACCM_SSM_VERSION)
+        if (u32Version >= HWACCM_SSM_VERSION_NO_PATCHING)
         {
             uint32_t val;
 
@@ -1775,5 +2484,58 @@
             pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
         }
     }
+#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
+    if (u32Version > HWACCM_SSM_VERSION_NO_PATCHING)
+    {
+        rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pGuestPatchMem);
+        AssertRCReturn(rc, rc);
+        rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pFreeGuestPatchMem);
+        AssertRCReturn(rc, rc);
+        rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cbGuestPatchMem);
+        AssertRCReturn(rc, rc);
+
+        /* Fetch all TPR patch records. */
+        rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.svm.cPatches);
+        AssertRCReturn(rc, rc);
+
+        for (unsigned i = 0; i < pVM->hwaccm.s.svm.cPatches; i++)
+        {
+            PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[i];
+
+            rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
+            AssertRCReturn(rc, rc);
+
+            rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
+            AssertRCReturn(rc, rc);
+
+            rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
+            AssertRCReturn(rc, rc);
+
+            rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
+            AssertRCReturn(rc, rc);
+
+            rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
+            AssertRCReturn(rc, rc);
+
+            rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
+            AssertRCReturn(rc, rc);
+
+            rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
+            AssertRCReturn(rc, rc);
+
+            rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
+            AssertRCReturn(rc, rc);
+
+            rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
+            AssertRCReturn(rc, rc);
+
+            rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
+            AssertRCReturn(rc, rc);
+            
+            rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
+            AssertRC(rc);
+        }
+    }
+#endif
     return VINF_SUCCESS;
 }