~ubuntu-branches/ubuntu/natty/virtualbox-ose/natty-updates

« back to all changes in this revision

Viewing changes to src/VBox/VMM/EM.cpp

  • Committer: Bazaar Package Importer
  • Author(s): Felix Geyer
  • Date: 2010-03-11 17:16:37 UTC
  • mfrom: (0.3.4 upstream) (0.4.8 sid)
  • Revision ID: james.westby@ubuntu.com-20100311171637-43z64ia3ccpj8vqn
Tags: 3.1.4-dfsg-2ubuntu1
* Merge from Debian unstable (LP: #528561), remaining changes:
  - VirtualBox should go in Accessories, not in System tools (LP: #288590)
    - debian/virtualbox-ose-qt.files/virtualbox-ose.desktop
  - Add Apport hook
    - debian/virtualbox-ose.files/source_virtualbox-ose.py
    - debian/virtualbox-ose.install
  - Add Launchpad integration
    - debian/control
    - debian/lpi-bug.xpm
    - debian/patches/u02-lp-integration.dpatch
  - Replace *-source packages with transitional packages for *-dkms
* Fix crash in vboxvideo_drm with kernel 2.6.33 / backported drm code
  (LP: #535297)
* Add a list of linux-headers packages to the apport hook
* Update debian/patches/u02-lp-integration.dpatch with a
  DEP-3 compliant header
* Add ${misc:Depends} to virtualbox-ose-source and virtualbox-ose-guest-source
  Depends

Show diffs side-by-side

added added

removed removed

Lines of Context:
88
88
static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
89
89
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
90
90
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
91
 
DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
92
91
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
93
92
 
94
93
 
381
380
        /* these should be considered for release statistics. */
382
381
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu,                 "/PROF/CPU%d/EM/Emulation/IO",      "Profiling of emR3RawExecuteIOInstruction.");
383
382
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu,               "/PROF/CPU%d/EM/Emulation/Priv",    "Profiling of emR3RawPrivileged.");
384
 
        EM_REG_COUNTER(&pVCpu->em.s.StatMiscEmu,               "/PROF/CPU%d/EM/Emulation/Misc",    "Profiling of emR3RawExecuteInstruction.");
385
383
        EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry,           "/PROF/CPU%d/EM/HwAccEnter",        "Profiling Hardware Accelerated Mode entry overhead.");
386
384
        EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec,            "/PROF/CPU%d/EM/HwAccExec",         "Profiling Hardware Accelerated Mode execution.");
387
385
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu,               "/PROF/CPU%d/EM/REMEmuSingle",      "Profiling single instruction REM execution.");
850
848
 
851
849
 
852
850
/**
 
851
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 
852
 * critical section.
 
853
 *
 
854
 * @returns false - new fInREMState value.
 
855
 * @param   pVM         The VM handle.
 
856
 * @param   pVCpu       The virtual CPU handle.
 
857
 */
 
858
DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
 
859
{
 
860
    STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
 
861
    REMR3StateBack(pVM, pVCpu);
 
862
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
 
863
 
 
864
    EMRemUnlock(pVM);
 
865
    return false;
 
866
}
 
867
 
 
868
 
 
869
/**
853
870
 * Executes recompiled code.
854
871
 *
855
872
 * This function contains the recompiler version of the inner
883
900
              ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
884
901
#endif
885
902
 
886
 
    /* Big lock, but you are not supposed to own any lock when coming in here. */
887
 
    EMRemLock(pVM);
888
 
 
889
903
    /*
890
904
     * Spin till we get a forced action which returns anything but VINF_SUCCESS
891
905
     * or the REM suggests raw-mode execution.
892
906
     */
893
907
    *pfFFDone = false;
894
908
    bool    fInREMState = false;
895
 
    int     rc = VINF_SUCCESS;
896
 
 
897
 
    /* Flush the recompiler TLB if the VCPU has changed. */
898
 
    if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
899
 
    {
900
 
        REMFlushTBs(pVM);
901
 
        /* Also sync the entire state. */
902
 
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
903
 
    }
904
 
    pVM->em.s.idLastRemCpu = pVCpu->idCpu;
905
 
 
 
909
    int     rc          = VINF_SUCCESS;
906
910
    for (;;)
907
911
    {
908
912
        /*
909
 
         * Update REM state if not already in sync.
 
913
         * Lock REM and update the state if not already in sync.
 
914
         *
 
915
         * Note! Big lock, but you are not supposed to own any lock when
 
916
         *       coming in here.
910
917
         */
911
918
        if (!fInREMState)
912
919
        {
 
920
            EMRemLock(pVM);
913
921
            STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
 
922
 
 
923
            /* Flush the recompiler translation blocks if the VCPU has changed,
 
924
               also force a full CPU state resync. */
 
925
            if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
 
926
            {
 
927
                REMFlushTBs(pVM);
 
928
                CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
 
929
            }
 
930
            pVM->em.s.idLastRemCpu = pVCpu->idCpu;
 
931
 
914
932
            rc = REMR3State(pVM, pVCpu);
 
933
 
915
934
            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
916
935
            if (RT_FAILURE(rc))
917
936
                break;
939
958
 
940
959
 
941
960
        /*
942
 
         * Deal with high priority post execution FFs before doing anything else.
 
961
         * Deal with high priority post execution FFs before doing anything
 
962
         * else.  Sync back the state and leave the lock to be on the safe side.
943
963
         */
944
964
        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
945
965
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
 
966
        {
 
967
            fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
946
968
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
 
969
        }
947
970
 
948
971
        /*
949
972
         * Process the returned status code.
950
 
         * (Try keep this short! Call functions!)
951
973
         */
952
974
        if (rc != VINF_SUCCESS)
953
975
        {
967
989
 
968
990
        /*
969
991
         * Check and execute forced actions.
970
 
         * Sync back the VM state before calling any of these.
 
992
         *
 
993
 * Sync back the VM state and leave the lock before calling any of
 
994
         * these, you never know what's going to happen here.
971
995
         */
972
996
#ifdef VBOX_HIGH_RES_TIMERS_HACK
973
997
        TMTimerPollVoid(pVM, pVCpu);
978
1002
        {
979
1003
l_REMDoForcedActions:
980
1004
            if (fInREMState)
981
 
            {
982
 
                STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, d);
983
 
                REMR3StateBack(pVM, pVCpu);
984
 
                STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, d);
985
 
                fInREMState = false;
986
 
            }
 
1005
                fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
987
1006
            STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
988
1007
            rc = emR3ForcedActions(pVM, pVCpu, rc);
989
1008
            STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1002
1021
     * Returning. Sync back the VM state if required.
1003
1022
     */
1004
1023
    if (fInREMState)
1005
 
    {
1006
 
        STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, e);
1007
 
        REMR3StateBack(pVM, pVCpu);
1008
 
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, e);
1009
 
    }
1010
 
    EMRemUnlock(pVM);
 
1024
        fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1011
1025
 
1012
1026
    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1013
1027
    return rc;