#ifndef ___VBox_vmm_vm_h
#define ___VBox_vmm_vm_h

#ifndef VBOX_FOR_DTRACE_LIB
# include <VBox/types.h>
# include <VBox/vmm/cpum.h>
# include <VBox/vmm/stam.h>
# include <VBox/vmm/vmapi.h>
# include <VBox/vmm/vmm.h>
# include <VBox/sup.h>
#else
# pragma D depends_on library vbox-types.d
# pragma D depends_on library CPUMInternal.d
# define ___CPUMInternal_h
#endif

/** @defgroup grp_vm    The Virtual Machine
 * @{
 */

/**
 * The state of a Virtual CPU.
 *
 * The basic state indicated here is whether the CPU has been started or not. In
 * addition, there are sub-states when started for assisting scheduling (GVMM
 * mostly).
 *
 * The transition out of the STOPPED state is done by vmR3PowerOn.
 * The transition back to the STOPPED state is done by vmR3PowerOff.
 *
 * (Alternatively we could let vmR3PowerOn start CPU 0 only and let the SIPI
 * handling switch on the other CPUs. Then vmR3Reset would stop all but CPU 0.)
 */
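
/* Example (editor's illustrative sketch, not part of the original header): the
 * started/stopped distinction described above is what the VMCPUSTATE_IS_STARTED
 * and VMCPUSTATE_IS_STOPPED macros further down in this file test, e.g.:
 *
 *      VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);   // pVCpu: a valid PVMCPU
 *      if (VMCPUSTATE_IS_STARTED(enmState))
 *      {
 *          // The CPU has been powered on (it may be executing or halted).
 *      }
 *      else
 *          Assert(VMCPUSTATE_IS_STOPPED(enmState));
 */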

    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fLocalForcedActions;                    /* 0 */
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;                               /* 4 */

    /** Pointer to the ring-3 UVMCPU structure. */
    PUVMCPU                 pUVCpu;                                 /* 8 */
    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;                                  /* 16 / 12 */
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;                                  /* 24 / 16 */
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;                                  /* 32 / 20 */
    /** The CPU ID.
     * This is the index into the VM::aCpus array. */
    VMCPUID                 idCpu;                                  /* 36 / 24 */

    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;                          /* 40 / 28 */
    /** The native R0 thread handle. (different from the R3 handle!) */
    RTNATIVETHREAD          hNativeThreadR0;                        /* 48 / 32 */
    /** Which host CPU ID is this EMT running on.
     * Only valid when in RC or HWACCMR0 with scheduling disabled. */
    RTCPUID volatile        idHostCpu;                              /* 56 / 36 */

    /** Trace groups enable flags. */
    uint32_t                fTraceGroups;                           /* 60 / 40 */
    /** Align the structures below on a 64-byte boundary and make sure they start
     * at the same offset in both 64-bit and 32-bit builds.
     *
     * @remarks The alignments of the members that are larger than 48 bytes should be
     *          64-byte for cache line reasons. Structs containing small amounts of
     *          data could be lumped together at the end with a < 64 byte padding
     *          following it (to grow into and align the struct size).
     */
    uint8_t                 abAlignment1[HC_ARCH_BITS == 64 ? 60 : 16+64];
    /** State data for use by ad hoc profiling. */
    uint32_t                uAdHoc;
    /** Profiling samples for use by ad hoc profiling. */
    STAMPROFILEADV          aStatAdHoc[8];                          /* size: 40*8 = 320 */
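
    /* Editor's note: an illustrative sketch (not in the original header) of how
     * the 64-byte alignment intent documented above can be checked at build time
     * using the compile-time asserts from <iprt/assert.h>.  The member name
     * 'cpum' for the CPUM part that follows is assumed here for the example.
     *
     *      #include <iprt/assert.h>
     *      AssertCompileMemberAlignment(VMCPU, cpum, 64);          // first sub-structure starts a cache line
     *      AssertCompile(sizeof(STAMPROFILEADV) * 8 % 64 == 0);    // aStatAdHoc spans whole 64-byte lines
     */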
    /** CPUM part. */

#define VMCPU_CMPXCHG_STATE(pVCpu, enmNewState, enmOldState) \
    ASMAtomicCmpXchgU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState), (enmOldState))
/** Checks the VMCPU state. */
#ifdef VBOX_STRICT
# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) \
    do { \
        VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
        AssertMsg(enmState == (enmExpectedState), \
                  ("enmState=%d  enmExpectedState=%d idCpu=%u\n", \
                   enmState, enmExpectedState, (pVCpu)->idCpu)); \
    } while (0)
#else
# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) do { } while (0)
#endif
/** Tests if the state means that the CPU is started. */
#define VMCPUSTATE_IS_STARTED(enmState)     ( (enmState) > VMCPUSTATE_STOPPED )
/** Tests if the state means that the CPU is stopped. */
#define VMCPUSTATE_IS_STOPPED(enmState)     ( (enmState) == VMCPUSTATE_STOPPED )
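
/* Example (editor's illustrative sketch, not part of the original header):
 * an EMT taking a virtual CPU from stopped to started with the macros above,
 * and asserting the result in strict builds.
 *
 *      if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STOPPED))
 *      {
 *          // We won the race; from here on the CPU counts as started.
 *          VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
 *          Assert(VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)));
 *      }
 */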

/** @def VMCPU_FF_IS_SET
 * Checks if the given force action flag is set for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to check.
 */
#define VMCPU_FF_IS_SET(pVCpu, fFlag)       (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
/** @deprecated  */
#define VMCPU_FF_ISSET(pVCpu, fFlag)        VMCPU_FF_IS_SET(pVCpu, fFlag)

/** @def VM_FF_IS_PENDING
 * Checks if one or more force action in the specified set is pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_IS_PENDING(pVM, fFlags)       ((pVM)->fGlobalForcedActions & (fFlags))
/** @deprecated  */
#define VM_FF_ISPENDING(pVM, fFlags)        VM_FF_IS_PENDING(pVM, fFlags)

/** @def VM_FF_TEST_AND_CLEAR
 * Checks if one (!) force action in the specified set is pending and clears it atomically.
 *
 * @returns true if the bit was set.
 * @returns false if the bit was clear.
 * @param   pVM     VM Handle.
 * @param   iBit    Bit position to check and clear
 */
#define VM_FF_TEST_AND_CLEAR(pVM, iBit)     (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))
/** @deprecated  */
#define VM_FF_TESTANDCLEAR(pVM, iBit)       (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))

/** @def VMCPU_FF_TEST_AND_CLEAR
 * Checks if one (!) force action in the specified set is pending and clears it atomically.
 *
 * @returns true if the bit was set.
 * @returns false if the bit was clear.
 * @param   pVCpu   VMCPU Handle.
 * @param   iBit    Bit position to check and clear
 */
#define VMCPU_FF_TEST_AND_CLEAR(pVCpu, iBit) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))
/** @deprecated  */
#define VMCPU_FF_TESTANDCLEAR(pVCpu, iBit)  (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))

/** @def VMCPU_FF_IS_PENDING
 * Checks if one or more force action in the specified set is pending for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 */
#define VMCPU_FF_IS_PENDING(pVCpu, fFlags)  ((pVCpu)->fLocalForcedActions & (fFlags))
/** @deprecated  */
#define VMCPU_FF_ISPENDING(pVCpu, fFlags)   VMCPU_FF_IS_PENDING(pVCpu, fFlags)
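
/* Example (editor's illustrative sketch, not part of the original header):
 * typical polling of forced actions in an execution loop using the macros
 * above.  VM_FF_REQUEST and VMCPU_FF_TIMER are force action flags defined
 * elsewhere in this header; the timer call is just one possible consumer.
 *
 *      if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
 *          || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
 *      {
 *          // Leave the inner loop and service whatever is pending.
 *      }
 *
 *      // The test-and-clear variants work on a single flag and are atomic:
 *      if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TIMER))
 *          TMR3TimerQueuesDo(pVM);
 */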

/** @def VM_FF_IS_PENDING_EXCEPT
 * Checks if one or more force action in the specified set is pending while one
 * or more other ones are not.

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;
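
    /* Example (editor's illustrative sketch, not part of the original header):
     * offVMCPU lets code that only has the start address of the VM structure
     * locate the VMCPU array; in plain C the aCpus member at the end of this
     * structure is the normal way to reach a virtual CPU.
     *
     *      PVMCPU pVCpu0 = (PVMCPU)((uintptr_t)pVM + pVM->offVMCPU);   // CPU 0
     *      Assert(pVCpu0 == &pVM->aCpus[0]);
     */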

    /**
     * VMMSwitcher assembly entry point returning to host context.
     *
     * Depending on how the host handles the rc status given in @a eax, this may
     * return and let the caller resume whatever it was doing prior to the call.
     *
     * @param   eax         The return code, register.
     * @remark  Assume interrupts disabled.
     * @remark  This method pointer lives here because TRPM needs it.
     */
    RTRCPTR                     pfnVMMRCToHostAsm/*(int32_t eax)*/;

    /**
     * VMMSwitcher assembly entry point returning to host context without saving the
     * raw-mode context (hyper) registers.
     *
     * Unlike pfnVMMRCToHostAsm, this will not return to the caller.  Instead it
     * expects the caller to save an RC context in CPUM where one might return if the
     * return code indicates that this is possible.
     *
     * @param   eax         The return code, register.
     * @remark  Assume interrupts disabled.
     * @remark  This method pointer lives here because TRPM needs it.
     */
    RTRCPTR                     pfnVMMRCToHostAsmNoReturn/*(int32_t eax)*/;

    /** @name Various items that are frequently accessed.