Viewing changes to src/VBox/VMM/VMMR3/HWACCM.cpp

  • Committer: Bazaar Package Importer
  • Author(s): Felix Geyer
  • Date: 2011-01-30 23:27:25 UTC
  • mfrom: (0.3.12 upstream)
  • Revision ID: james.westby@ubuntu.com-20110130232725-2ouajjd2ggdet0zd
Tags: 4.0.2-dfsg-1ubuntu1
* Merge from Debian unstable, remaining changes:
  - Add Apport hook.
    - debian/virtualbox-ose.files/source_virtualbox-ose.py
    - debian/virtualbox-ose.install
  - Drop *-source packages.
* Drop ubuntu-01-fix-build-gcc45.patch, fixed upstream.
* Drop ubuntu-02-as-needed.patch, added to the Debian package.

/* $Id: HWACCM.cpp 35346 2010-12-27 16:13:13Z vboxsync $ */
/** @file
 * HWACCM - Intel/AMD VM Hardware Support Manager
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/rem.h>
#include <VBox/vmm/hwacc_vmx.h>
#include <VBox/vmm/hwacc_svm.h>
#include "HWACCMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>

#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <iprt/env.h>
#include <iprt/thread.h>

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef VBOX_WITH_STATISTICS
# define EXIT_REASON(def, val, str) #def " - " #val " - " str
# define EXIT_REASON_NIL() NULL
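/* EXIT_REASON relies on preprocessor stringification (#def, #val) plus adjacent
 * string-literal concatenation, so, for example,
 *   EXIT_REASON(VMX_EXIT_CPUID, 10, "Guest software attempted to execute CPUID.")
 * expands to the single literal
 *   "VMX_EXIT_CPUID - 10 - Guest software attempted to execute CPUID."
 * EXIT_REASON_NIL() yields NULL for exit codes that have no description, which
 * the registration code below skips. */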
 
/** Exit reason descriptions for VT-x, used to describe statistics. */
static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
{
    EXIT_REASON(VMX_EXIT_EXCEPTION          ,  0, "Exception or non-maskable interrupt (NMI)."),
    EXIT_REASON(VMX_EXIT_EXTERNAL_IRQ       ,  1, "External interrupt."),
    EXIT_REASON(VMX_EXIT_TRIPLE_FAULT       ,  2, "Triple fault."),
    EXIT_REASON(VMX_EXIT_INIT_SIGNAL        ,  3, "INIT signal."),
    EXIT_REASON(VMX_EXIT_SIPI               ,  4, "Start-up IPI (SIPI)."),
    EXIT_REASON(VMX_EXIT_IO_SMI_IRQ         ,  5, "I/O system-management interrupt (SMI)."),
    EXIT_REASON(VMX_EXIT_SMI_IRQ            ,  6, "Other SMI."),
    EXIT_REASON(VMX_EXIT_IRQ_WINDOW         ,  7, "Interrupt window."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_TASK_SWITCH        ,  9, "Task switch."),
    EXIT_REASON(VMX_EXIT_CPUID              , 10, "Guest software attempted to execute CPUID."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_HLT                , 12, "Guest software attempted to execute HLT."),
    EXIT_REASON(VMX_EXIT_INVD               , 13, "Guest software attempted to execute INVD."),
    EXIT_REASON(VMX_EXIT_INVPG              , 14, "Guest software attempted to execute INVLPG."),
    EXIT_REASON(VMX_EXIT_RDPMC              , 15, "Guest software attempted to execute RDPMC."),
    EXIT_REASON(VMX_EXIT_RDTSC              , 16, "Guest software attempted to execute RDTSC."),
    EXIT_REASON(VMX_EXIT_RSM                , 17, "Guest software attempted to execute RSM in SMM."),
    EXIT_REASON(VMX_EXIT_VMCALL             , 18, "Guest software executed VMCALL."),
    EXIT_REASON(VMX_EXIT_VMCLEAR            , 19, "Guest software executed VMCLEAR."),
    EXIT_REASON(VMX_EXIT_VMLAUNCH           , 20, "Guest software executed VMLAUNCH."),
    EXIT_REASON(VMX_EXIT_VMPTRLD            , 21, "Guest software executed VMPTRLD."),
    EXIT_REASON(VMX_EXIT_VMPTRST            , 22, "Guest software executed VMPTRST."),
    EXIT_REASON(VMX_EXIT_VMREAD             , 23, "Guest software executed VMREAD."),
    EXIT_REASON(VMX_EXIT_VMRESUME           , 24, "Guest software executed VMRESUME."),
    EXIT_REASON(VMX_EXIT_VMWRITE            , 25, "Guest software executed VMWRITE."),
    EXIT_REASON(VMX_EXIT_VMXOFF             , 26, "Guest software executed VMXOFF."),
    EXIT_REASON(VMX_EXIT_VMXON              , 27, "Guest software executed VMXON."),
    EXIT_REASON(VMX_EXIT_CRX_MOVE           , 28, "Control-register accesses."),
    EXIT_REASON(VMX_EXIT_DRX_MOVE           , 29, "Debug-register accesses."),
    EXIT_REASON(VMX_EXIT_PORT_IO            , 30, "I/O instruction."),
    EXIT_REASON(VMX_EXIT_RDMSR              , 31, "RDMSR. Guest software attempted to execute RDMSR."),
    EXIT_REASON(VMX_EXIT_WRMSR              , 32, "WRMSR. Guest software attempted to execute WRMSR."),
    EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE,  33, "VM-entry failure due to invalid guest state."),
    EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD       , 34, "VM-entry failure due to MSR loading."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_MWAIT              , 36, "Guest software executed MWAIT."),
    EXIT_REASON_NIL(),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_MONITOR            , 39, "Guest software attempted to execute MONITOR."),
    EXIT_REASON(VMX_EXIT_PAUSE              , 40, "Guest software attempted to execute PAUSE."),
    EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK  , 41, "VM-entry failure due to machine-check."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_TPR                , 43, "TPR below threshold. Guest software executed MOV to CR8."),
    EXIT_REASON(VMX_EXIT_APIC_ACCESS        , 44, "APIC access. Guest software attempted to access memory at a physical address on the APIC-access page."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_XDTR_ACCESS        , 46, "Access to GDTR or IDTR. Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT."),
    EXIT_REASON(VMX_EXIT_TR_ACCESS          , 47, "Access to LDTR or TR. Guest software attempted to execute LLDT, LTR, SLDT, or STR."),
    EXIT_REASON(VMX_EXIT_EPT_VIOLATION      , 48, "EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures."),
    EXIT_REASON(VMX_EXIT_EPT_MISCONFIG      , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
    EXIT_REASON(VMX_EXIT_INVEPT             , 50, "INVEPT. Guest software attempted to execute INVEPT."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_PREEMPTION_TIMER   , 52, "VMX-preemption timer expired. The preemption timer counted down to zero."),
    EXIT_REASON(VMX_EXIT_INVVPID            , 53, "INVVPID. Guest software attempted to execute INVVPID."),
    EXIT_REASON(VMX_EXIT_WBINVD             , 54, "WBINVD. Guest software attempted to execute WBINVD."),
    EXIT_REASON(VMX_EXIT_XSETBV             , 55, "XSETBV. Guest software attempted to execute XSETBV."),
    EXIT_REASON_NIL()
};
 
/** Exit reason descriptions for AMD-V, used to describe statistics. */
static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
{
    EXIT_REASON(SVM_EXIT_READ_CR0                   ,  0, "Read CR0."),
    EXIT_REASON(SVM_EXIT_READ_CR1                   ,  1, "Read CR1."),
    EXIT_REASON(SVM_EXIT_READ_CR2                   ,  2, "Read CR2."),
    EXIT_REASON(SVM_EXIT_READ_CR3                   ,  3, "Read CR3."),
    EXIT_REASON(SVM_EXIT_READ_CR4                   ,  4, "Read CR4."),
    EXIT_REASON(SVM_EXIT_READ_CR5                   ,  5, "Read CR5."),
    EXIT_REASON(SVM_EXIT_READ_CR6                   ,  6, "Read CR6."),
    EXIT_REASON(SVM_EXIT_READ_CR7                   ,  7, "Read CR7."),
    EXIT_REASON(SVM_EXIT_READ_CR8                   ,  8, "Read CR8."),
    EXIT_REASON(SVM_EXIT_READ_CR9                   ,  9, "Read CR9."),
    EXIT_REASON(SVM_EXIT_READ_CR10                  , 10, "Read CR10."),
    EXIT_REASON(SVM_EXIT_READ_CR11                  , 11, "Read CR11."),
    EXIT_REASON(SVM_EXIT_READ_CR12                  , 12, "Read CR12."),
    EXIT_REASON(SVM_EXIT_READ_CR13                  , 13, "Read CR13."),
    EXIT_REASON(SVM_EXIT_READ_CR14                  , 14, "Read CR14."),
    EXIT_REASON(SVM_EXIT_READ_CR15                  , 15, "Read CR15."),
    EXIT_REASON(SVM_EXIT_WRITE_CR0                  , 16, "Write CR0."),
    EXIT_REASON(SVM_EXIT_WRITE_CR1                  , 17, "Write CR1."),
    EXIT_REASON(SVM_EXIT_WRITE_CR2                  , 18, "Write CR2."),
    EXIT_REASON(SVM_EXIT_WRITE_CR3                  , 19, "Write CR3."),
    EXIT_REASON(SVM_EXIT_WRITE_CR4                  , 20, "Write CR4."),
    EXIT_REASON(SVM_EXIT_WRITE_CR5                  , 21, "Write CR5."),
    EXIT_REASON(SVM_EXIT_WRITE_CR6                  , 22, "Write CR6."),
    EXIT_REASON(SVM_EXIT_WRITE_CR7                  , 23, "Write CR7."),
    EXIT_REASON(SVM_EXIT_WRITE_CR8                  , 24, "Write CR8."),
    EXIT_REASON(SVM_EXIT_WRITE_CR9                  , 25, "Write CR9."),
    EXIT_REASON(SVM_EXIT_WRITE_CR10                 , 26, "Write CR10."),
    EXIT_REASON(SVM_EXIT_WRITE_CR11                 , 27, "Write CR11."),
    EXIT_REASON(SVM_EXIT_WRITE_CR12                 , 28, "Write CR12."),
    EXIT_REASON(SVM_EXIT_WRITE_CR13                 , 29, "Write CR13."),
    EXIT_REASON(SVM_EXIT_WRITE_CR14                 , 30, "Write CR14."),
    EXIT_REASON(SVM_EXIT_WRITE_CR15                 , 31, "Write CR15."),
    EXIT_REASON(SVM_EXIT_READ_DR0                   , 32, "Read DR0."),
    EXIT_REASON(SVM_EXIT_READ_DR1                   , 33, "Read DR1."),
    EXIT_REASON(SVM_EXIT_READ_DR2                   , 34, "Read DR2."),
    EXIT_REASON(SVM_EXIT_READ_DR3                   , 35, "Read DR3."),
    EXIT_REASON(SVM_EXIT_READ_DR4                   , 36, "Read DR4."),
    EXIT_REASON(SVM_EXIT_READ_DR5                   , 37, "Read DR5."),
    EXIT_REASON(SVM_EXIT_READ_DR6                   , 38, "Read DR6."),
    EXIT_REASON(SVM_EXIT_READ_DR7                   , 39, "Read DR7."),
    EXIT_REASON(SVM_EXIT_READ_DR8                   , 40, "Read DR8."),
    EXIT_REASON(SVM_EXIT_READ_DR9                   , 41, "Read DR9."),
    EXIT_REASON(SVM_EXIT_READ_DR10                  , 42, "Read DR10."),
    EXIT_REASON(SVM_EXIT_READ_DR11                  , 43, "Read DR11."),
    EXIT_REASON(SVM_EXIT_READ_DR12                  , 44, "Read DR12."),
    EXIT_REASON(SVM_EXIT_READ_DR13                  , 45, "Read DR13."),
    EXIT_REASON(SVM_EXIT_READ_DR14                  , 46, "Read DR14."),
    EXIT_REASON(SVM_EXIT_READ_DR15                  , 47, "Read DR15."),
    EXIT_REASON(SVM_EXIT_WRITE_DR0                  , 48, "Write DR0."),
    EXIT_REASON(SVM_EXIT_WRITE_DR1                  , 49, "Write DR1."),
    EXIT_REASON(SVM_EXIT_WRITE_DR2                  , 50, "Write DR2."),
    EXIT_REASON(SVM_EXIT_WRITE_DR3                  , 51, "Write DR3."),
    EXIT_REASON(SVM_EXIT_WRITE_DR4                  , 52, "Write DR4."),
    EXIT_REASON(SVM_EXIT_WRITE_DR5                  , 53, "Write DR5."),
    EXIT_REASON(SVM_EXIT_WRITE_DR6                  , 54, "Write DR6."),
    EXIT_REASON(SVM_EXIT_WRITE_DR7                  , 55, "Write DR7."),
    EXIT_REASON(SVM_EXIT_WRITE_DR8                  , 56, "Write DR8."),
    EXIT_REASON(SVM_EXIT_WRITE_DR9                  , 57, "Write DR9."),
    EXIT_REASON(SVM_EXIT_WRITE_DR10                 , 58, "Write DR10."),
    EXIT_REASON(SVM_EXIT_WRITE_DR11                 , 59, "Write DR11."),
    EXIT_REASON(SVM_EXIT_WRITE_DR12                 , 60, "Write DR12."),
    EXIT_REASON(SVM_EXIT_WRITE_DR13                 , 61, "Write DR13."),
    EXIT_REASON(SVM_EXIT_WRITE_DR14                 , 62, "Write DR14."),
    EXIT_REASON(SVM_EXIT_WRITE_DR15                 , 63, "Write DR15."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_0                , 64, "Exception Vector 0  (0x0)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_1                , 65, "Exception Vector 1  (0x1)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_2                , 66, "Exception Vector 2  (0x2)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_3                , 67, "Exception Vector 3  (0x3)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_4                , 68, "Exception Vector 4  (0x4)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_5                , 69, "Exception Vector 5  (0x5)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_6                , 70, "Exception Vector 6  (0x6)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_7                , 71, "Exception Vector 7  (0x7)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_8                , 72, "Exception Vector 8  (0x8)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_9                , 73, "Exception Vector 9  (0x9)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_A                , 74, "Exception Vector 10 (0xA)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_B                , 75, "Exception Vector 11 (0xB)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_C                , 76, "Exception Vector 12 (0xC)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_D                , 77, "Exception Vector 13 (0xD)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_E                , 78, "Exception Vector 14 (0xE)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_F                , 79, "Exception Vector 15 (0xF)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_10               , 80, "Exception Vector 16 (0x10)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_11               , 81, "Exception Vector 17 (0x11)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_12               , 82, "Exception Vector 18 (0x12)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_13               , 83, "Exception Vector 19 (0x13)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_14               , 84, "Exception Vector 20 (0x14)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_15               , 85, "Exception Vector 21 (0x15)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_16               , 86, "Exception Vector 22 (0x16)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_17               , 87, "Exception Vector 23 (0x17)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_18               , 88, "Exception Vector 24 (0x18)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_19               , 89, "Exception Vector 25 (0x19)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_1A               , 90, "Exception Vector 26 (0x1A)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_1B               , 91, "Exception Vector 27 (0x1B)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_1C               , 92, "Exception Vector 28 (0x1C)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_1D               , 93, "Exception Vector 29 (0x1D)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_1E               , 94, "Exception Vector 30 (0x1E)."),
    EXIT_REASON(SVM_EXIT_EXCEPTION_1F               , 95, "Exception Vector 31 (0x1F)."),
    EXIT_REASON(SVM_EXIT_INTR                       , 96, "Physical maskable interrupt."),
    EXIT_REASON(SVM_EXIT_NMI                        , 97, "Physical non-maskable interrupt."),
    EXIT_REASON(SVM_EXIT_SMI                        , 98, "System management interrupt."),
    EXIT_REASON(SVM_EXIT_INIT                       , 99, "Physical INIT signal."),
    EXIT_REASON(SVM_EXIT_VINTR                      ,100, "Virtual interrupt."),
    EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE              ,101, "Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),
    EXIT_REASON(SVM_EXIT_IDTR_READ                  ,102, "Read IDTR."),
    EXIT_REASON(SVM_EXIT_GDTR_READ                  ,103, "Read GDTR."),
    EXIT_REASON(SVM_EXIT_LDTR_READ                  ,104, "Read LDTR."),
    EXIT_REASON(SVM_EXIT_TR_READ                    ,105, "Read TR."),
    EXIT_REASON(SVM_EXIT_IDTR_WRITE                 ,106, "Write IDTR."),
    EXIT_REASON(SVM_EXIT_GDTR_WRITE                 ,107, "Write GDTR."),
    EXIT_REASON(SVM_EXIT_LDTR_WRITE                 ,108, "Write LDTR."),
    EXIT_REASON(SVM_EXIT_TR_WRITE                   ,109, "Write TR."),
    EXIT_REASON(SVM_EXIT_RDTSC                      ,110, "RDTSC instruction."),
    EXIT_REASON(SVM_EXIT_RDPMC                      ,111, "RDPMC instruction."),
    EXIT_REASON(SVM_EXIT_PUSHF                      ,112, "PUSHF instruction."),
    EXIT_REASON(SVM_EXIT_POPF                       ,113, "POPF instruction."),
    EXIT_REASON(SVM_EXIT_CPUID                      ,114, "CPUID instruction."),
    EXIT_REASON(SVM_EXIT_RSM                        ,115, "RSM instruction."),
    EXIT_REASON(SVM_EXIT_IRET                       ,116, "IRET instruction."),
    EXIT_REASON(SVM_EXIT_SWINT                      ,117, "Software interrupt (INTn instructions)."),
    EXIT_REASON(SVM_EXIT_INVD                       ,118, "INVD instruction."),
    EXIT_REASON(SVM_EXIT_PAUSE                      ,119, "PAUSE instruction."),
    EXIT_REASON(SVM_EXIT_HLT                        ,120, "HLT instruction."),
    EXIT_REASON(SVM_EXIT_INVLPG                     ,121, "INVLPG instruction."),
    EXIT_REASON(SVM_EXIT_INVLPGA                    ,122, "INVLPGA instruction."),
    EXIT_REASON(SVM_EXIT_IOIO                       ,123, "IN/OUT accessing protected port (EXITINFO1 field provides more information)."),
    EXIT_REASON(SVM_EXIT_MSR                        ,124, "RDMSR or WRMSR access to protected MSR."),
    EXIT_REASON(SVM_EXIT_TASK_SWITCH                ,125, "Task switch."),
    EXIT_REASON(SVM_EXIT_FERR_FREEZE                ,126, "FP legacy handling enabled, and processor is frozen in an x87/mmx instruction waiting for an interrupt."),
    EXIT_REASON(SVM_EXIT_SHUTDOWN                   ,127, "Shutdown."),
    EXIT_REASON(SVM_EXIT_VMRUN                      ,128, "VMRUN instruction."),
    EXIT_REASON(SVM_EXIT_VMMCALL                    ,129, "VMMCALL instruction."),
    EXIT_REASON(SVM_EXIT_VMLOAD                     ,130, "VMLOAD instruction."),
    EXIT_REASON(SVM_EXIT_VMSAVE                     ,131, "VMSAVE instruction."),
    EXIT_REASON(SVM_EXIT_STGI                       ,132, "STGI instruction."),
    EXIT_REASON(SVM_EXIT_CLGI                       ,133, "CLGI instruction."),
    EXIT_REASON(SVM_EXIT_SKINIT                     ,134, "SKINIT instruction."),
    EXIT_REASON(SVM_EXIT_RDTSCP                     ,135, "RDTSCP instruction."),
    EXIT_REASON(SVM_EXIT_ICEBP                      ,136, "ICEBP instruction."),
    EXIT_REASON(SVM_EXIT_WBINVD                     ,137, "WBINVD instruction."),
    EXIT_REASON(SVM_EXIT_MONITOR                    ,138, "MONITOR instruction."),
    EXIT_REASON(SVM_EXIT_MWAIT_UNCOND               ,139, "MWAIT instruction unconditional."),
    EXIT_REASON(SVM_EXIT_MWAIT_ARMED                ,140, "MWAIT instruction when armed."),
    EXIT_REASON(SVM_EXIT_NPF                        ,1024, "Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault)."),
    EXIT_REASON_NIL()
};
# undef EXIT_REASON
# undef EXIT_REASON_NIL
#endif /* VBOX_WITH_STATISTICS */
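/* Note: hwaccmR3InitCPU() below selects between the two tables with
 * ASMIsIntelCpu() and registers one STAM counter per non-NULL entry under
 * "/HWACCM/CPU%d/Exit/Reason/%02x", so the array index doubles as the
 * hardware exit code. */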
 
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static int hwaccmR3InitCPU(PVM pVM);
static int hwaccmR3TermCPU(PVM pVM);


/**
 * Initializes the HWACCM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(int) HWACCMR3Init(PVM pVM)
{
    LogFlow(("HWACCMR3Init\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, hwaccm.s, 32);
    AssertCompile(sizeof(pVM->hwaccm.s) <= sizeof(pVM->hwaccm.padding));

    /* Some structure checks. */
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));

    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.TR) == 0x490, ("guest.TR offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.TR)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8CPL) == 0x4CB, ("guest.u8CPL offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8CPL)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64EFER) == 0x4D0, ("guest.u64EFER offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64EFER)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64CR4) == 0x548, ("guest.u64CR4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64CR4)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64RIP) == 0x578, ("guest.u64RIP offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64RIP)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64RSP) == 0x5D8, ("guest.u64RSP offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64RSP)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64CR2) == 0x640, ("guest.u64CR2 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64CR2)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64GPAT) == 0x668, ("guest.u64GPAT offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64GPAT)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64LASTEXCPTO) == 0x690, ("guest.u64LASTEXCPTO offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64LASTEXCPTO)));
    AssertReleaseMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));


    /*
     * Register the saved state data unit.
     */
    int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),
                                   NULL, NULL, NULL,
                                   NULL, hwaccmR3Save, NULL,
                                   NULL, hwaccmR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    /* Misc initialisation. */
    pVM->hwaccm.s.vmx.fSupported = false;
    pVM->hwaccm.s.svm.fSupported = false;
    pVM->hwaccm.s.vmx.fEnabled   = false;
    pVM->hwaccm.s.svm.fEnabled   = false;

    pVM->hwaccm.s.fNestedPaging  = false;
    pVM->hwaccm.s.fLargePages    = false;

    /* Disabled by default. */
    pVM->fHWACCMEnabled = false;

    /*
     * Check CFGM options.
     */
    PCFGMNODE pRoot      = CFGMR3GetRoot(pVM);
    PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
    /* Nested paging: disabled by default. */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableNestedPaging", &pVM->hwaccm.s.fAllowNestedPaging, false);
    AssertRC(rc);

    /* Large pages: disabled by default. */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableLargePages", &pVM->hwaccm.s.fLargePages, false);
    AssertRC(rc);

    /* VT-x VPID: disabled by default. */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableVPID", &pVM->hwaccm.s.vmx.fAllowVPID, false);
    AssertRC(rc);

    /* HWACCM support must be explicitly enabled in the configuration file. */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hwaccm.s.fAllowed, false);
    AssertRC(rc);
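    /* Note: these keys live under the per-VM CFGM node "HWVirtExt/". The
     * frontend (see the ConsoleImpl2.cpp remark further down) fills them in
     * from the machine settings; for experiments they can also be forced via
     * extradata, e.g. (hypothetical VM name):
     *   VBoxManage setextradata "MyVM" "VBoxInternal/HWVirtExt/Enabled" 1
     * since extradata keys under "VBoxInternal/" are merged into the CFGM tree. */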
 
    /* TPR patching for 32-bit (Windows) guests with an IO-APIC: disabled by default. */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hwaccm.s.fTRPPatchingAllowed, false);
    AssertRC(rc);

#ifdef RT_OS_DARWIN
    if (VMMIsHwVirtExtForced(pVM) != pVM->hwaccm.s.fAllowed)
#else
    if (VMMIsHwVirtExtForced(pVM) && !pVM->hwaccm.s.fAllowed)
#endif
    {
        AssertLogRelMsgFailed(("VMMIsHwVirtExtForced=%RTbool fAllowed=%RTbool\n",
                               VMMIsHwVirtExtForced(pVM), pVM->hwaccm.s.fAllowed));
        return VERR_HWACCM_CONFIG_MISMATCH;
    }

    if (VMMIsHwVirtExtForced(pVM))
        pVM->fHWACCMEnabled = true;

#if HC_ARCH_BITS == 32
    /* 64-bit mode is configurable and it depends on both the kernel mode and VT-x.
     * (To use the default, don't set 64bitEnabled in CFGM.) */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, false);
    AssertLogRelRCReturn(rc, rc);
    if (pVM->hwaccm.s.fAllow64BitGuests)
    {
# ifdef RT_OS_DARWIN
        if (!VMMIsHwVirtExtForced(pVM))
# else
        if (!pVM->hwaccm.s.fAllowed)
# endif
            return VM_SET_ERROR(pVM, VERR_INVALID_PARAMETER, "64-bit guest support was requested without also enabling HWVirtEx (VT-x/AMD-V).");
    }
#else
    /* On 64-bit hosts 64-bit guest support is enabled by default, but allow this to be overridden
     * via VBoxInternal/HWVirtExt/64bitEnabled=0. (ConsoleImpl2.cpp doesn't set this to false for 64-bit.) */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, true);
    AssertLogRelRCReturn(rc, rc);
#endif


    /* Determine the init method for AMD-V and VT-x: either one global init for each host CPU
     * or local init each time we wish to execute guest code.
     *
     * Default false for Mac OS X and Windows due to the higher risk of conflicts with other hypervisors.
     */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "Exclusive", &pVM->hwaccm.s.fGlobalInit,
#if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
                            false
#else
                            true
#endif
                           );

    /* Max number of resume loops. */
    rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hwaccm.s.cMaxResumeLoops, 0 /* set by R0 later */);
    AssertRC(rc);

    return rc;
}
 
/**
 * Initializes the per-VCPU HWACCM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
static int hwaccmR3InitCPU(PVM pVM)
{
    LogFlow(("hwaccmR3InitCPU\n"));

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->hwaccm.s.fActive = false;
    }

#ifdef VBOX_WITH_STATISTICS
    STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchSuccess,   STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Success",  STAMUNIT_OCCURENCES,    "Number of times an instruction was successfully patched.");
    STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchFailure,   STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Failed",   STAMUNIT_OCCURENCES,    "Number of unsuccessful patch attempts.");
    STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Success",STAMUNIT_OCCURENCES,    "Number of times an instruction was successfully replaced.");
    STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Failed", STAMUNIT_OCCURENCES,    "Number of unsuccessful replacement attempts.");

    /*
     * Statistics.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        int    rc;

        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of RTMpPokeCpu",
                             "/PROF/HWACCM/CPU%d/Poke", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait",
                             "/PROF/HWACCM/CPU%d/PokeWait", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait when RTMpPokeCpu fails",
                             "/PROF/HWACCM/CPU%d/PokeWaitFailed", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry",
                             "/PROF/HWACCM/CPU%d/SwitchToGC", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1",
                             "/PROF/HWACCM/CPU%d/SwitchFromGC_1", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2",
                             "/PROF/HWACCM/CPU%d/SwitchFromGC_2", i);
        AssertRC(rc);
# if 1 /* temporary for tracking down darwin holdup. */
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O",
                             "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub1", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs",
                             "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub2", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions",
                             "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub3", i);
        AssertRC(rc);
# endif
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch",
                             "/PROF/HWACCM/CPU%d/InGC", i);
        AssertRC(rc);

# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",
                             "/PROF/HWACCM/CPU%d/Switcher3264", i);
        AssertRC(rc);
# endif

# define HWACCM_REG_COUNTER(a, b) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Exit/event counter", b, i); \
        AssertRC(rc);
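        /* HWACCM_REG_COUNTER expands in place and captures 'rc', 'pVM' and the
         * loop index 'i' from the enclosing scope; for example
         *   HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitHlt, "/HWACCM/CPU%d/Exit/Instr/Hlt");
         * registers one counter per VCPU, with STAMR3RegisterF substituting 'i'
         * for the %d in the path. */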
 
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowNM,           "/HWACCM/CPU%d/Exit/Trap/Shw/#NM");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNM,            "/HWACCM/CPU%d/Exit/Trap/Gst/#NM");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPF,           "/HWACCM/CPU%d/Exit/Trap/Shw/#PF");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestPF,            "/HWACCM/CPU%d/Exit/Trap/Gst/#PF");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestUD,            "/HWACCM/CPU%d/Exit/Trap/Gst/#UD");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestSS,            "/HWACCM/CPU%d/Exit/Trap/Gst/#SS");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNP,            "/HWACCM/CPU%d/Exit/Trap/Gst/#NP");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestGP,            "/HWACCM/CPU%d/Exit/Trap/Gst/#GP");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestMF,            "/HWACCM/CPU%d/Exit/Trap/Gst/#MF");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDE,            "/HWACCM/CPU%d/Exit/Trap/Gst/#DE");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDB,            "/HWACCM/CPU%d/Exit/Trap/Gst/#DB");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvpg,              "/HWACCM/CPU%d/Exit/Instr/Invlpg");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvd,               "/HWACCM/CPU%d/Exit/Instr/Invd");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid,              "/HWACCM/CPU%d/Exit/Instr/Cpuid");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc,              "/HWACCM/CPU%d/Exit/Instr/Rdtsc");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdpmc,              "/HWACCM/CPU%d/Exit/Instr/Rdpmc");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdmsr,              "/HWACCM/CPU%d/Exit/Instr/Rdmsr");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitWrmsr,              "/HWACCM/CPU%d/Exit/Instr/Wrmsr");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMwait,              "/HWACCM/CPU%d/Exit/Instr/Mwait");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMonitor,            "/HWACCM/CPU%d/Exit/Instr/Monitor");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxWrite,           "/HWACCM/CPU%d/Exit/Instr/DR/Write");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxRead,            "/HWACCM/CPU%d/Exit/Instr/DR/Read");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCLTS,               "/HWACCM/CPU%d/Exit/Instr/CLTS");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitLMSW,               "/HWACCM/CPU%d/Exit/Instr/LMSW");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCli,                "/HWACCM/CPU%d/Exit/Instr/Cli");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitSti,                "/HWACCM/CPU%d/Exit/Instr/Sti");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPushf,              "/HWACCM/CPU%d/Exit/Instr/Pushf");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPopf,               "/HWACCM/CPU%d/Exit/Instr/Popf");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIret,               "/HWACCM/CPU%d/Exit/Instr/Iret");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInt,                "/HWACCM/CPU%d/Exit/Instr/Int");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitHlt,                "/HWACCM/CPU%d/Exit/Instr/Hlt");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOWrite,            "/HWACCM/CPU%d/Exit/IO/Write");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIORead,             "/HWACCM/CPU%d/Exit/IO/Read");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringWrite,      "/HWACCM/CPU%d/Exit/IO/WriteString");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringRead,       "/HWACCM/CPU%d/Exit/IO/ReadString");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow,          "/HWACCM/CPU%d/Exit/IrqWindow");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume,          "/HWACCM/CPU%d/Exit/MaxResume");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending,     "/HWACCM/CPU%d/Exit/PreemptPending");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq,         "/HWACCM/CPU%d/Switch/IrqPending");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchToR3,             "/HWACCM/CPU%d/Switch/ToR3");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntInject,              "/HWACCM/CPU%d/Irq/Inject");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntReinject,            "/HWACCM/CPU%d/Irq/Reinject");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatPendingHostIrq,         "/HWACCM/CPU%d/Irq/PendingOnHost");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPage,              "/HWACCM/CPU%d/Flush/Page");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageManual,        "/HWACCM/CPU%d/Flush/Page/Virt");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPhysPageManual,    "/HWACCM/CPU%d/Flush/Page/Phys");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLB,               "/HWACCM/CPU%d/Flush/TLB");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBManual,         "/HWACCM/CPU%d/Flush/TLB/Manual");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBCRxChange,      "/HWACCM/CPU%d/Flush/TLB/CRx");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageInvlpg,        "/HWACCM/CPU%d/Flush/Page/Invlpg");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch,    "/HWACCM/CPU%d/Flush/TLB/Switch");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch,  "/HWACCM/CPU%d/Flush/TLB/Skipped");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushASID,              "/HWACCM/CPU%d/Flush/TLB/ASID");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBInvlpga,        "/HWACCM/CPU%d/Flush/TLB/PhysInvl");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown,           "/HWACCM/CPU%d/Flush/Shootdown/Page");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush,      "/HWACCM/CPU%d/Flush/Shootdown/TLB");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset,              "/HWACCM/CPU%d/TSC/Offset");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept,           "/HWACCM/CPU%d/TSC/Intercept");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow,   "/HWACCM/CPU%d/TSC/InterceptOverflow");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxArmed,               "/HWACCM/CPU%d/Debug/Armed");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxContextSwitch,       "/HWACCM/CPU%d/Debug/ContextSwitch");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxIOCheck,             "/HWACCM/CPU%d/Debug/IOCheck");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatLoadMinimal,            "/HWACCM/CPU%d/Load/Minimal");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatLoadFull,               "/HWACCM/CPU%d/Load/Full");

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFpu64SwitchBack,        "/HWACCM/CPU%d/Switch64/Fpu");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDebug64SwitchBack,      "/HWACCM/CPU%d/Switch64/Debug");
#endif

        for (unsigned j = 0; j < RT_ELEMENTS(pVCpu->hwaccm.s.StatExitCRxWrite); j++)
        {
            rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Number of CRx writes",
                                "/HWACCM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
            AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Number of CRx reads",
                                "/HWACCM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
            AssertRC(rc);
        }

#undef HWACCM_REG_COUNTER

        pVCpu->hwaccm.s.paStatExitReason = NULL;

        rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hwaccm.s.paStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatExitReason);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
            for (int j = 0; j < MAX_EXITREASON_STAT; j++)
            {
                if (papszDesc[j])
                {
                    rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
                                        papszDesc[j], "/HWACCM/CPU%d/Exit/Reason/%02x", i, j);
                    AssertRC(rc);
                }
            }
            rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HWACCM/CPU%d/Exit/Reason/#NPF", i);
            AssertRC(rc);
        }
        pVCpu->hwaccm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatExitReason);
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
# else
        Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR);
# endif
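        /* These counters are presumably bumped from ring-0 during world
         * switches, hence the MMHyperAlloc allocation (mapped in both ring-3
         * and ring-0) and the cached ring-0 address obtained via
         * MMHyperR3ToR0() above; a plain ring-3 heap allocation would not be
         * addressable from ring-0. */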
 
        rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatInjectedIrqs);
        AssertRCReturn(rc, rc);
        pVCpu->hwaccm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
# else
        Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
# endif
        for (unsigned j = 0; j < 255; j++)
            STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.",
                            (j < 0x20) ? "/HWACCM/CPU%d/Interrupt/Trap/%02X" : "/HWACCM/CPU%d/Interrupt/IRQ/%02X", i, j);

    }
#endif /* VBOX_WITH_STATISTICS */

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
        strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
        pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
    }
#endif
    return VINF_SUCCESS;
}
 
/**
 * Called when an init phase has completed.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM.
 * @param   enmWhat             The phase that completed.
 */
VMMR3_INT_DECL(int) HWACCMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    switch (enmWhat)
    {
    case VMINITCOMPLETED_RING3:
        return hwaccmR3InitCPU(pVM);
    default:
        return VINF_SUCCESS;
    }
}

/**
 * Turns off normal raw mode features.
 *
 * @param   pVM         The VM to operate on.
 */
static void hwaccmR3DisableRawMode(PVM pVM)
{
    /* Disable PATM & CSAM. */
    PATMR3AllowPatching(pVM, false);
    CSAMDisableScanning(pVM);

    /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
    SELMR3DisableMonitoring(pVM);
    TRPMR3DisableMonitoring(pVM);

    /* Disable the switcher code (safety precaution). */
    VMMR3DisableSwitcher(pVM);

    /* Disable mapping of the hypervisor into the shadow page table. */
    PGMR3MappingsDisable(pVM);

    /* Reinit the paging mode to force the new shadow mode. */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
    }
}
 
/**
 * Initialize VT-x or AMD-V.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 */
VMMR3DECL(int) HWACCMR3InitFinalizeR0(PVM pVM)
{
    int rc;

    /* Hack to allow users to work around broken BIOSes that incorrectly set EFER.SVME, which makes us believe somebody else
     * is already using AMD-V.
     */
    if (    !pVM->hwaccm.s.vmx.fSupported
        &&  !pVM->hwaccm.s.svm.fSupported
        &&  pVM->hwaccm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
        &&  RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
    {
        LogRel(("HWACCM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
        pVM->hwaccm.s.svm.fSupported        = true;
        pVM->hwaccm.s.svm.fIgnoreInUseError = true;
    }
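    /* Note: RTEnvExist() above only tests for the variable's presence, so any
     * value will do; e.g. launching the VM process with
     * VBOX_HWVIRTEX_IGNORE_SVM_IN_USE=1 in its environment activates this
     * escape hatch. */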
 
    else if (    !pVM->hwaccm.s.vmx.fSupported
             &&  !pVM->hwaccm.s.svm.fSupported)
    {
        LogRel(("HWACCM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hwaccm.s.lLastError));
        LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));

        if (VMMIsHwVirtExtForced(pVM))
        {
            switch (pVM->hwaccm.s.lLastError)
            {
            case VERR_VMX_NO_VMX:
                return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
            case VERR_VMX_IN_VMX_ROOT_MODE:
                return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor.");
            case VERR_SVM_IN_USE:
                return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor.");
            case VERR_SVM_NO_SVM:
                return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available.");
            case VERR_SVM_DISABLED:
                return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS.");
            default:
                return pVM->hwaccm.s.lLastError;
            }
        }
        return VINF_SUCCESS;
    }

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        rc = SUPR3QueryVTxSupported();
        if (RT_FAILURE(rc))
        {
#ifdef RT_OS_LINUX
            LogRel(("HWACCM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));
#else
            LogRel(("HWACCM: The host kernel does not support VT-x!\n"));
#endif
            if (   pVM->cCpus > 1
                || VMMIsHwVirtExtForced(pVM))
                return rc;

            /* silently fall back to raw mode */
            return VINF_SUCCESS;
        }
    }

    if (!pVM->hwaccm.s.fAllowed)
        return VINF_SUCCESS;    /* nothing to do */

    /* Enable VT-x or AMD-V on all host CPUs. */
    rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_ENABLE, 0, NULL);
    if (RT_FAILURE(rc))
    {
        LogRel(("HWACCMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc));
        return rc;
    }
    Assert(!pVM->fHWACCMEnabled || VMMIsHwVirtExtForced(pVM));

    pVM->hwaccm.s.fHasIoApic = PDMHasIoApic(pVM);
    /* No TPR patching is required when the IO-APIC is not enabled for this VM. (Main should have taken care of this already.) */
    if (!pVM->hwaccm.s.fHasIoApic)
    {
        Assert(!pVM->hwaccm.s.fTRPPatchingAllowed); /* paranoia */
        pVM->hwaccm.s.fTRPPatchingAllowed = false;
    }

    bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
    if (pVM->hwaccm.s.vmx.fSupported)
    {
        Log(("pVM->hwaccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));

        if (    pVM->hwaccm.s.fInitialized == false
            &&  pVM->hwaccm.s.vmx.msr.feature_ctrl != 0)
        {
            uint64_t val;
            RTGCPHYS GCPhys = 0;

            LogRel(("HWACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));
            LogRel(("HWACCM: MSR_IA32_FEATURE_CONTROL      = %RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
            LogRel(("HWACCM: MSR_IA32_VMX_BASIC_INFO       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));
            LogRel(("HWACCM: VMCS id                       = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: VMCS size                     = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: VMCS physical address limit   = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
            LogRel(("HWACCM: VMCS memory type              = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: Dual monitor treatment        = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));

            LogRel(("HWACCM: MSR_IA32_VMX_PINBASED_CTLS    = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.u));
 
792
            val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
 
793
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
 
794
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
 
795
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
 
796
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
 
797
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
 
798
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));
 
799
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
 
800
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));
 
801
            val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
 
802
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
 
803
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
 
804
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
 
805
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
 
806
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
 
807
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));
 
808
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
 
809
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));
 
            LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS   = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.u));
            val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));

            val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));

            if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
            {
                LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS2  = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.u));
                val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n"));

                val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n"));
            }

            LogRel(("HWACCM: MSR_IA32_VMX_ENTRY_CTLS       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry.u));
            val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));
            val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));

            LogRel(("HWACCM: MSR_IA32_VMX_EXIT_CTLS        = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit.u));
            val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));
            val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
                LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));

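            /*
             * MSR_IA32_VMX_EPT_VPID_CAPS advertises the EPT/VPID feature set:
             * supported guest-address widths, EPT memory types, super page sizes
             * and the available INVEPT/INVVPID flush types.  It is only dumped
             * below when non-zero, i.e. when some EPT/VPID support is present.
             */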
            if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps)
            {
                LogRel(("HWACCM: MSR_IA32_VMX_EPT_VPID_CAPS    = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_eptcaps));

                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_EMT_UC\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_EMT_WC\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_EMT_WT\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_EMT_WP\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_EMT_WB\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVEPT\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVVPID\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL)
                    LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL\n"));
            }

            LogRel(("HWACCM: MSR_IA32_VMX_MISC             = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));
 
1071
            if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc) == pVM->hwaccm.s.vmx.cPreemptTimerShift)
 
1072
                LogRel(("HWACCM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc)));
 
1073
            else
 
1074
                LogRel(("HWACCM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc), pVM->hwaccm.s.vmx.cPreemptTimerShift));
 
1075
            LogRel(("HWACCM:    MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
 
1076
            LogRel(("HWACCM:    MSR_IA32_VMX_MISC_CR3_TARGET      %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
 
1077
            LogRel(("HWACCM:    MSR_IA32_VMX_MISC_MAX_MSR         %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));
 
1078
            LogRel(("HWACCM:    MSR_IA32_VMX_MISC_MSEG_ID         %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));
 
1079
 
 
1080
            LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED0       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));
 
1081
            LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED1       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));
 
1082
            LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED0       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));
 
1083
            LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED1       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));
 
1084
            LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM        = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));
 
1085
 
 
1086
            LogRel(("HWACCM: TPR shadow physaddr           = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
 
1087
 
 
1088
            /* Paranoia */
 
1089
            AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
 
1090
 
 
1091
            for (VMCPUID i = 0; i < pVM->cCpus; i++)
 
1092
            {
 
1093
                LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr      = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
 
1094
                LogRel(("HWACCM: VCPU%d: VMCS physaddr            = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
 
1095
            }
 
1096
 
 
#ifdef HWACCM_VTX_WITH_EPT
            if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
                pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
#endif /* HWACCM_VTX_WITH_EPT */
#ifdef HWACCM_VTX_WITH_VPID
            if (    (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
                &&  !pVM->hwaccm.s.fNestedPaging)    /* VPID and EPT are mutually exclusive. */
                pVM->hwaccm.s.vmx.fVPID = pVM->hwaccm.s.vmx.fAllowVPID;
#endif /* HWACCM_VTX_WITH_VPID */

            /* Unrestricted guest execution relies on EPT. */
            if (    pVM->hwaccm.s.fNestedPaging
                &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE))
            {
                pVM->hwaccm.s.vmx.fUnrestrictedGuest = true;
            }

            /* Only try once. */
            pVM->hwaccm.s.fInitialized = true;

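            /*
             * Layout of the VMMDev heap block allocated below, as implied by the
             * offsets used: page 0 holds the TSS and the interrupt redirection
             * bitmap, pages 1+2 hold the I/O permission bitmap, and page 3 holds
             * the 4 MB page directory used for the identity mapping when the
             * guest runs without paging under EPT.
             */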
            if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
            {
                /* Allocate three pages for the TSS we need for real mode emulation (two of them for the I/O bitmap). */
                rc = PDMR3VMMDevHeapAlloc(pVM, HWACCM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hwaccm.s.vmx.pRealModeTSS);
                if (RT_SUCCESS(rc))
                {
                    /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. */
                    ASMMemZero32(pVM->hwaccm.s.vmx.pRealModeTSS, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
                    pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
                    /* Bit set to 0 means redirection enabled. */
                    memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
                    /* Allow all port I/O, so the VT-x I/O intercepts do their job. */
                    memset(pVM->hwaccm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE * 2);
                    *((unsigned char *)pVM->hwaccm.s.vmx.pRealModeTSS + HWACCM_VTX_TSS_SIZE - 2) = 0xff;

                    /* Construct a 1024 element page directory with 4 MB pages for the
                     * identity mapping used in real and protected mode without paging
                     * with EPT.
                     */
                    pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hwaccm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
                    for (unsigned i = 0; i < X86_PG_ENTRIES; i++)
                    {
                        pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u  = _4M * i;
                        pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;
                    }

                    /* We convert it here every time as PCI regions could be reconfigured. */
                    rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
                    AssertRC(rc);
                    LogRel(("HWACCM: Real Mode TSS guest physaddr  = %RGp\n", GCPhys));

                    rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
                    AssertRC(rc);
                    LogRel(("HWACCM: Non-Paging Mode EPT CR3       = %RGp\n", GCPhys));
                }
                else
                {
                    LogRel(("HWACCM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
                    pVM->hwaccm.s.vmx.pRealModeTSS = NULL;
                    pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = NULL;
                }
            }

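            /* Hand the remaining setup to ring-0, which (as far as this module is
               concerned) prepares the per-VCPU VMX state on the host side. */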
            rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
            AssertRC(rc);
            if (rc == VINF_SUCCESS)
            {
                pVM->fHWACCMEnabled = true;
                pVM->hwaccm.s.vmx.fEnabled = true;
                hwaccmR3DisableRawMode(pVM);

                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
#ifdef VBOX_ENABLE_64_BITS_GUESTS
                if (pVM->hwaccm.s.fAllow64BitGuests)
                {
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);            /* 64 bits only on Intel CPUs */
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
                }
                else
                /* Turn on NXE if PAE has been enabled *and* the host has turned on NXE (we reuse the host EFER in the switcher). */
                /** @todo This needs to be fixed properly! */
                if (    CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)
                    &&  (pVM->hwaccm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);

                LogRel((pVM->hwaccm.s.fAllow64BitGuests
                        ? "HWACCM: 32-bit and 64-bit guests supported.\n"
                        : "HWACCM: 32-bit guests supported.\n"));
#else
                LogRel(("HWACCM: 32-bit guests supported.\n"));
#endif
                LogRel(("HWACCM: VMX enabled!\n"));
                if (pVM->hwaccm.s.fNestedPaging)
                {
                    LogRel(("HWACCM: Enabled nested paging\n"));
                    LogRel(("HWACCM: EPT root page                 = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));
                    if (pVM->hwaccm.s.vmx.fUnrestrictedGuest)
                        LogRel(("HWACCM: Unrestricted guest execution enabled!\n"));

#if HC_ARCH_BITS == 64
                    if (pVM->hwaccm.s.fLargePages)
                    {
                        /* Use large (2 MB) pages for our EPT PDEs where possible. */
                        PGMSetLargePageUsage(pVM, true);
                        LogRel(("HWACCM: Large page support enabled!\n"));
                    }
#endif
                }
                else
                    Assert(!pVM->hwaccm.s.vmx.fUnrestrictedGuest);

                if (pVM->hwaccm.s.vmx.fVPID)
                    LogRel(("HWACCM: Enabled VPID\n"));

                if (   pVM->hwaccm.s.fNestedPaging
                    || pVM->hwaccm.s.vmx.fVPID)
                {
                    LogRel(("HWACCM: enmFlushPage    %d\n", pVM->hwaccm.s.vmx.enmFlushPage));
                    LogRel(("HWACCM: enmFlushContext %d\n", pVM->hwaccm.s.vmx.enmFlushContext));
                }

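                /*
                 * Two TPR acceleration strategies exist: if the CPU offers a
                 * virtual APIC page (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) the
                 * hardware handles TPR reads/writes itself; otherwise the guest's
                 * TPR accesses can be patched to trap into the VMM (see the
                 * hwaccmR3ReplaceTprInstr callback further down).
                 */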
                /* TPR patching status logging. */
                if (pVM->hwaccm.s.fTRPPatchingAllowed)
                {
                    if (    (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
                        &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
                    {
                        pVM->hwaccm.s.fTRPPatchingAllowed = false;  /* not necessary as we have a hardware solution. */
                        LogRel(("HWACCM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
                    }
                    else
                    {
                        uint32_t u32Eax, u32Dummy;

                        /* TPR patching needs access to the MSR_K8_LSTAR msr. */
                        ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
                        if (    u32Eax < 0x80000001
                            ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
                        {
                            pVM->hwaccm.s.fTRPPatchingAllowed = false;
                            LogRel(("HWACCM: TPR patching disabled (long mode not supported).\n"));
                        }
                    }
                }
                LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));

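                /*
                 * The VMX preemption timer counts down at a rate derived from the
                 * TSC (TSC shifted right by cPreemptTimerShift, per the
                 * MSR_IA32_VMX_MISC preemption bit field logged above) and forces
                 * a VM-exit when it reaches zero.
                 */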
                /*
                 * Check for preemption timer config override and log the state of it.
                 */
                if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
                {
                    PCFGMNODE pCfgHwAccM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HWACCM");
                    int rc2 = CFGMR3QueryBoolDef(pCfgHwAccM, "UsePreemptTimer", &pVM->hwaccm.s.vmx.fUsePreemptTimer, true);
                    AssertLogRelRC(rc2);
                }
                if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
                    LogRel(("HWACCM: Using the VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hwaccm.s.vmx.cPreemptTimerShift));
            }
            else
            {
                LogRel(("HWACCM: VMX setup failed with rc=%Rrc!\n", rc));
                LogRel(("HWACCM: Last instruction error %x\n", pVM->aCpus[0].hwaccm.s.vmx.lasterror.ulInstrError));
                pVM->fHWACCMEnabled = false;
            }
        }
    }
    else
    if (pVM->hwaccm.s.svm.fSupported)
    {
        Log(("pVM->hwaccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));

        if (pVM->hwaccm.s.fInitialized == false)
        {
            /* Erratum 170 which requires a forced TLB flush for each world switch:
             * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
             *
             * All BH-G1/2 and DH-G1/2 models include a fix:
             * Athlon X2:   0x6b 1/2
             *              0x68 1/2
             * Athlon 64:   0x7f 1
             *              0x6f 2
             * Sempron:     0x7f 1/2
             *              0x6f 2
             *              0x6c 2
             *              0x7c 2
             * Turion 64:   0x68 2
             *
             */
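            /*
             * Worked example of the CPUID leaf 1 decoding below (an illustrative
             * value, not from a real dump): u32Version = 0x00060FC2 yields
             *   u32BaseFamily = (0x00060FC2 >> 8) & 0xf            = 0xf
             *   u32Family     = 0xf + ((0x00060FC2 >> 20) & 0x7f)  = 0xf
             *   u32Model      = ((0x00060FC2 >> 4) & 0xf)
             *                 | (((0x00060FC2 >> 16) & 0xf) << 4)  = 0x6c
             *   u32Stepping   =  0x00060FC2 & 0xf                  = 2
             * i.e. a Sempron model 0x6c stepping 2, which is on the fixed list
             * above and thus not flagged.
             */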
            uint32_t u32Dummy;
            uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
            ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
            u32BaseFamily = (u32Version >> 8) & 0xf;
            u32Family    = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
            u32Model     = ((u32Version >> 4) & 0xf);
            u32Model     = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
            u32Stepping  = u32Version & 0xf;
            if (    u32Family == 0xf
                &&  !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) &&  u32Stepping >= 1)
                &&  !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) &&  u32Stepping >= 2))
            {
                LogRel(("HWACCM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
            }

            LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));
            LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
            LogRel(("HWACCM: AMD HWCR MSR                      = %RX64\n", pVM->hwaccm.s.svm.msrHWCR));
            LogRel(("HWACCM: AMD-V revision                    = %X\n", pVM->hwaccm.s.svm.u32Rev));
            LogRel(("HWACCM: AMD-V max ASID                    = %d\n", pVM->hwaccm.s.uMaxASID));
            LogRel(("HWACCM: AMD-V features                    = %X\n", pVM->hwaccm.s.svm.u32Features));

            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
                LogRel(("HWACCM:    AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT)
                LogRel(("HWACCM:    AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK)
                LogRel(("HWACCM:    AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
                LogRel(("HWACCM:    AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE)
                LogRel(("HWACCM:    AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER)
                LogRel(("HWACCM:    AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER\n"));

            /* Only try once. */
            pVM->hwaccm.s.fInitialized = true;

            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
                pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;

            rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
            AssertRC(rc);
            if (rc == VINF_SUCCESS)
            {
                pVM->fHWACCMEnabled = true;
                pVM->hwaccm.s.svm.fEnabled = true;

                if (pVM->hwaccm.s.fNestedPaging)
                {
                    LogRel(("HWACCM:    Enabled nested paging\n"));
#if HC_ARCH_BITS == 64
                    if (pVM->hwaccm.s.fLargePages)
                    {
                        /* Use large (2 MB) pages for our nested paging PDEs where possible. */
                        PGMSetLargePageUsage(pVM, true);
                        LogRel(("HWACCM:    Large page support enabled!\n"));
                    }
#endif
                }

                hwaccmR3DisableRawMode(pVM);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
#ifdef VBOX_ENABLE_64_BITS_GUESTS
                if (pVM->hwaccm.s.fAllow64BitGuests)
                {
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
                }
                else
                /* Turn on NXE if PAE has been enabled. */
                if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
#endif

                LogRel((pVM->hwaccm.s.fAllow64BitGuests
                        ? "HWACCM:    32-bit and 64-bit guests supported.\n"
                        : "HWACCM:    32-bit guests supported.\n"));

                LogRel(("HWACCM:    TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
            }
            else
            {
                pVM->fHWACCMEnabled = false;
            }
        }
    }
    if (pVM->fHWACCMEnabled)
        LogRel(("HWACCM:    VT-x/AMD-V init method: %s\n", (pVM->hwaccm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
    RTLogRelSetBuffering(fOldBuffered);
    return VINF_SUCCESS;
}
 
/**
 * Applies relocations to data and code managed by this
 * component.  This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The VM.
 */
VMMR3DECL(void) HWACCMR3Relocate(PVM pVM)
{
    Log(("HWACCMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));

    /* Fetch the current paging mode during the relocate callback during state loading. */
    if (VMR3GetState(pVM) == VMSTATE_LOADING)
    {
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            PVMCPU pVCpu = &pVM->aCpus[i];

            pVCpu->hwaccm.s.enmShadowMode            = PGMGetShadowMode(pVCpu);
            Assert(pVCpu->hwaccm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
            pVCpu->hwaccm.s.vmx.enmCurrGuestMode     = PGMGetGuestMode(pVCpu);
        }
    }
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (pVM->fHWACCMEnabled)
    {
        int rc;

        switch (PGMGetHostMode(pVM))
        {
        case PGMMODE_32_BIT:
            pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
            break;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
            break;

        default:
            AssertFailed();
            break;
        }
        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "VMXGCStartVM64", &pVM->hwaccm.s.pfnVMXGCStartVM64);
        AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));

        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "SVMGCVMRun64",   &pVM->hwaccm.s.pfnSVMGCVMRun64);
        AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));

        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HWACCMSaveGuestFPU64",   &pVM->hwaccm.s.pfnSaveGuestFPU64);
        AssertReleaseMsgRC(rc, ("HWACCMSaveGuestFPU64 -> rc=%Rrc\n", rc));

        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HWACCMSaveGuestDebug64",   &pVM->hwaccm.s.pfnSaveGuestDebug64);
        AssertReleaseMsgRC(rc, ("HWACCMSaveGuestDebug64 -> rc=%Rrc\n", rc));

# ifdef DEBUG
        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HWACCMTestSwitcher64",   &pVM->hwaccm.s.pfnTest64);
        AssertReleaseMsgRC(rc, ("HWACCMTestSwitcher64 -> rc=%Rrc\n", rc));
# endif
    }
#endif
    return;
}
 
/**
 * Checks whether hardware accelerated raw mode is allowed.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(bool) HWACCMR3IsAllowed(PVM pVM)
{
    return pVM->hwaccm.s.fAllowed;
}
 
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 *
 * This is called by PGM.
 *
 * @param   pVM            The VM to operate on.
 * @param   pVCpu          The VMCPU to operate on.
 * @param   enmShadowMode  New shadow paging mode.
 * @param   enmGuestMode   New guest paging mode.
 */
VMMR3DECL(void) HWACCMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
{
    /* Ignore page mode changes during state loading. */
    if (VMR3GetState(pVCpu->pVMR3) == VMSTATE_LOADING)
        return;

    pVCpu->hwaccm.s.enmShadowMode = enmShadowMode;

    if (   pVM->hwaccm.s.vmx.fEnabled
        && pVM->fHWACCMEnabled)
    {
        if (    pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
            &&  enmGuestMode >= PGMMODE_PROTECTED)
        {
            PCPUMCTX pCtx;

            pCtx = CPUMQueryGuestCtxPtr(pVCpu);

            /* After a real mode switch to protected mode we must force
             * CPL to 0. Our real mode emulation had to set it to 3.
             */
            pCtx->ssHid.Attr.n.u2Dpl  = 0;
        }
    }

    if (pVCpu->hwaccm.s.vmx.enmCurrGuestMode != enmGuestMode)
    {
        /* Keep track of paging mode changes. */
        pVCpu->hwaccm.s.vmx.enmPrevGuestMode = pVCpu->hwaccm.s.vmx.enmCurrGuestMode;
        pVCpu->hwaccm.s.vmx.enmCurrGuestMode = enmGuestMode;

        /* Did we miss a change, because all code was executed in the recompiler? */
        if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
        {
            Log(("HWACCMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hwaccm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode)));
            pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = pVCpu->hwaccm.s.vmx.enmPrevGuestMode;
        }
    }

 
1510
    PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
 
1511
    for (unsigned j=0;j<pCache->Read.cValidEntries;j++)
 
1512
        pCache->Read.aFieldVal[j] = 0;
 
1513
}
 
/**
 * Terminates the HWACCM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is, at this point, powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(int) HWACCMR3Term(PVM pVM)
{
    if (pVM->hwaccm.s.vmx.pRealModeTSS)
    {
        PDMR3VMMDevHeapFree(pVM, pVM->hwaccm.s.vmx.pRealModeTSS);
        pVM->hwaccm.s.vmx.pRealModeTSS       = 0;
    }
    hwaccmR3TermCPU(pVM);
    return 0;
}

/**
 * Terminates the per-VCPU HWACCM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
static int hwaccmR3TermCPU(PVM pVM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

#ifdef VBOX_WITH_STATISTICS
        if (pVCpu->hwaccm.s.paStatExitReason)
        {
            MMHyperFree(pVM, pVCpu->hwaccm.s.paStatExitReason);
            pVCpu->hwaccm.s.paStatExitReason   = NULL;
            pVCpu->hwaccm.s.paStatExitReasonR0 = NIL_RTR0PTR;
        }
        if (pVCpu->hwaccm.s.paStatInjectedIrqs)
        {
            MMHyperFree(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
            pVCpu->hwaccm.s.paStatInjectedIrqs   = NULL;
            pVCpu->hwaccm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
        }
#endif

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        memset(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic));
        pVCpu->hwaccm.s.vmx.VMCSCache.uMagic = 0;
        pVCpu->hwaccm.s.vmx.VMCSCache.uPos = 0xffffffff;
#endif
    }
    return 0;
}
 
/**
 * Resets a virtual CPU.
 *
 * Used by HWACCMR3Reset and CPU hot plugging.
 *
 * @param   pVCpu   The CPU to reset.
 */
VMMR3DECL(void) HWACCMR3ResetCpu(PVMCPU pVCpu)
{
    /* On first entry we'll sync everything. */
    pVCpu->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;

    pVCpu->hwaccm.s.vmx.cr0_mask = 0;
    pVCpu->hwaccm.s.vmx.cr4_mask = 0;

    pVCpu->hwaccm.s.fActive        = false;
    pVCpu->hwaccm.s.Event.fPending = false;

    /* Reset state information for real-mode emulation in VT-x. */
    pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
    pVCpu->hwaccm.s.vmx.enmPrevGuestMode     = PGMMODE_REAL;
    pVCpu->hwaccm.s.vmx.enmCurrGuestMode     = PGMMODE_REAL;

    /* Reset the contents of the read cache. */
    PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
    for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
        pCache->Read.aFieldVal[j] = 0;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
    pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
#endif
}

/**
 * The VM is being reset.
 *
 * For the HWACCM component this means that any GDT/LDT/TSS monitors
 * need to be removed.
 *
 * @param   pVM     VM handle.
 */
VMMR3DECL(void) HWACCMR3Reset(PVM pVM)
{
    LogFlow(("HWACCMR3Reset:\n"));

    if (pVM->fHWACCMEnabled)
        hwaccmR3DisableRawMode(pVM);

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        HWACCMR3ResetCpu(pVCpu);
    }

    /* Clear all patch information. */
    pVM->hwaccm.s.pGuestPatchMem     = 0;
    pVM->hwaccm.s.pFreeGuestPatchMem = 0;
    pVM->hwaccm.s.cbGuestPatchMem    = 0;
    pVM->hwaccm.s.cPatches           = 0;
    pVM->hwaccm.s.PatchTree          = 0;
    pVM->hwaccm.s.fTPRPatchingActive = false;
    ASMMemZero32(pVM->hwaccm.s.aPatches, sizeof(pVM->hwaccm.s.aPatches));
}
 
/**
 * Callback to remove the installed TPR instruction patches (vmmcall or mov cr8),
 * restoring the original instruction bytes.
 *
 * @returns VBox strict status code.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU for the EMT we're being called on.
 * @param   pvUser  The VCPU id of the EMT the original patch request was issued on.
 */
DECLCALLBACK(VBOXSTRICTRC) hwaccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;

    /* Only execute the handler on the VCPU the original patch request was issued on. */
    if (pVCpu->idCpu != idCpu)
        return VINF_SUCCESS;

    Log(("hwaccmR3RemovePatches\n"));
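    /*
     * For each recorded patch: re-read the guest bytes and only restore the
     * original instruction if our replacement is still in place; code that has
     * been paged out or rewritten by the guest is left alone.
     */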
    for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
    {
        uint8_t         szInstr[15];
        PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
        RTGCPTR         pInstrGC = (RTGCPTR)pPatch->Core.Key;
        int             rc;

#ifdef LOG_ENABLED
        char            szOutput[256];

        rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                szOutput, sizeof(szOutput), NULL);
        if (RT_SUCCESS(rc))
            Log(("Patched instr: %s\n", szOutput));
#endif

        /* Check if the instruction is still the same. */
        rc = PGMPhysSimpleReadGCPtr(pVCpu, szInstr, pInstrGC, pPatch->cbNewOp);
        if (rc != VINF_SUCCESS)
        {
            Log(("Patched code removed? (rc=%Rrc)\n", rc));
            continue;   /* swapped out or otherwise removed; skip it. */
        }

        if (memcmp(szInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
        {
            Log(("Patched instruction was changed! (rc=%Rrc)\n", rc));
            continue;   /* skip it. */
        }

        rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
        AssertRC(rc);

#ifdef LOG_ENABLED
        rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                szOutput, sizeof(szOutput), NULL);
        if (RT_SUCCESS(rc))
            Log(("Original instr: %s\n", szOutput));
#endif
    }
    pVM->hwaccm.s.cPatches           = 0;
    pVM->hwaccm.s.PatchTree          = 0;
    pVM->hwaccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem;
    pVM->hwaccm.s.fTPRPatchingActive = false;
    return VINF_SUCCESS;
}
 
/**
 * Enables patching in a VT-x/AMD-V guest (worker).
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   idCpu       VCPU to execute hwaccmR3RemovePatches on.
 * @param   pPatchMem   Patch memory range.
 * @param   cbPatchMem  Size of the memory range.
 */
int hwaccmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
{
    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)idCpu);
    AssertRC(rc);

    pVM->hwaccm.s.pGuestPatchMem     = pPatchMem;
    pVM->hwaccm.s.pFreeGuestPatchMem = pPatchMem;
    pVM->hwaccm.s.cbGuestPatchMem    = cbPatchMem;
    return VINF_SUCCESS;
}

/**
 * Enables patching in a VT-x/AMD-V guest.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatchMem   Patch memory range.
 * @param   cbPatchMem  Size of the memory range.
 */
VMMR3DECL(int)  HWACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
{
    Log(("HWACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
    if (pVM->cCpus > 1)
    {
        /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
        int rc = VMR3ReqCallNoWaitU(pVM->pUVM, VMCPUID_ANY_QUEUE,
                                    (PFNRT)hwaccmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
        AssertRC(rc);
        return rc;
    }
    return hwaccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
}
 
/**
 * Disables patching in a VT-x/AMD-V guest.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatchMem   Patch memory range.
 * @param   cbPatchMem  Size of the memory range.
 */
VMMR3DECL(int)  HWACMMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
{
    Log(("HWACMMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));

    Assert(pVM->hwaccm.s.pGuestPatchMem == pPatchMem);
    Assert(pVM->hwaccm.s.cbGuestPatchMem == cbPatchMem);

    /** @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)! */
    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)VMMGetCpuId(pVM));
    AssertRC(rc);

    pVM->hwaccm.s.pGuestPatchMem     = 0;
    pVM->hwaccm.s.pFreeGuestPatchMem = 0;
    pVM->hwaccm.s.cbGuestPatchMem    = 0;
    pVM->hwaccm.s.fTPRPatchingActive = false;
    return VINF_SUCCESS;
}
 

/**
 * Callback to patch a TPR instruction (vmmcall or mov cr8).
 *
 * @returns VBox strict status code.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU for the EMT we're being called on.
 * @param   pvUser  User specified CPU context.
 */
DECLCALLBACK(VBOXSTRICTRC) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    VMCPUID      idCpu  = (VMCPUID)(uintptr_t)pvUser;
    PCPUMCTX     pCtx   = CPUMQueryGuestCtxPtr(pVCpu);
    PDISCPUSTATE pDis   = &pVCpu->hwaccm.s.DisState;
    unsigned     cbOp;

    /* Only execute the handler on the VCPU the original patch request was issued on.
       (The other VCPU(s) might not yet have switched to protected mode.) */
    if (pVCpu->idCpu != idCpu)
        return VINF_SUCCESS;

    Log(("hwaccmR3ReplaceTprInstr: %RGv\n", pCtx->rip));

    /* Two or more VCPUs were racing to patch this instruction. */
    PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    if (pPatch)
        return VINF_SUCCESS;

    Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches));

    int rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
    AssertRC(rc);
    if (    rc == VINF_SUCCESS
        &&  pDis->pCurInstr->opcode == OP_MOV
        &&  cbOp >= 3)
    {
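        /* 0F 01 D9 encodes the VMMCALL instruction; patched TPR accesses are
           replaced with it so they trap to the VMM, which emulates the access. */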
        uint8_t         aVMMCall[3] = { 0xf, 0x1, 0xd9 };
        uint32_t        idx = pVM->hwaccm.s.cPatches;

        pPatch = &pVM->hwaccm.s.aPatches[idx];

        rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
        AssertRC(rc);

        pPatch->cbOp     = cbOp;

        if (pDis->param1.flags == USE_DISPLACEMENT32)
        {
            /* write. */
            if (pDis->param2.flags == USE_REG_GEN32)
            {
                pPatch->enmType     = HWACCMTPRINSTR_WRITE_REG;
                pPatch->uSrcOperand = pDis->param2.base.reg_gen;
            }
            else
            {
                Assert(pDis->param2.flags == USE_IMMEDIATE32);
                pPatch->enmType     = HWACCMTPRINSTR_WRITE_IMM;
                pPatch->uSrcOperand = pDis->param2.parval;
            }
            rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
            AssertRC(rc);

            memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
            pPatch->cbNewOp = sizeof(aVMMCall);
        }
        else
        {
            RTGCPTR  oldrip   = pCtx->rip;
            uint32_t oldcbOp  = cbOp;
            uint32_t uMmioReg = pDis->param1.base.reg_gen;

            /* read */
            Assert(pDis->param1.flags == USE_REG_GEN32);

            /* Found:
             *   mov eax, dword [fffe0080]        (5 bytes)
             * Check if next instruction is:
             *   shr eax, 4
             */
            pCtx->rip += cbOp;
            rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
            pCtx->rip = oldrip;
            if (    rc == VINF_SUCCESS
                &&  pDis->pCurInstr->opcode == OP_SHR
                &&  pDis->param1.flags == USE_REG_GEN32
                &&  pDis->param1.base.reg_gen == uMmioReg
                &&  pDis->param2.flags == USE_IMMEDIATE8
                &&  pDis->param2.parval == 4
                &&  oldcbOp + cbOp < sizeof(pVM->hwaccm.s.aPatches[idx].aOpcode))
            {
                uint8_t szInstr[15];

                /* Replacing two instructions now. */
                rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, oldcbOp + cbOp);
                AssertRC(rc);

                pPatch->cbOp = oldcbOp + cbOp;

                /* 0xF0, 0x0F, 0x20, 0xC0 = mov eax, cr8 */
 
1871
                szInstr[0] = 0xF0;
 
1872
                szInstr[1] = 0x0F;
 
1873
                szInstr[2] = 0x20;
 
1874
                szInstr[3] = 0xC0 | pDis->param1.base.reg_gen;
 
1875
                for (unsigned i = 4; i < pPatch->cbOp; i++)
 
1876
                    szInstr[i] = 0x90;  /* nop */
 
1877
 
 
1878
                rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, szInstr, pPatch->cbOp);
 
1879
                AssertRC(rc);
 
1880
 
 
1881
                memcpy(pPatch->aNewOpcode, szInstr, pPatch->cbOp);
 
1882
                pPatch->cbNewOp = pPatch->cbOp;
 
1883
 
 
1884
                Log(("Acceptable read/shr candidate!\n"));
 
1885
                pPatch->enmType = HWACCMTPRINSTR_READ_SHR4;
 
1886
            }
 
1887
            else
 
1888
            {
 
1889
                pPatch->enmType     = HWACCMTPRINSTR_READ;
 
1890
                pPatch->uDstOperand = pDis->param1.base.reg_gen;
 
1891
 
 
1892
                rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
 
1893
                AssertRC(rc);
 
1894
 
 
1895
                memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
 
1896
                pPatch->cbNewOp = sizeof(aVMMCall);
 
1897
            }
 
1898
        }
 
1899
 
 
1900
        pPatch->Core.Key = pCtx->eip;
 
1901
        rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
 
1902
        AssertRC(rc);
 
1903
 
 
1904
        pVM->hwaccm.s.cPatches++;
 
1905
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceSuccess);
 
1906
        return VINF_SUCCESS;
 
1907
    }
 
1908
 
 
1909
    /* Save invalid patch, so we will not try again. */
 
1910
    uint32_t  idx = pVM->hwaccm.s.cPatches;
 
1911
 
 
1912
#ifdef LOG_ENABLED
 
1913
    char      szOutput[256];
 
1914
    rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, DBGF_DISAS_FLAGS_DEFAULT_MODE,
 
1915
                            szOutput, sizeof(szOutput), NULL);
 
1916
    if (RT_SUCCESS(rc))
 
1917
        Log(("Failed to patch instr: %s\n", szOutput));
 
1918
#endif
 
1919
 
 
1920
    pPatch = &pVM->hwaccm.s.aPatches[idx];
 
1921
    pPatch->Core.Key = pCtx->eip;
 
1922
    pPatch->enmType  = HWACCMTPRINSTR_INVALID;
 
1923
    rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
 
1924
    AssertRC(rc);
 
1925
    pVM->hwaccm.s.cPatches++;
 
1926
    STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceFailure);
 
1927
    return VINF_SUCCESS;
 
1928
}
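
/*
 * Illustrative example (hypothetical guest code, not from a real trace):
 * a 32-bit guest writing its task-priority register through the mapped
 * APIC page might execute
 *
 *      mov dword [0fffe0080h], ecx          ; 89 0D 80 00 FE FF (6 bytes)
 *
 * The code above overwrites the first three bytes with vmmcall (0F 01 D9)
 * and records the original opcode bytes, length and source operand in the
 * patch record, so the ring-0 vmmcall handler can emulate the TPR write
 * and advance RIP past the whole original instruction.
 */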
 
/**
 * Callback to patch a TPR instruction (jump to generated code).
 *
 * @returns VBox strict status code.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU for the EMT we're being called on.
 * @param   pvUser  The id of the VCPU that issued the original patch request.
 */
DECLCALLBACK(VBOXSTRICTRC) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    VMCPUID      idCpu  = (VMCPUID)(uintptr_t)pvUser;
    PCPUMCTX     pCtx   = CPUMQueryGuestCtxPtr(pVCpu);
    PDISCPUSTATE pDis   = &pVCpu->hwaccm.s.DisState;
    unsigned     cbOp;
    int          rc;
#ifdef LOG_ENABLED
    RTGCPTR      pInstr;
    char         szOutput[256];
#endif

    /* Only execute the handler on the VCPU on which the original patch request was issued
       (the other CPU(s) might not have switched to protected mode yet). */
    if (pVCpu->idCpu != idCpu)
        return VINF_SUCCESS;

    Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches));

    /* Two or more VCPUs were racing to patch this instruction. */
    PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    if (pPatch)
    {
        Log(("hwaccmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
        return VINF_SUCCESS;
    }

    Log(("hwaccmR3PatchTprInstr %RGv\n", pCtx->rip));

    rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
    AssertRC(rc);
    if (    rc == VINF_SUCCESS
        &&  pDis->pCurInstr->opcode == OP_MOV
        &&  cbOp >= 5)
    {
        uint32_t        idx = pVM->hwaccm.s.cPatches;
        uint8_t         aPatch[64];
        uint32_t        off = 0;

        pPatch = &pVM->hwaccm.s.aPatches[idx];

#ifdef LOG_ENABLED
        rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                szOutput, sizeof(szOutput), NULL);
        if (RT_SUCCESS(rc))
            Log(("Original instr: %s\n", szOutput));
#endif

        rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
        AssertRC(rc);

        pPatch->cbOp    = cbOp;
        pPatch->enmType = HWACCMTPRINSTR_JUMP_REPLACEMENT;

        if (pDis->param1.flags == USE_DISPLACEMENT32)
        {
            /*
             * TPR write:
             *
             * push ECX                      [51]
             * push EDX                      [52]
             * push EAX                      [50]
             * xor EDX,EDX                   [31 D2]
             * mov EAX,EAX                   [89 C0]
             *  or
             * mov EAX,0000000CCh            [B8 CC 00 00 00]
             * mov ECX,0C0000082h            [B9 82 00 00 C0]
             * wrmsr                         [0F 30]
             * pop EAX                       [58]
             * pop EDX                       [5A]
             * pop ECX                       [59]
             * jmp return_address            [E9 return_address]
             */
            bool fUsesEax = (pDis->param2.flags == USE_REG_GEN32 && pDis->param2.base.reg_gen == USE_REG_EAX);

            aPatch[off++] = 0x51;    /* push ecx */
            aPatch[off++] = 0x52;    /* push edx */
            if (!fUsesEax)
                aPatch[off++] = 0x50;    /* push eax */
            aPatch[off++] = 0x31;    /* xor edx, edx */
            aPatch[off++] = 0xD2;
            if (pDis->param2.flags == USE_REG_GEN32)
            {
                if (!fUsesEax)
                {
                    aPatch[off++] = 0x89;    /* mov eax, src_reg */
                    aPatch[off++] = MAKE_MODRM(3, pDis->param2.base.reg_gen, USE_REG_EAX);
                }
            }
            else
            {
                Assert(pDis->param2.flags == USE_IMMEDIATE32);
                aPatch[off++] = 0xB8;    /* mov eax, immediate */
                *(uint32_t *)&aPatch[off] = pDis->param2.parval;
                off += sizeof(uint32_t);
            }
            aPatch[off++] = 0xB9;    /* mov ecx, 0xc0000082 */
            *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
            off += sizeof(uint32_t);

            aPatch[off++] = 0x0F;    /* wrmsr */
            aPatch[off++] = 0x30;
            if (!fUsesEax)
                aPatch[off++] = 0x58;    /* pop eax */
            aPatch[off++] = 0x5A;    /* pop edx */
            aPatch[off++] = 0x59;    /* pop ecx */
        }
        else
        {
            /*
             * TPR read:
             *
             * push ECX                      [51]
             * push EDX                      [52]
             * push EAX                      [50]
             * xor EDX,EDX                   [31 D2]
             * mov ECX,0C0000082h            [B9 82 00 00 C0]
             * rdmsr                         [0F 32]
             * mov EAX,EAX                   [89 C0]
             * pop EAX                       [58]
             * pop EDX                       [5A]
             * pop ECX                       [59]
             * jmp return_address            [E9 return_address]
             */
            Assert(pDis->param1.flags == USE_REG_GEN32);

            if (pDis->param1.base.reg_gen != USE_REG_ECX)
                aPatch[off++] = 0x51;    /* push ecx */
            if (pDis->param1.base.reg_gen != USE_REG_EDX)
                aPatch[off++] = 0x52;    /* push edx */
            if (pDis->param1.base.reg_gen != USE_REG_EAX)
                aPatch[off++] = 0x50;    /* push eax */

            aPatch[off++] = 0x31;    /* xor edx, edx */
            aPatch[off++] = 0xD2;

            aPatch[off++] = 0xB9;    /* mov ecx, 0xc0000082 */
            *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
            off += sizeof(uint32_t);

            aPatch[off++] = 0x0F;    /* rdmsr */
            aPatch[off++] = 0x32;

            if (pDis->param1.base.reg_gen != USE_REG_EAX)
            {
                aPatch[off++] = 0x89;    /* mov dst_reg, eax */
                aPatch[off++] = MAKE_MODRM(3, USE_REG_EAX, pDis->param1.base.reg_gen);
            }

            if (pDis->param1.base.reg_gen != USE_REG_EAX)
                aPatch[off++] = 0x58;    /* pop eax */
            if (pDis->param1.base.reg_gen != USE_REG_EDX)
                aPatch[off++] = 0x5A;    /* pop edx */
            if (pDis->param1.base.reg_gen != USE_REG_ECX)
                aPatch[off++] = 0x59;    /* pop ecx */
        }
        aPatch[off++] = 0xE9;    /* jmp return_address */
        *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem + off + 4);
        off += sizeof(RTRCUINTPTR);

        if (pVM->hwaccm.s.pFreeGuestPatchMem + off <= pVM->hwaccm.s.pGuestPatchMem + pVM->hwaccm.s.cbGuestPatchMem)
        {
            /* Write new code to the patch buffer. */
            rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hwaccm.s.pFreeGuestPatchMem, aPatch, off);
            AssertRC(rc);

#ifdef LOG_ENABLED
            pInstr = pVM->hwaccm.s.pFreeGuestPatchMem;
            while (true)
            {
                uint32_t cb;

                rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                        szOutput, sizeof(szOutput), &cb);
                if (RT_SUCCESS(rc))
                    Log(("Patch instr %s\n", szOutput));

                pInstr += cb;

                if (pInstr >= pVM->hwaccm.s.pFreeGuestPatchMem + off)
                    break;
            }
#endif

            pPatch->aNewOpcode[0] = 0xE9;
            *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);

            /* Overwrite the TPR instruction with a jump. */
            rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
            AssertRC(rc);

#ifdef LOG_ENABLED
            rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                    szOutput, sizeof(szOutput), NULL);
            if (RT_SUCCESS(rc))
                Log(("Jump: %s\n", szOutput));
#endif
            pVM->hwaccm.s.pFreeGuestPatchMem += off;
            pPatch->cbNewOp = 5;

            pPatch->Core.Key = pCtx->eip;
            rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
            AssertRC(rc);

            pVM->hwaccm.s.cPatches++;
            pVM->hwaccm.s.fTPRPatchingActive = true;
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchSuccess);
            return VINF_SUCCESS;
        }
        else
            Log(("Ran out of space in our patch buffer!\n"));
    }

    /* Save invalid patch, so we will not try again. */
    uint32_t  idx = pVM->hwaccm.s.cPatches;

#ifdef LOG_ENABLED
    rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                            szOutput, sizeof(szOutput), NULL);
    if (RT_SUCCESS(rc))
        Log(("Failed to patch instr: %s\n", szOutput));
#endif

    pPatch = &pVM->hwaccm.s.aPatches[idx];
    pPatch->Core.Key = pCtx->eip;
    pPatch->enmType  = HWACCMTPRINSTR_INVALID;
    rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
    AssertRC(rc);
    pVM->hwaccm.s.cPatches++;
    STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchFailure);
    return VINF_SUCCESS;
}
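
/*
 * Background note (assumption, not stated in this file): the generated
 * sequences funnel the TPR value through MSR_K8_LSTAR (0xC0000082).  A 32-bit
 * guest has no legitimate use for that 64-bit SYSCALL target MSR, and AMD-V
 * can intercept RDMSR/WRMSR on it cheaply, so ring-0 presumably services
 * these accesses as TPR reads/writes without taking a full MMIO page-fault
 * exit for each one.
 */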
 
/**
 * Attempts to patch TPR MMIO instructions.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pVCpu       The VM CPU to operate on.
 * @param   pCtx        CPU context.
 */
VMMR3DECL(int) HWACCMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
                                pVM->hwaccm.s.pGuestPatchMem ? hwaccmR3PatchTprInstr : hwaccmR3ReplaceTprInstr,
                                (void *)(uintptr_t)pVCpu->idCpu);
    AssertRC(rc);
    return rc;
}
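
/*
 * Note: a ONE_BY_ONE rendezvous runs the callback on each EMT in turn while
 * the remaining EMTs are held, which is why both callbacks above simply
 * return VINF_SUCCESS on every VCPU except the one that requested the patch.
 */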
 
/**
 * Forces execution of the current I/O code in the recompiler.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        Partial VM execution context.
 */
VMMR3DECL(int) HWACCMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);

    Assert(pVM->fHWACCMEnabled);
    Log(("HWACCMR3EmulateIoBlock\n"));

    /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
    if (HWACCMCanEmulateIoBlockEx(pCtx))
    {
        Log(("HWACCMR3EmulateIoBlock -> enabled\n"));
        pVCpu->hwaccm.s.EmulateIoBlock.fEnabled         = true;
        pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
        pVCpu->hwaccm.s.EmulateIoBlock.cr0              = pCtx->cr0;
        return VINF_EM_RESCHEDULE_REM;
    }
    return VINF_SUCCESS;
}
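
/*
 * Note: HWACCMR3CanExecuteGuest() below refuses hardware execution while RIP
 * stays within +/- 0x200 bytes of the instruction recorded here and CR0 is
 * unchanged, so the whole I/O loop (Grub reading sectors, typically) runs in
 * the recompiler instead of bouncing in and out of VT-x/AMD-V on every port
 * access.
 */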
 
/**
 * Checks if we can currently use hardware accelerated raw mode.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 * @param   pCtx        Partial VM execution context.
 */
VMMR3DECL(bool) HWACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);

    Assert(pVM->fHWACCMEnabled);

    /* If we're still executing the IO code, then return false. */
    if (    RT_UNLIKELY(pVCpu->hwaccm.s.EmulateIoBlock.fEnabled)
        &&  pCtx->rip <  pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
        &&  pCtx->rip >  pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
        &&  pCtx->cr0 == pVCpu->hwaccm.s.EmulateIoBlock.cr0)
        return false;

    pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = false;

    /* AMD-V supports real & protected mode with or without paging. */
    if (pVM->hwaccm.s.svm.fEnabled)
    {
        pVCpu->hwaccm.s.fActive = true;
        return true;
    }

    pVCpu->hwaccm.s.fActive = false;

    /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
    Assert((pVM->hwaccm.s.vmx.fUnrestrictedGuest && !pVM->hwaccm.s.vmx.pRealModeTSS) || (!pVM->hwaccm.s.vmx.fUnrestrictedGuest && pVM->hwaccm.s.vmx.pRealModeTSS));

    bool fSupportsRealMode = pVM->hwaccm.s.vmx.fUnrestrictedGuest || PDMVMMDevHeapIsEnabled(pVM);
    if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
    {
        /* The VMM device heap is a requirement for emulating real mode or protected mode without paging
           when the unrestricted guest execution feature is missing. */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /* VT-x will not allow high selector bases in v86 mode; fall back to the recompiler in that case.
                 * The base must also be equal to (sel << 4), e.g. CS = 0xf000 requires a base of 0xf0000.
                 */
                if (   (   pCtx->cs != (pCtx->csHid.u64Base >> 4)
                        && pCtx->csHid.u64Base != 0xffff0000 /* we can deal with the BIOS code as it's also mapped into the lower region. */)
                    || pCtx->ds != (pCtx->dsHid.u64Base >> 4)
                    || pCtx->es != (pCtx->esHid.u64Base >> 4)
                    || pCtx->fs != (pCtx->fsHid.u64Base >> 4)
                    || pCtx->gs != (pCtx->gsHid.u64Base >> 4)
                    || pCtx->ss != (pCtx->ssHid.u64Base >> 4))
                {
                    return false;
                }
            }
            else
            {
                PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
                /* Verify the requirements for executing code in protected mode. VT-x can't handle the CPU state right after a switch
                 * from real to protected mode. (all sorts of RPL & DPL assumptions)
                 */
                if (    pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
                    &&  enmGuestMode >= PGMMODE_PROTECTED)
                {
                    if (   (pCtx->cs & X86_SEL_RPL)
                        || (pCtx->ds & X86_SEL_RPL)
                        || (pCtx->es & X86_SEL_RPL)
                        || (pCtx->fs & X86_SEL_RPL)
                        || (pCtx->gs & X86_SEL_RPL)
                        || (pCtx->ss & X86_SEL_RPL))
                    {
                        return false;
                    }
                }
                /* VT-x also chokes on invalid tr or ldtr selectors (minix) */
                if (    pCtx->gdtr.cbGdt
                    &&  (   pCtx->tr > pCtx->gdtr.cbGdt
                         || pCtx->ldtr > pCtx->gdtr.cbGdt))
                {
                    return false;
                }
            }
        }
        else
        {
            if (    !CPUMIsGuestInLongModeEx(pCtx)
                &&  !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
            {
                /** @todo   This should (probably) be set on every excursion to the REM,
                 *          however it's too risky right now. So, only apply it when we go
                 *          back to REM for real mode execution. (The XP hack below doesn't
                 *          work reliably without this.)
                 *  Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HWACCM.  */
                pVM->aCpus[0].hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;

                if (    !pVM->hwaccm.s.fNestedPaging        /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
                    ||  CPUMIsGuestInRealModeEx(pCtx))      /* requires a fake TSS for real mode - stored in the VMM device heap */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr == 0)
                    return false;

                /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
                /* Windows XP; switch to protected mode; all selectors are marked not present in the
                 * hidden registers (possible recompiler bug; see load_seg_vm) */
                if (pCtx->csHid.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ssHid.Attr.n.u1Present == 0)
                    return false;

                /* Windows XP: possibly the same as above, but the new recompiler requires new heuristics?
                   VT-x doesn't seem to like something about the guest state and this stuff avoids it. */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ssHid.u32Limit)
                    return false;
#if 0
                if (    pCtx->cs >= pCtx->gdtr.cbGdt
                    ||  pCtx->ss >= pCtx->gdtr.cbGdt
                    ||  pCtx->ds >= pCtx->gdtr.cbGdt
                    ||  pCtx->es >= pCtx->gdtr.cbGdt
                    ||  pCtx->fs >= pCtx->gdtr.cbGdt
                    ||  pCtx->gs >= pCtx->gdtr.cbGdt)
                    return false;
#endif
            }
        }
    }

    if (pVM->hwaccm.s.vmx.fEnabled)
    {
        uint32_t mask;

        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0;
        /* Note: We ignore the NE bit here on purpose; see vmmr0\hwaccmr0.cpp for details. */
        mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            mask &= ~(X86_CR0_PG|X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & mask) != mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1;
        if ((pCtx->cr0 & mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        mask  = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
        mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & mask) != mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1;
        if ((pCtx->cr4 & mask) != 0)
            return false;

        pVCpu->hwaccm.s.fActive = true;
        return true;
    }

    return false;
}
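
/*
 * Worked example (illustrative values; the real ones come from the
 * IA32_VMX_CR0_FIXED* MSRs and vary by CPU): with vmx_cr0_fixed0 =
 * 0x80000021 (PG|NE|PE) and fSupportsRealMode, the mask above becomes
 * 0x80000021 & ~NE & ~(PG|PE) = 0, so any CR0 value passes the first test;
 * without real-mode support only PG is waived, leaving mask = PE, so a
 * guest with PE clear (real mode) is rejected and stays in the recompiler.
 */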
 
/**
 * Checks if we need to reschedule due to VMM device heap changes.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 * @param   pCtx        VM execution context.
 */
VMMR3DECL(bool) HWACCMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
{
    /* The VMM device heap is a requirement for emulating real mode or protected mode without paging
       when the unrestricted guest execution feature is missing. (VT-x only) */
    if (    pVM->hwaccm.s.vmx.fEnabled
        &&  !pVM->hwaccm.s.vmx.fUnrestrictedGuest
        &&  !CPUMIsGuestInPagedProtectedModeEx(pCtx)
        &&  !PDMVMMDevHeapIsEnabled(pVM)
        &&  (pVM->hwaccm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
        return true;

    return false;
}
 
/**
 * Notification from EM about a rescheduling into hardware assisted execution
 * mode.
 *
 * @param   pVCpu       Pointer to the current virtual cpu structure.
 */
VMMR3DECL(void) HWACCMR3NotifyScheduled(PVMCPU pVCpu)
{
    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
}

/**
 * Notification from EM about returning from instruction emulation (REM / EM).
 *
 * @param   pVCpu       Pointer to the current virtual cpu structure.
 */
VMMR3DECL(void) HWACCMR3NotifyEmulated(PVMCPU pVCpu)
{
    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
}
 
/**
 * Checks if we are currently using hardware accelerated raw mode.
 *
 * @returns boolean
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMR3DECL(bool) HWACCMR3IsActive(PVMCPU pVCpu)
{
    return pVCpu->hwaccm.s.fActive;
}

/**
 * Checks if we are currently using nested paging.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(bool) HWACCMR3IsNestedPagingActive(PVM pVM)
{
    return pVM->hwaccm.s.fNestedPaging;
}

/**
 * Checks if we are currently using VPID in VT-x mode.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(bool) HWACCMR3IsVPIDActive(PVM pVM)
{
    return pVM->hwaccm.s.vmx.fVPID;
}

/**
 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
 *
 * @returns boolean
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMR3DECL(bool) HWACCMR3IsEventPending(PVMCPU pVCpu)
{
    return HWACCMIsEnabled(pVCpu->pVMR3) && pVCpu->hwaccm.s.Event.fPending;
}

/**
 * Checks if the VMX-preemption timer is being used.
 *
 * @returns true if it is, false if it isn't.
 * @param   pVM         The VM handle.
 */
VMMR3DECL(bool) HWACCMR3IsVmxPreemptionTimerUsed(PVM pVM)
{
    return HWACCMIsEnabled(pVM)
        && pVM->hwaccm.s.vmx.fEnabled
        && pVM->hwaccm.s.vmx.fUsePreemptTimer;
}
 
/**
 * Restarts an I/O instruction that was refused in ring-0.
 *
 * @returns Strict VBox status code. Informational status codes other than the one documented
 *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
 * @retval  VINF_SUCCESS                Success.
 * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
 *                                      status code must be passed on to EM.
 * @retval  VERR_NOT_FOUND if no pending I/O instruction.
 *
 * @param   pVM         The VM to operate on.
 * @param   pVCpu       The VMCPU to operate on.
 * @param   pCtx        VCPU register context.
 */
VMMR3DECL(VBOXSTRICTRC) HWACCMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    HWACCMPENDINGIO enmType = pVCpu->hwaccm.s.PendingIO.enmType;

    pVCpu->hwaccm.s.PendingIO.enmType = HWACCMPENDINGIO_INVALID;

    if (    pVCpu->hwaccm.s.PendingIO.GCPtrRip != pCtx->rip
        ||  enmType  == HWACCMPENDINGIO_INVALID)
        return VERR_NOT_FOUND;

    VBOXSTRICTRC rcStrict;
    switch (enmType)
    {
    case HWACCMPENDINGIO_PORT_READ:
    {
        uint32_t uAndVal = pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal;
        uint32_t u32Val  = 0;

        rcStrict = IOMIOPortRead(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
                                 &u32Val,
                                 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
        if (IOM_SUCCESS(rcStrict))
        {
            /* Write back to the EAX register. */
            pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
            pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
        }
        break;
    }

    case HWACCMPENDINGIO_PORT_WRITE:
        rcStrict = IOMIOPortWrite(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
                                  pCtx->eax & pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal,
                                  pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
        if (IOM_SUCCESS(rcStrict))
            pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
        break;

    default:
        AssertFailed();
        return VERR_INTERNAL_ERROR;
    }

    return rcStrict;
}
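
/*
 * Example of the EAX merge above (illustrative): for a 1-byte 'in al, dx',
 * uAndVal would be 0xff, so eax = (eax & ~0xff) | (u32Val & 0xff) replaces
 * only AL and leaves the upper 24 bits of EAX untouched, matching what the
 * instruction itself would have done.
 */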
 
/**
 * Injects an NMI into a running VM (only VCPU 0!).
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
VMMR3DECL(int)  HWACCMR3InjectNMI(PVM pVM)
{
    VMCPU_FF_SET(&pVM->aCpus[0], VMCPU_FF_INTERRUPT_NMI);
    return VINF_SUCCESS;
}
 
/**
 * Checks for a fatal VT-x/AMD-V error and produces a meaningful release log
 * message.
 *
 * @param   pVM         The VM to operate on.
 * @param   iStatusCode VBox status code.
 */
VMMR3DECL(void) HWACCMR3CheckError(PVM pVM, int iStatusCode)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        switch (iStatusCode)
        {
        case VERR_VMX_INVALID_VMCS_FIELD:
            break;

        case VERR_VMX_INVALID_VMCS_PTR:
            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idEnteredCpu));
            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idCurrentCpu));
            break;

        case VERR_VMX_UNABLE_TO_START_VM:
            LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
            LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason       %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
#if 0 /** @todo dump the current control fields to the release log */
            if (pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
            {
            }
#endif
            break;

        case VERR_VMX_UNABLE_TO_RESUME_VM:
            LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
            LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason       %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
            break;

        case VERR_VMX_INVALID_VMXON_PTR:
            break;
        }
    }
}
 
/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    int rc;

    Log(("hwaccmR3Save:\n"));

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        /*
         * Save the basic bits - fortunately all the other things can be resynced on load.
         */
        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.fPending);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.errCode);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hwaccm.s.Event.intInfo);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode);
        AssertRCReturn(rc, rc);
    }
#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
    rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pGuestPatchMem);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pFreeGuestPatchMem);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cbGuestPatchMem);
    AssertRCReturn(rc, rc);

    /* Store all the guest patch records too. */
    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cPatches);
    AssertRCReturn(rc, rc);

    for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
    {
        PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];

        rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->cbOp);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
        AssertRCReturn(rc, rc);

        AssertCompileSize(HWACCMTPRINSTR, 4);
        rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->cFaults);
        AssertRCReturn(rc, rc);
    }
#endif
    return VINF_SUCCESS;
}
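
/*
 * Note: hwaccmR3Load() below must read these fields back in exactly the
 * order and width they are written here; the field order is effectively
 * part of the saved-state format, which is why both functions are gated
 * by the same HWACCM_SSM_VERSION* constants.
 */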
 
/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   uVersion        Data layout version.
 * @param   uPass           The data pass.
 */
static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    int rc;

    Log(("hwaccmR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (   uVersion != HWACCM_SSM_VERSION
        && uVersion != HWACCM_SSM_VERSION_NO_PATCHING
        && uVersion != HWACCM_SSM_VERSION_2_0_X)
    {
        AssertMsgFailed(("hwaccmR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.errCode);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
        AssertRCReturn(rc, rc);

        if (uVersion >= HWACCM_SSM_VERSION_NO_PATCHING)
        {
            uint32_t val;

            rc = SSMR3GetU32(pSSM, &val);
            AssertRCReturn(rc, rc);
            pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;

            rc = SSMR3GetU32(pSSM, &val);
            AssertRCReturn(rc, rc);
            pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode = (PGMMODE)val;

            rc = SSMR3GetU32(pSSM, &val);
            AssertRCReturn(rc, rc);
            pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
        }
    }
#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
    if (uVersion > HWACCM_SSM_VERSION_NO_PATCHING)
    {
        rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pGuestPatchMem);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pFreeGuestPatchMem);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cbGuestPatchMem);
        AssertRCReturn(rc, rc);

        /* Fetch all TPR patch records. */
        rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cPatches);
        AssertRCReturn(rc, rc);

        for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
        {
            PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];

            rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
            AssertRCReturn(rc, rc);

            if (pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT)
                pVM->hwaccm.s.fTPRPatchingActive = true;

            Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.fTPRPatchingActive == false);

            rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
            AssertRCReturn(rc, rc);

            Log(("hwaccmR3Load: patch %d\n", i));
            Log(("Key       = %x\n", pPatch->Core.Key));
            Log(("cbOp      = %d\n", pPatch->cbOp));
            Log(("cbNewOp   = %d\n", pPatch->cbNewOp));
            Log(("type      = %d\n", pPatch->enmType));
            Log(("srcop     = %d\n", pPatch->uSrcOperand));
            Log(("dstop     = %d\n", pPatch->uDstOperand));
            Log(("cFaults   = %d\n", pPatch->cFaults));
            Log(("target    = %x\n", pPatch->pJumpTarget));
            rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
            AssertRC(rc);
        }
    }
#endif

    /* Recheck all VCPUs if we can go straight into hwaccm execution mode. */
    if (HWACCMIsEnabled(pVM))
    {
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            PVMCPU pVCpu = &pVM->aCpus[i];

            HWACCMR3CanExecuteGuest(pVM, CPUMQueryGuestCtxPtr(pVCpu));
        }
    }
    return VINF_SUCCESS;
}
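
/*
 * Note: the final HWACCMR3CanExecuteGuest() sweep above re-primes each
 * VCPU's fActive flag from the restored context, so EM can presumably
 * resume hardware-assisted execution right after the load instead of
 * waiting for the next reschedule round trip.
 */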
 