5
// NOTE(review): the bare integers on their own lines throughout this file
// are extraction artifacts (stray original line numbers), not C tokens;
// the file cannot compile until they are stripped out.
// NOTE(review): the /* comment opened on the next line is never closed in
// this fragment, so everything down to the next */ later in the file is
// lexically one comment.
/* We shift all the intercept bits so we can OR them with the
7
// NOTE(review): the "enum {" opener and most of the INTERCEPT_* members
// were dropped by the extraction — do not treat this list as complete.
INTERCEPT_INTR = HF_HIF_SHIFT,
12
INTERCEPT_SELECTIVE_CR0,
36
INTERCEPT_TASK_SWITCH,
37
INTERCEPT_FERR_FREEZE,
50
/* This is not really an intercept but rather a placeholder to
51
show that we are in an SVM (just like a hidden flag, but keeps the
53
// INTERCEPT_SVM is bit 63 — the top bit of the 64-bit intercept word —
// so its mask can mark "inside SVM" without clashing with the real
// hardware intercept bits below it.
#define INTERCEPT_SVM 63
54
#define INTERCEPT_SVM_MASK (1ULL << INTERCEPT_SVM)
56
// NOTE(review): partial copy of the hardware-defined AMD SVM VMCB control
// area.  The gaps in the interleaved line-number artifacts show that
// several members and the closing "};" were dropped by the extraction, so
// member offsets here cannot be trusted — recover the full header before
// relying on this layout.  A second partial copy of this same struct
// appears further down in the file.
struct __attribute__ ((__packed__)) vmcb_control_area {
57
uint16_t intercept_cr_read;
58
uint16_t intercept_cr_write;
59
uint16_t intercept_dr_read;
60
uint16_t intercept_dr_write;
61
uint32_t intercept_exceptions;
63
uint8_t reserved_1[44];
64
uint64_t iopm_base_pa;
65
uint64_t msrpm_base_pa;
69
uint8_t reserved_2[3];
73
uint8_t reserved_3[4];
75
// NOTE(review): presumably the high half of a split 64-bit exit code;
// the matching low-half member is on a dropped line — TODO confirm.
uint32_t exit_code_hi;
78
uint32_t exit_int_info;
79
uint32_t exit_int_info_err;
81
uint8_t reserved_4[16];
83
uint32_t event_inj_err;
86
uint8_t reserved_5[832];
// TLB control values for the VMCB control area's TLB-control byte —
// TODO confirm the field name; the member itself is on a line dropped
// from this extraction.
#define TLB_CONTROL_DO_NOTHING 0
#define TLB_CONTROL_FLUSH_ALL_ASID 1

// IN/OUT intercept information masks (3-bit operand-size and
// address-size groups).  NOTE(review): the matching
// SVM_IOIO_SIZE_SHIFT / SVM_IOIO_ASIZE_SHIFT definitions were dropped
// by the extraction; these masks cannot be expanded until those are
// restored.
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
120
// NOTE(review): only the opening line of struct vmcb_seg survived the
// extraction; its members and closing brace are on dropped lines.
struct __attribute__ ((__packed__)) vmcb_seg {
127
// NOTE(review): partial copy of the VMCB save area.  The gaps in the
// interleaved line-number artifacts mark dropped members (and the
// closing "};"), so member offsets here cannot be trusted.
struct __attribute__ ((__packed__)) vmcb_save_area {
134
struct vmcb_seg gdtr;
135
struct vmcb_seg ldtr;
136
struct vmcb_seg idtr;
138
uint8_t reserved_1[43];
140
uint8_t reserved_2[4];
142
uint8_t reserved_3[112];
150
uint8_t reserved_4[88];
152
uint8_t reserved_5[24];
158
uint64_t kernel_gs_base;
159
uint64_t sysenter_cs;
160
uint64_t sysenter_esp;
161
uint64_t sysenter_eip;
163
/* qemu: cr8 added to reuse this as hsave */
165
// NOTE(review): reserved area apparently shrunk by 8 bytes to make room
// for the cr8 slot mentioned in the comment above; the cr8 member itself
// is on a dropped line — TODO confirm against the full header.
uint8_t reserved_6[32 - 8]; /* originally 32 */
170
uint64_t last_excp_from;
171
uint64_t last_excp_to;
174
// NOTE(review): top-level VMCB: control area followed by save area.
// The closing "};" was dropped by the extraction.
struct __attribute__ ((__packed__)) vmcb {
175
struct vmcb_control_area control;
176
struct vmcb_save_area save;
/* CPUID numbers used to detect SVM support: feature bit position in the
 * extended-feature leaf, and the SVM leaf number itself. */
#define SVM_CPUID_FEATURE_SHIFT 2
#define SVM_CPUID_FUNC 0x8000000a

/* EFER.SVME (bit 12) — enables SVM. */
#define MSR_EFER_SVME_MASK (1ULL << 12)

/* Layout of the attribute word in a vmcb_seg (type in bits 0-3, then the
 * descriptor flag bits). */
#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
#define SVM_SELECTOR_AVL_SHIFT 8
#define SVM_SELECTOR_L_SHIFT 9
#define SVM_SELECTOR_DB_SHIFT 10
#define SVM_SELECTOR_G_SHIFT 11

#define SVM_SELECTOR_TYPE_MASK (0xf)
#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)

/* Segment-type bits within SVM_SELECTOR_TYPE_MASK. */
#define SVM_SELECTOR_WRITE_MASK (1 << 1)
#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
#define SVM_SELECTOR_CODE_MASK (1 << 3)

/* Bits in intercept_cr_read / intercept_cr_write. */
#define INTERCEPT_CR0_MASK 1
#define INTERCEPT_CR3_MASK (1 << 3)
#define INTERCEPT_CR4_MASK (1 << 4)

/* Bits in intercept_dr_read / intercept_dr_write. */
#define INTERCEPT_DR0_MASK 1
#define INTERCEPT_DR1_MASK (1 << 1)
#define INTERCEPT_DR2_MASK (1 << 2)
#define INTERCEPT_DR3_MASK (1 << 3)
#define INTERCEPT_DR4_MASK (1 << 4)
#define INTERCEPT_DR5_MASK (1 << 5)
#define INTERCEPT_DR6_MASK (1 << 6)
#define INTERCEPT_DR7_MASK (1 << 7)

/* EVENTINJ field layout: vector in bits 0-7, event type starting at
 * bit 8.  NOTE(review): a large run of definitions between here and
 * SVM_CR0_SELECTIVE_MASK (presumably the remaining EVTINJ masks and the
 * SVM_EXIT_* codes) was dropped by the extraction and must be restored
 * from the full header. */
#define SVM_EVTINJ_VEC_MASK 0xff
#define SVM_EVTINJ_TYPE_SHIFT 8

/* Selective CR0-write intercept mask: TS (bit 3) and MP (bit 1).
 * FIX(review): the original read (1 << 3 | 1) = TS|PE — bit 0 is PE,
 * not MP, contradicting the comment and AMD's TS/MP semantics for the
 * selective CR0 intercept. */
#define SVM_CR0_SELECTIVE_MASK ((1 << 3) | (1 << 1)) /* TS and MP */

/* SVM instructions emitted as raw opcode bytes (0F 01 /xx) for
 * assemblers that do not know the mnemonics. */
#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8"
#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb"
#define SVM_CLGI ".byte 0x0f, 0x01, 0xdd"
#define SVM_STGI ".byte 0x0f, 0x01, 0xdc"
#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
/* function references */

/* Forces a #VMEXIT with the given exit code and EXITINFO1; implemented
 * in the SVM helper code. */
void vmexit(uint64_t exit_code, uint64_t exit_info_1);

/* Presumably returns non-zero when intercept `type` (qualified by
 * `param`) is active for the running guest — TODO confirm against the
 * helper implementation. */
int svm_check_intercept_param(uint32_t type, uint64_t param);

/* Convenience wrapper for intercepts that take no parameter.
 * FIX(review): restored the closing brace dropped by the extraction. */
static inline int svm_check_intercept(unsigned int type) {
    return svm_check_intercept_param(type, 0);
}

/* Intercept tests against the vCPU state; all three expect a pointer
 * named `env` to be in scope at the expansion site.  INTERCEPTEDw /
 * INTERCEPTEDl paste `var` onto "intercept" to select a specific field
 * (e.g. INTERCEPTEDw(_cr_write, m) tests env->intercept_cr_write);
 * the w and l variants are textually identical here — presumably
 * word/long naming kept for the fields' widths.
 * FIX(review): `mask` parenthesized for macro hygiene so compound
 * expressions expand correctly. */
#define INTERCEPTED(mask) (env->intercept & (mask))
#define INTERCEPTEDw(var, mask) (env->intercept ## var & (mask))
#define INTERCEPTEDl(var, mask) (env->intercept ## var & (mask))
338
/* NOTE(review): loads segment `seg` from the VMCB at physical address
   `addr` into QEMU's segment cache via cpu_x86_load_seg_cache().  The
   extraction dropped at least one continuation line here — the
   `seg_index` parameter is never used in the surviving lines, so the
   argument naming the target segment register appears to be missing —
   and the interleaved bare numbers break the backslash continuations.
   Recover the original macro before editing. */
#define SVM_LOAD_SEG(addr, seg_index, seg) \
339
cpu_x86_load_seg_cache(env, \
341
lduw_phys(addr + offsetof(struct vmcb, save.seg.selector)),\
342
ldq_phys(addr + offsetof(struct vmcb, save.seg.base)),\
343
ldl_phys(addr + offsetof(struct vmcb, save.seg.limit)),\
344
vmcb2cpu_attrib(lduw_phys(addr + offsetof(struct vmcb, save.seg.attrib)), ldq_phys(addr + offsetof(struct vmcb, save.seg.base)), ldl_phys(addr + offsetof(struct vmcb, save.seg.limit))))
/* Copies one segment (selector/base/limit/attrib) out of the VMCB save
 * area at physical address `addr` into env->seg_qemu, converting the
 * attribute word through vmcb2cpu_attrib().  Expects `env` in scope at
 * the expansion site.
 * FIX(review): stripped the interleaved extraction artifacts,
 * parenthesized `addr`, and wrapped the multi-statement body in
 * do { } while (0) so the macro is safe in unbraced if/else. */
#define SVM_LOAD_SEG2(addr, seg_qemu, seg_vmcb) \
    do { \
        env->seg_qemu.selector = lduw_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.selector)); \
        env->seg_qemu.base = ldq_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.base)); \
        env->seg_qemu.limit = ldl_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.limit)); \
        env->seg_qemu.flags = vmcb2cpu_attrib(lduw_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.attrib)), env->seg_qemu.base, env->seg_qemu.limit); \
    } while (0)
/* Stores one QEMU segment-cache entry (env->seg_qemu) back into the
 * VMCB save area at physical address `addr`, converting the flags word
 * through cpu2vmcb_attrib().  Expects `env` in scope at the expansion
 * site.
 * FIX(review): stripped the interleaved extraction artifacts,
 * parenthesized `addr`, and wrapped the multi-statement body in
 * do { } while (0) so the macro is safe in unbraced if/else. */
#define SVM_SAVE_SEG(addr, seg_qemu, seg_vmcb) \
    do { \
        stw_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.selector), env->seg_qemu.selector); \
        stq_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.base), env->seg_qemu.base); \
        stl_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.limit), env->seg_qemu.limit); \
        stw_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.attrib), cpu2vmcb_attrib(env->seg_qemu.flags)); \
    } while (0)
133
/* NOTE(review): second partial copy of struct vmcb_control_area — this
   file contains two overlapping extractions of the same header, so this
   re-definition collides with the earlier one; deduplicate when the
   file is reassembled.  The gaps in the interleaved line-number
   artifacts show several members and the closing "};" were dropped, so
   member offsets cannot be trusted here either. */
struct __attribute__ ((__packed__)) vmcb_control_area {
134
uint16_t intercept_cr_read;
135
uint16_t intercept_cr_write;
136
uint16_t intercept_dr_read;
137
uint16_t intercept_dr_write;
138
uint32_t intercept_exceptions;
140
uint8_t reserved_1[44];
141
uint64_t iopm_base_pa;
142
uint64_t msrpm_base_pa;
146
uint8_t reserved_2[3];
150
uint8_t reserved_3[4];
152
uint64_t exit_info_1;
153
uint64_t exit_info_2;
154
uint32_t exit_int_info;
155
uint32_t exit_int_info_err;
157
uint8_t reserved_4[16];
159
uint32_t event_inj_err;
162
uint8_t reserved_5[832];
165
/* NOTE(review): second partial copy of struct vmcb_seg — only the
   opening line survived; members and closing brace are on dropped
   lines.  Collides with the earlier copy; deduplicate on reassembly. */
struct __attribute__ ((__packed__)) vmcb_seg {
172
/* NOTE(review): second partial copy of the VMCB save area.  Note it
   still has reserved_6[32] where the earlier copy has the qemu-modified
   reserved_6[32 - 8] — the two extractions apparently come from
   different revisions of the file.  Missing members and the closing
   "};" make offsets untrustworthy here. */
struct __attribute__ ((__packed__)) vmcb_save_area {
179
struct vmcb_seg gdtr;
180
struct vmcb_seg ldtr;
181
struct vmcb_seg idtr;
183
uint8_t reserved_1[43];
185
uint8_t reserved_2[4];
187
uint8_t reserved_3[112];
195
uint8_t reserved_4[88];
197
uint8_t reserved_5[24];
203
uint64_t kernel_gs_base;
204
uint64_t sysenter_cs;
205
uint64_t sysenter_esp;
206
uint64_t sysenter_eip;
208
uint8_t reserved_6[32];
213
uint64_t last_excp_from;
214
uint64_t last_excp_to;
217
/* NOTE(review): second copy of the top-level VMCB struct; the closing
   "};" is past the end of this extraction. */
struct __attribute__ ((__packed__)) vmcb {
218
struct vmcb_control_area control;
219
struct vmcb_save_area save;