#include "kvm.h"
#include "x86.h"
#include "segment_descriptor.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/msr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe

#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
	{ "pf_guest", STAT_OFFSET(pf_guest) },
	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
	{ "invlpg", STAT_OFFSET(invlpg) },
	{ "exits", STAT_OFFSET(exits) },
	{ "io_exits", STAT_OFFSET(io_exits) },
	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
	{ "signal_exits", STAT_OFFSET(signal_exits) },
	{ "irq_window", STAT_OFFSET(irq_window_exits) },
	{ "halt_exits", STAT_OFFSET(halt_exits) },
	{ "halt_wakeup", STAT_OFFSET(halt_wakeup) },
	{ "request_irq", STAT_OFFSET(request_irq_exits) },
	{ "irq_exits", STAT_OFFSET(irq_exits) },
	{ "light_exits", STAT_OFFSET(light_exits) },
	{ "efer_reload", STAT_OFFSET(efer_reload) },
	{ NULL }
};
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((unsigned long)d->base_mid << 16) |
		((unsigned long)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long) \
		      ((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->apic_base;
	else
		return vcpu->apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->inject_gp(vcpu, 0);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];

	mutex_lock(&vcpu->kvm->lock);
	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
out:
	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}
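/*
 * The cr0/cr4/cr3/cr8 setters below mirror the architectural checks a
 * real CPU performs on a mov-to-cr: illegal bit combinations inject #GP
 * into the guest instead of loading the register, and changes that
 * affect translation rebuild the MMU context under kvm->lock.
 */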
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
	return;
}
EXPORT_SYMBOL_GPL(set_cr0);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->cr4 = cr4;
	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				inject_gp(vcpu);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				inject_gp(vcpu);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	mutex_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else {
		vcpu->cr3 = cr3;
		vcpu->mmu.new_cr3(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);
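/*
 * cr8 mirrors the local APIC TPR.  With the in-kernel irqchip the value
 * lives in the emulated lapic; otherwise it is tracked directly in the
 * vcpu so userspace can sync it through kvm_run.
 */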
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		inject_gp(vcpu);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

unsigned long get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->cr8;
}
EXPORT_SYMBOL_GPL(get_cr8);
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static __init void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	/* Keep only the msrs the host cpu can actually read. */
	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}
/*
 * Only the apic needs an MMIO device hook, so shortcut now..
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->apic) {
		dev = &vcpu->apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}
static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}
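/*
 * The emulator_*() functions below implement the x86_emulate_ops
 * callbacks.  Each returns X86EMUL_CONTINUE on success,
 * X86EMUL_PROPAGATE_FAULT to reflect a guest page fault, or
 * X86EMUL_UNHANDLEABLE when the access must be completed elsewhere.
 */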
int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0)
			return X86EMUL_UNHANDLEABLE;

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(emulator_read_std);
static int emulator_write_std(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct kvm_vcpu *vcpu)
{
	pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_read_std(addr, val, bytes, vcpu)
			== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}
static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);
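/*
 * Worked example of the split above, assuming 4K pages: a 4-byte write
 * to addr 0x1ffe crosses into the next page, so now = -0x1ffe & 0xfff
 * = 2; the first onepage call covers 0x1ffe..0x1fff and the trailing
 * call writes the remaining 2 bytes starting at 0x2000.
 */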
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, vcpu);
}
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	vcpu->cr0 &= ~X86_CR0_TS;
	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0);
	return X86EMUL_CONTINUE;
}
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
struct x86_emulate_ops emulate_ops = {
	.read_std            = emulator_read_std,
	.write_std           = emulator_write_std,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
};
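/*
 * Top-level entry into the emulator: build an emulation context from
 * current vcpu state (mode, segment bases, eflags), decode unless the
 * caller already did (no_decode), execute, and fold the result into
 * EMULATE_DONE, EMULATE_DO_MMIO (exit to userspace) or EMULATE_FAIL.
 */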
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int no_decode)
{
	int r;

	vcpu->mmio_fault_cr2 = cr2;
	kvm_x86_ops->cache_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->pio.string = 0;

	if (!no_decode) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->emulate_ctxt.vcpu = vcpu;
		vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->emulate_ctxt.cr2 = cr2;
		vcpu->emulate_ctxt.mode =
			(vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 : cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
			vcpu->emulate_ctxt.cs_base = 0;
			vcpu->emulate_ctxt.ds_base = 0;
			vcpu->emulate_ctxt.es_base = 0;
			vcpu->emulate_ctxt.ss_base = 0;
		} else {
			vcpu->emulate_ctxt.cs_base =
					get_segment_base(vcpu, VCPU_SREG_CS);
			vcpu->emulate_ctxt.ds_base =
					get_segment_base(vcpu, VCPU_SREG_DS);
			vcpu->emulate_ctxt.es_base =
					get_segment_base(vcpu, VCPU_SREG_ES);
			vcpu->emulate_ctxt.ss_base =
					get_segment_base(vcpu, VCPU_SREG_SS);
		}

		vcpu->emulate_ctxt.gs_base =
				get_segment_base(vcpu, VCPU_SREG_GS);
		vcpu->emulate_ctxt.fs_base =
				get_segment_base(vcpu, VCPU_SREG_FS);

		r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
		if (r) {
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);

	if (vcpu->pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);
static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
		if (vcpu->pio.guest_pages[i]) {
			kvm_release_page(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->pio_data;
	void *q;
	unsigned bytes;
	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->pio.guest_page_offset;
	bytes = vcpu->pio.size * vcpu->pio.cur_count;
	if (vcpu->pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->pio.guest_page_offset;
	vunmap(q);
	free_pio_guest_pages(vcpu);
	return 0;
}
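/*
 * Called when a PIO transaction finishes: copy in-data back to RAX (or,
 * for string I/O, through the pinned guest pages) and advance RSI/RDI,
 * plus RCX for rep prefixes, by the number of elements transferred.
 */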
int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	long delta;
	int r;

	kvm_x86_ops->cache_regs(vcpu);

	if (!io->string) {
		if (io->in)
			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
			       io->size);
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r) {
				kvm_x86_ops->cache_regs(vcpu);
				return r;
			}
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			vcpu->regs[VCPU_REGS_RCX] -= delta;
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in)
			vcpu->regs[VCPU_REGS_RDI] += delta;
		else
			vcpu->regs[VCPU_REGS_RSI] += delta;
	}

	kvm_x86_ops->decache_regs(vcpu);

	io->count -= io->cur_count;
	io->cur_count = 0;

	return 0;
}
static void kernel_pio(struct kvm_io_device *pio_dev,
		       struct kvm_vcpu *vcpu,
		       void *pd)
{
	/* TODO: String I/O for in kernel device */

	mutex_lock(&vcpu->kvm->lock);
	if (vcpu->pio.in)
		kvm_iodevice_read(pio_dev, vcpu->pio.port,
				  vcpu->pio.size,
				  pd);
	else
		kvm_iodevice_write(pio_dev, vcpu->pio.port,
				   vcpu->pio.size,
				   pd);
	mutex_unlock(&vcpu->kvm->lock);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
			     struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	void *pd = vcpu->pio_data;
	int i;

	mutex_lock(&vcpu->kvm->lock);
	for (i = 0; i < io->cur_count; i++) {
		kvm_iodevice_write(pio_dev, io->port,
				   io->size,
				   pd);
		pd += io->size;
	}
	mutex_unlock(&vcpu->kvm->lock);
}
static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}
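/*
 * Emulate a single (non-string) in/out instruction.  The data travels
 * through the pio_data page, which is mapped into userspace at
 * KVM_PIO_PAGE_OFFSET; returns 1 if the access was satisfied by an
 * in-kernel device, 0 if userspace must complete it.
 */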
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port)
{
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 0;
	vcpu->pio.down = 0;
	vcpu->pio.guest_page_offset = 0;
	vcpu->pio.rep = 0;

	kvm_x86_ops->cache_regs(vcpu);
	memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
	kvm_x86_ops->decache_regs(vcpu);

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (pio_dev) {
		kernel_pio(pio_dev, vcpu, vcpu->pio_data);
		complete_pio(vcpu);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i, ret = 0;
	int nr_pages = 1;
	struct page *page;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 1;
	vcpu->pio.down = down;
	vcpu->pio.guest_page_offset = offset_in_page(address);
	vcpu->pio.rep = rep;

	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		inject_gp(vcpu);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->pio.cur_count = now;

	if (vcpu->pio.cur_count == vcpu->pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	for (i = 0; i < nr_pages; ++i) {
		mutex_lock(&vcpu->kvm->lock);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		vcpu->pio.guest_pages[i] = page;
		mutex_unlock(&vcpu->kvm->lock);
		if (!page) {
			inject_gp(vcpu);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (!vcpu->pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret >= 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		pr_unimpl(vcpu, "no string pio read support yet, "
			  "port %x size %d count %ld\n",
			  port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
__init void kvm_arch_init(void)
{
	kvm_init_msr_list();
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->mp_state = VCPU_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
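/*
 * Hypercall ABI: the guest passes the call number in RAX and up to four
 * arguments in RBX, RCX, RDX and RSI; outside long mode all values are
 * truncated to 32 bits.  The return value goes back in RAX.
 */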
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;

	kvm_x86_ops->cache_regs(vcpu);

	nr = vcpu->regs[VCPU_REGS_RAX];
	a0 = vcpu->regs[VCPU_REGS_RBX];
	a1 = vcpu->regs[VCPU_REGS_RCX];
	a2 = vcpu->regs[VCPU_REGS_RDX];
	a3 = vcpu->regs[VCPU_REGS_RSI];

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_x86_ops->decache_regs(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;

	mutex_lock(&vcpu->kvm->lock);

	/*
	 * Blow out the MMU to ensure that no other VCPU has an active mapping
	 * to ensure that the updated hypercall appears atomically across all
	 * VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->cache_regs(vcpu);
	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}
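/*
 * mk_cr_64() merges a 32-bit mov-to-cr value into the current 64-bit
 * register.  For example, curr_cr 0x0000000180050033 with new_val
 * 0x80050031 yields 0x0000000180050031: the high dword is preserved.
 */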
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_x86_ops->get_rflags(vcpu);
}
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_x86_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	int i;
	u32 function;
	struct kvm_cpuid_entry *e, *best;

	kvm_x86_ops->cache_regs(vcpu);
	function = vcpu->regs[VCPU_REGS_RAX];
	vcpu->regs[VCPU_REGS_RAX] = 0;
	vcpu->regs[VCPU_REGS_RBX] = 0;
	vcpu->regs[VCPU_REGS_RCX] = 0;
	vcpu->regs[VCPU_REGS_RDX] = 0;
	best = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == function) {
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	if (best) {
		vcpu->regs[VCPU_REGS_RAX] = best->eax;
		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
		vcpu->regs[VCPU_REGS_RDX] = best->edx;
	}
	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}
static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
					(vcpu->interrupt_window_open &&
					 vcpu->irq_summary == 0);
}
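/*
 * The inner vcpu execution loop: inject pending events, enter the guest
 * with interrupts disabled and preemption off, then dispatch the exit.
 * Lightweight exits loop back to "again"; a positive return from
 * handle_exit with need_resched() set reschedules via "preempted"; any
 * other result falls out to userspace after post_kvm_run_save().
 */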
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->sipi_vector);
		kvm_lapic_reset(vcpu);
		r = kvm_x86_ops->vcpu_reset(vcpu);
		if (r)
			return r;
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	}

preempted:
	if (vcpu->guest_debug.enabled)
		kvm_x86_ops->guest_debug_pre(vcpu);

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	kvm_inject_pending_timer_irqs(vcpu);

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		++vcpu->stat.signal_exits;
		goto out;
	}

	if (irqchip_in_kernel(vcpu->kvm))
		kvm_x86_ops->inject_pending_irq(vcpu);
	else if (!vcpu->mmio_read_completed)
		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			kvm_x86_ops->tlb_flush(vcpu);

	kvm_x86_ops->run(vcpu, kvm_run);

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		kvm_x86_ops->cache_regs(vcpu);
		profile_hit(KVM_PROFILING, (void *)vcpu->rip);
	}

	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
			goto out;
		}
		if (!need_resched()) {
			++vcpu->stat.light_exits;
			goto again;
		}
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto preempted;
	}

	post_kvm_run_save(vcpu, kvm_run);

	return r;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		vcpu_put(vcpu);
		return -EAGAIN;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}
#if CONFIG_HAS_IOMEM
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->mmio_fault_cr2, 0, 1);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}
#endif
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
		kvm_x86_ops->cache_regs(vcpu);
		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
		kvm_x86_ops->decache_regs(vcpu);
	}

	r = __vcpu_run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
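/*
 * The register accessors below use the cache_regs()/decache_regs()
 * pair: cache_regs() pulls the guest register file out of the hardware
 * state into vcpu->regs, and decache_regs() writes it back.
 */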
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_x86_ops->cache_regs(vcpu);

	regs->rax = vcpu->regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->regs[VCPU_REGS_R8];
	regs->r9 = vcpu->regs[VCPU_REGS_R9];
	regs->r10 = vcpu->regs[VCPU_REGS_R10];
	regs->r11 = vcpu->regs[VCPU_REGS_R11];
	regs->r12 = vcpu->regs[VCPU_REGS_R12];
	regs->r13 = vcpu->regs[VCPU_REGS_R13];
	regs->r14 = vcpu->regs[VCPU_REGS_R14];
	regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->rip;
	regs->rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
	vcpu->regs[VCPU_REGS_R8] = regs->r8;
	vcpu->regs[VCPU_REGS_R9] = regs->r9;
	vcpu->regs[VCPU_REGS_R10] = regs->r10;
	vcpu->regs[VCPU_REGS_R11] = regs->r11;
	vcpu->regs[VCPU_REGS_R12] = regs->r12;
	vcpu->regs[VCPU_REGS_R13] = regs->r13;
	vcpu->regs[VCPU_REGS_R14] = regs->r14;
	vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->rip = regs->rip;
	kvm_x86_ops->set_rflags(vcpu, regs->rflags);

	kvm_x86_ops->decache_regs(vcpu);

	vcpu_put(vcpu);

	return 0;
}
static void get_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_x86_ops->get_segment(vcpu, var, seg);
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct descriptor_table dt;
	int pending_vec;

	vcpu_load(vcpu);

	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->cr0;
	sregs->cr2 = vcpu->cr2;
	sregs->cr3 = vcpu->cr3;
	sregs->cr4 = vcpu->cr4;
	sregs->cr8 = get_cr8(vcpu);
	sregs->efer = vcpu->shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	if (irqchip_in_kernel(vcpu->kvm)) {
		memset(sregs->interrupt_bitmap, 0,
		       sizeof sregs->interrupt_bitmap);
		pending_vec = kvm_x86_ops->get_irq(vcpu);
		if (pending_vec >= 0)
			set_bit(pending_vec,
				(unsigned long *)sregs->interrupt_bitmap);
	} else
		memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
		       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}
static void set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_x86_ops->set_segment(vcpu, var, seg);
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i, pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
	vcpu->cr3 = sregs->cr3;

	set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
#endif
	kvm_set_apic_base(vcpu, sregs->apic_base);

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
	vcpu->cr0 = sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);

	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	if (!irqchip_in_kernel(vcpu->kvm)) {
		memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
		       sizeof vcpu->irq_pending);
		vcpu->irq_summary = 0;
		for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
			if (vcpu->irq_pending[i])
				__set_bit(i, &vcpu->irq_summary);
	} else {
		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
		pending_vec = find_first_bit(
			(const unsigned long *)sregs->interrupt_bitmap,
			max_bits);
		/* Only pending external irq is handled here */
		if (pending_vec < max_bits) {
			kvm_x86_ops->set_irq(vcpu, pending_vec);
			pr_debug("Set back pending irq %d\n",
				 pending_vec);
		}
	}

	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	vcpu_put(vcpu);

	return 0;
}
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	int r;

	vcpu_load(vcpu);

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	vcpu_put(vcpu);

	return r;
}
/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	u32	padding[56];
#endif
};
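/*
 * Translate between the raw fxsave image kept in guest_fx_image and the
 * kvm_fpu ioctl layout: eight 16-byte FP registers, control/status
 * words, last instruction/operand pointers and the XMM register file.
 */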
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	fx_save(&vcpu->host_fx_image);
	fpu_init();
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
	preempt_enable();

	vcpu->cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
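/*
 * Lazy FPU switching: guest FPU state is loaded only when the guest
 * actually uses the FPU (fpu_active) and restored to the host image on
 * the way out, so vcpus that never touch the FPU skip the save/restore.
 */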
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	fx_save(&vcpu->host_fx_image);
	fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);