2
* Kernel-based Virtual Machine control library
4
* This library provides an API to control the kvm hardware virtualization
7
* Copyright (C) 2006 Qumranet
11
* Avi Kivity <avi@qumranet.com>
12
* Yaniv Kamay <yaniv@qumranet.com>
14
* This work is licensed under the GNU LGPL license, version 2.
18
#define __user /* temporary, until installed via make headers_install */
21
#include <linux/kvm.h>
23
#define EXPECTED_KVM_API_VERSION 12
25
#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
26
#error libkvm: userspace and kernel version mismatch
36
#include <sys/ioctl.h>
38
#include "kvm-abi-10.h"
40
#if defined(__x86_64__) || defined(__i386__)
44
int kvm_abi = EXPECTED_KVM_API_VERSION;
47
unsigned long phys_addr;
50
unsigned long userspace_addr;
53
struct slot_info slots[KVM_MAX_NUM_MEM_REGIONS];
59
for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
63
int get_free_slot(kvm_context_t kvm)
68
#ifdef KVM_CAP_SET_TSS_ADDR
69
tss_ext = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
75
* on older kernels where the set tss ioctl is not supported we must save
76
* slot 0 to hold the extended memory, as the vmx will use the last 3
84
for (; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
90
void register_slot(int slot, unsigned long phys_addr, unsigned long len,
91
int user_alloc, unsigned long userspace_addr)
93
slots[slot].phys_addr = phys_addr;
94
slots[slot].len = len;
95
slots[slot].user_alloc = user_alloc;
96
slots[slot].userspace_addr = userspace_addr;
99
void free_slot(int slot)
104
int get_slot(unsigned long phys_addr)
108
for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS ; ++i) {
109
if (slots[i].len && slots[i].phys_addr <= phys_addr &&
110
(slots[i].phys_addr + slots[i].len) >= phys_addr)
116
int get_intersecting_slot(unsigned long phys_addr)
120
for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS ; ++i)
121
if (slots[i].len && slots[i].phys_addr < phys_addr &&
122
(slots[i].phys_addr + slots[i].len) > phys_addr)
128
* dirty pages logging control
130
static int kvm_dirty_pages_log_change(kvm_context_t kvm, unsigned long phys_addr
136
slot = get_slot(phys_addr);
138
fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
141
#ifdef KVM_CAP_USER_MEMORY
142
if (slots[slot].user_alloc) {
143
struct kvm_userspace_memory_region mem = {
145
.memory_size = slots[slot].len,
146
.guest_phys_addr = slots[slot].phys_addr,
147
.userspace_addr = slots[slot].userspace_addr,
150
r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
153
if (!slots[slot].user_alloc) {
154
struct kvm_memory_region mem = {
156
.memory_size = slots[slot].len,
157
.guest_phys_addr = slots[slot].phys_addr,
160
r = ioctl(kvm->vm_fd, KVM_SET_MEMORY_REGION, &mem);
163
fprintf(stderr, "%s: %m\n", __FUNCTION__);
167
static int kvm_dirty_pages_log_change_all(kvm_context_t kvm, __u32 flag)
171
for (i=r=0; i<KVM_MAX_NUM_MEM_REGIONS && r==0; i++) {
173
r = kvm_dirty_pages_log_change(kvm, slots[i].phys_addr,
180
* Enable dirty page logging for all memory regions
182
int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
184
if (kvm->dirty_pages_log_all)
186
kvm->dirty_pages_log_all = 1;
187
return kvm_dirty_pages_log_change_all(kvm, KVM_MEM_LOG_DIRTY_PAGES);
191
* Enable dirty page logging only for memory regions that were created with
192
* dirty logging enabled (disable for all other memory regions).
194
int kvm_dirty_pages_log_reset(kvm_context_t kvm)
196
if (!kvm->dirty_pages_log_all)
198
kvm->dirty_pages_log_all = 0;
199
return kvm_dirty_pages_log_change_all(kvm, 0);
203
kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
210
fd = open("/dev/kvm", O_RDWR);
212
perror("open /dev/kvm");
215
r = ioctl(fd, KVM_GET_API_VERSION, 0);
217
fprintf(stderr, "kvm kernel version too old: "
218
"KVM_GET_API_VERSION ioctl not supported\n");
221
if (r < EXPECTED_KVM_API_VERSION && r != 10) {
222
fprintf(stderr, "kvm kernel version too old: "
223
"We expect API version %d or newer, but got "
225
EXPECTED_KVM_API_VERSION, r);
228
if (r > EXPECTED_KVM_API_VERSION) {
229
fprintf(stderr, "kvm userspace version too old\n");
233
kvm = malloc(sizeof(*kvm));
236
kvm->callbacks = callbacks;
237
kvm->opaque = opaque;
238
kvm->dirty_pages_log_all = 0;
239
kvm->no_irqchip_creation = 0;
240
memset(&kvm->mem_regions, 0, sizeof(kvm->mem_regions));
248
void kvm_finalize(kvm_context_t kvm)
250
if (kvm->vcpu_fd[0] != -1)
251
close(kvm->vcpu_fd[0]);
252
if (kvm->vm_fd != -1)
258
/*
 * Ask kvm_create_irqchip() not to create an in-kernel irqchip for this
 * VM, even if the kernel supports one.
 */
void kvm_disable_irqchip_creation(kvm_context_t kvm)
{
	kvm->no_irqchip_creation = 1;
}
263
int kvm_create_vcpu(kvm_context_t kvm, int slot)
268
r = ioctl(kvm->vm_fd, KVM_CREATE_VCPU, slot);
271
fprintf(stderr, "kvm_create_vcpu: %m\n");
274
kvm->vcpu_fd[slot] = r;
275
mmap_size = ioctl(kvm->fd, KVM_GET_VCPU_MMAP_SIZE, 0);
276
if (mmap_size == -1) {
278
fprintf(stderr, "get vcpu mmap size: %m\n");
281
kvm->run[slot] = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
282
kvm->vcpu_fd[slot], 0);
283
if (kvm->run[slot] == MAP_FAILED) {
285
fprintf(stderr, "mmap vcpu area: %m\n");
291
int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
293
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
296
r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
297
KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
299
r = ioctl(kvm->vm_fd, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
301
fprintf(stderr, "kvm_set_shadow_pages: %m\n");
310
int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
312
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
315
r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
316
KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
318
*nrshadow_pages = ioctl(kvm->vm_fd, KVM_GET_NR_MMU_PAGES);
326
int kvm_create_vm(kvm_context_t kvm)
330
kvm->vcpu_fd[0] = -1;
332
fd = ioctl(fd, KVM_CREATE_VM, 0);
334
fprintf(stderr, "kvm_create_vm: %m\n");
341
static int kvm_create_default_phys_mem(kvm_context_t kvm,
342
unsigned long phys_mem_bytes,
345
unsigned long memory = (phys_mem_bytes + PAGE_SIZE - 1) & PAGE_MASK;
348
#ifdef KVM_CAP_USER_MEMORY
349
r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
354
r = kvm_alloc_kernel_memory(kvm, memory, vm_mem);
358
r = kvm_arch_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
362
kvm->physical_memory = *vm_mem;
366
int kvm_check_extension(kvm_context_t kvm, int ext)
370
ret = ioctl(kvm->fd, KVM_CHECK_EXTENSION, ext);
376
void kvm_create_irqchip(kvm_context_t kvm)
380
kvm->irqchip_in_kernel = 0;
381
#ifdef KVM_CAP_IRQCHIP
382
if (!kvm->no_irqchip_creation) {
383
r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
384
if (r > 0) { /* kernel irqchip supported */
385
r = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
387
kvm->irqchip_in_kernel = 1;
389
printf("Create kernel PIC irqchip failed\n");
395
int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
399
r = kvm_create_vm(kvm);
402
r = kvm_arch_create(kvm, phys_mem_bytes, vm_mem);
406
r = kvm_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
409
kvm_create_irqchip(kvm);
410
r = kvm_create_vcpu(kvm, 0);
418
#ifdef KVM_CAP_USER_MEMORY
420
void *kvm_create_userspace_phys_mem(kvm_context_t kvm, unsigned long phys_start,
421
unsigned long len, int log, int writable)
424
int prot = PROT_READ;
426
struct kvm_userspace_memory_region memory = {
428
.guest_phys_addr = phys_start,
429
.flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
435
ptr = mmap(NULL, len, prot, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
436
if (ptr == MAP_FAILED) {
437
fprintf(stderr, "create_userspace_phys_mem: %s", strerror(errno));
443
memory.userspace_addr = (unsigned long)ptr;
444
memory.slot = get_free_slot(kvm);
445
r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
447
fprintf(stderr, "create_userspace_phys_mem: %s", strerror(errno));
450
register_slot(memory.slot, memory.guest_phys_addr, memory.memory_size,
451
1, memory.userspace_addr);
458
void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
459
unsigned long len, int log, int writable)
461
#ifdef KVM_CAP_USER_MEMORY
464
r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
466
return kvm_create_userspace_phys_mem(kvm, phys_start, len,
470
return kvm_create_kernel_phys_mem(kvm, phys_start, len,
474
/*
 * Return nonzero if @phys_start falls strictly inside an already
 * registered memory slot.
 */
int kvm_is_intersecting_mem(kvm_context_t kvm, unsigned long phys_start)
{
	int slot = get_intersecting_slot(phys_start);

	return slot != -1;
}
479
int kvm_is_allocated_mem(kvm_context_t kvm, unsigned long phys_start,
484
slot = get_slot(phys_start);
487
if (slots[slot].len == len)
492
int kvm_create_mem_hole(kvm_context_t kvm, unsigned long phys_start,
495
#ifdef KVM_CAP_USER_MEMORY
498
struct kvm_userspace_memory_region rmslot;
499
struct kvm_userspace_memory_region newslot1;
500
struct kvm_userspace_memory_region newslot2;
502
len = (len + PAGE_SIZE - 1) & PAGE_MASK;
504
slot = get_intersecting_slot(phys_start);
505
/* no need to create hole, as there is already hole */
509
memset(&rmslot, 0, sizeof(struct kvm_userspace_memory_region));
510
memset(&newslot1, 0, sizeof(struct kvm_userspace_memory_region));
511
memset(&newslot2, 0, sizeof(struct kvm_userspace_memory_region));
513
rmslot.guest_phys_addr = slots[slot].phys_addr;
516
newslot1.guest_phys_addr = slots[slot].phys_addr;
517
newslot1.memory_size = phys_start - slots[slot].phys_addr;
518
newslot1.slot = slot;
519
newslot1.userspace_addr = slots[slot].userspace_addr;
521
newslot2.guest_phys_addr = newslot1.guest_phys_addr +
522
newslot1.memory_size + len;
523
newslot2.memory_size = slots[slot].phys_addr +
524
slots[slot].len - newslot2.guest_phys_addr;
525
newslot2.userspace_addr = newslot1.userspace_addr +
526
newslot1.memory_size;
527
newslot2.slot = get_free_slot(kvm);
529
r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &rmslot);
531
fprintf(stderr, "kvm_create_mem_hole: %s\n", strerror(errno));
536
r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &newslot1);
538
fprintf(stderr, "kvm_create_mem_hole: %s\n", strerror(errno));
541
register_slot(newslot1.slot, newslot1.guest_phys_addr,
542
newslot1.memory_size, 1, newslot1.userspace_addr);
544
r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &newslot2);
546
fprintf(stderr, "kvm_create_mem_hole: %s\n", strerror(errno));
549
register_slot(newslot2.slot, newslot2.guest_phys_addr,
550
newslot2.memory_size, 1, newslot2.userspace_addr);
555
int kvm_register_userspace_phys_mem(kvm_context_t kvm,
556
unsigned long phys_start, void *userspace_addr,
557
unsigned long len, int log)
559
#ifdef KVM_CAP_USER_MEMORY
560
struct kvm_userspace_memory_region memory = {
562
.guest_phys_addr = phys_start,
563
.userspace_addr = (intptr_t)userspace_addr,
564
.flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
568
if (!kvm->physical_memory)
569
kvm->physical_memory = userspace_addr - phys_start;
571
memory.slot = get_free_slot(kvm);
572
r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
574
fprintf(stderr, "create_userspace_phys_mem: %s\n", strerror(errno));
577
register_slot(memory.slot, memory.guest_phys_addr, memory.memory_size,
578
1, memory.userspace_addr);
586
/* destroy/free a whole slot.
587
* phys_start, len and slot are the params passed to kvm_create_phys_mem()
589
void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
593
struct kvm_memory_region *mem;
595
slot = get_slot(phys_start);
597
if (slot >= KVM_MAX_NUM_MEM_REGIONS) {
598
fprintf(stderr, "BUG: %s: invalid parameters (slot=%d)\n",
602
mem = &kvm->mem_regions[slot];
603
if (phys_start != mem->guest_phys_addr) {
605
"WARNING: %s: phys_start is 0x%lx expecting 0x%llx\n",
606
__FUNCTION__, phys_start, mem->guest_phys_addr);
607
phys_start = mem->guest_phys_addr;
609
kvm_create_phys_mem(kvm, phys_start, 0, 0, 0);
612
static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
615
struct kvm_dirty_log log = {
619
log.dirty_bitmap = buf;
621
r = ioctl(kvm->vm_fd, ioctl_num, &log);
627
/*
 * Fetch the dirty-page bitmap of the slot containing @phys_addr into
 * @buf via the KVM_GET_DIRTY_LOG ioctl (one bit per page).
 */
int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
{
	int slot = get_slot(phys_addr);

	return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
}
635
int kvm_get_mem_map(kvm_context_t kvm, unsigned long phys_addr, void *buf)
639
slot = get_slot(phys_addr);
640
#ifdef KVM_GET_MEM_MAP
641
return kvm_get_map(kvm, KVM_GET_MEM_MAP, slot, buf);
642
#else /* not KVM_GET_MEM_MAP ==> fake it: all pages exist */
643
unsigned long i, n, m, npages;
646
if (slot >= KVM_MAX_NUM_MEM_REGIONS) {
650
npages = kvm->mem_regions[slot].memory_size / PAGE_SIZE;
653
memset(buf, 0xff, n); /* all pages exist */
655
for (i=0; i<=m; i++) /* last byte may not be "aligned" */
658
*(unsigned char*)(buf+n) = v;
660
#endif /* KVM_GET_MEM_MAP */
663
#ifdef KVM_CAP_IRQCHIP
665
/*
 * Assert or deassert interrupt line @irq on the in-kernel irqchip.
 * Returns 0 when no in-kernel irqchip exists, 1 otherwise.
 * NOTE(review): interior lines were lost in extraction; the event
 * setup and return convention are reconstructed — confirm against
 * project history.
 */
int kvm_set_irq_level(kvm_context_t kvm, int irq, int level)
{
	struct kvm_irq_level event;
	int r;

	if (!kvm->irqchip_in_kernel)
		return 0;

	event.irq = irq;
	event.level = level;
	r = ioctl(kvm->vm_fd, KVM_IRQ_LINE, &event);
	if (r == -1)
		perror("kvm_set_irq_level");
	return 1;
}
680
/*
 * Read the in-kernel irqchip state into @chip.  Returns 0 when there is
 * no in-kernel irqchip, the ioctl result otherwise.
 *
 * Fix: perror() appends ": <error>\n" itself, so the trailing "\n"
 * embedded in the message produced a stray blank line — dropped it.
 * NOTE(review): the error-path details besides perror were lost in
 * extraction and are reconstructed — confirm against project history.
 */
int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
	int r;

	if (!kvm->irqchip_in_kernel)
		return 0;
	r = ioctl(kvm->vm_fd, KVM_GET_IRQCHIP, chip);
	if (r == -1) {
		r = -errno;
		perror("kvm_get_irqchip");
	}
	return r;
}
694
/*
 * Write irqchip state from @chip into the in-kernel irqchip.  Returns 0
 * when there is no in-kernel irqchip, the ioctl result otherwise.
 *
 * Fix: perror() appends ": <error>\n" itself, so the trailing "\n"
 * embedded in the message produced a stray blank line — dropped it.
 * NOTE(review): the error-path details besides perror were lost in
 * extraction and are reconstructed — confirm against project history.
 */
int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
	int r;

	if (!kvm->irqchip_in_kernel)
		return 0;
	r = ioctl(kvm->vm_fd, KVM_SET_IRQCHIP, chip);
	if (r == -1) {
		r = -errno;
		perror("kvm_set_irqchip");
	}
	return r;
}
710
static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
712
uint16_t addr = run->io.port;
715
void *p = (void *)run + run->io.data_offset;
717
for (i = 0; i < run->io.count; ++i) {
718
switch (run->io.direction) {
720
switch (run->io.size) {
722
r = kvm->callbacks->inb(kvm->opaque, addr, p);
725
r = kvm->callbacks->inw(kvm->opaque, addr, p);
728
r = kvm->callbacks->inl(kvm->opaque, addr, p);
731
fprintf(stderr, "bad I/O size %d\n", run->io.size);
735
case KVM_EXIT_IO_OUT:
736
switch (run->io.size) {
738
r = kvm->callbacks->outb(kvm->opaque, addr,
742
r = kvm->callbacks->outw(kvm->opaque, addr,
746
r = kvm->callbacks->outl(kvm->opaque, addr,
750
fprintf(stderr, "bad I/O size %d\n", run->io.size);
755
fprintf(stderr, "bad I/O direction %d\n", run->io.direction);
765
/* Forward a KVM_EXIT_DEBUG exit to the user-supplied debug callback. */
int handle_debug(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->debug(kvm->opaque, vcpu);
}
770
/* Read the general-purpose register state of @vcpu into @regs. */
int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
	int fd = kvm->vcpu_fd[vcpu];

	return ioctl(fd, KVM_GET_REGS, regs);
}
775
/* Load the general-purpose register state of @vcpu from @regs. */
int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
{
	int fd = kvm->vcpu_fd[vcpu];

	return ioctl(fd, KVM_SET_REGS, regs);
}
780
/* Read the FPU state of @vcpu into @fpu. */
int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
	int fd = kvm->vcpu_fd[vcpu];

	return ioctl(fd, KVM_GET_FPU, fpu);
}
785
/* Load the FPU state of @vcpu from @fpu. */
int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
{
	int fd = kvm->vcpu_fd[vcpu];

	return ioctl(fd, KVM_SET_FPU, fpu);
}
790
/* Read the special (segment/control) register state of @vcpu into @sregs. */
int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
	int fd = kvm->vcpu_fd[vcpu];

	return ioctl(fd, KVM_GET_SREGS, sregs);
}
795
/* Load the special (segment/control) register state of @vcpu from @sregs. */
int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
{
	int fd = kvm->vcpu_fd[vcpu];

	return ioctl(fd, KVM_SET_SREGS, sregs);
}
800
static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
802
unsigned long addr = kvm_run->mmio.phys_addr;
803
void *data = kvm_run->mmio.data;
806
/* hack: Red Hat 7.1 generates these weird accesses. */
807
if (addr == 0xa0000 && kvm_run->mmio.len == 3)
810
if (kvm_run->mmio.is_write) {
811
switch (kvm_run->mmio.len) {
813
r = kvm->callbacks->writeb(kvm->opaque, addr, *(uint8_t *)data);
816
r = kvm->callbacks->writew(kvm->opaque, addr, *(uint16_t *)data);
819
r = kvm->callbacks->writel(kvm->opaque, addr, *(uint32_t *)data);
822
r = kvm->callbacks->writeq(kvm->opaque, addr, *(uint64_t *)data);
826
switch (kvm_run->mmio.len) {
828
r = kvm->callbacks->readb(kvm->opaque, addr, (uint8_t *)data);
831
r = kvm->callbacks->readw(kvm->opaque, addr, (uint16_t *)data);
834
r = kvm->callbacks->readl(kvm->opaque, addr, (uint32_t *)data);
837
r = kvm->callbacks->readq(kvm->opaque, addr, (uint64_t *)data);
844
/* Notify the client that the guest is ready to accept pending I/O. */
int handle_io_window(kvm_context_t kvm)
{
	return kvm->callbacks->io_window(kvm->opaque);
}
849
/* Forward a KVM_EXIT_HLT exit to the client's halt callback. */
int handle_halt(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->halt(kvm->opaque, vcpu);
}
854
/* Forward a KVM_EXIT_SHUTDOWN exit to the client's shutdown callback. */
int handle_shutdown(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->shutdown(kvm->opaque, vcpu);
}
859
/* Ask the client to queue any pending interrupts (userspace irqchip). */
int try_push_interrupts(kvm_context_t kvm)
{
	return kvm->callbacks->try_push_interrupts(kvm->opaque);
}
864
/* Client hook invoked right after KVM_RUN returns for @vcpu. */
void post_kvm_run(kvm_context_t kvm, int vcpu)
{
	kvm->callbacks->post_kvm_run(kvm->opaque, vcpu);
}
869
/* Client hook invoked just before KVM_RUN is entered for @vcpu. */
int pre_kvm_run(kvm_context_t kvm, int vcpu)
{
	return kvm->callbacks->pre_kvm_run(kvm->opaque, vcpu);
}
874
/*
 * Return the vcpu's saved interrupt flag (IF) from the shared kvm_run
 * area, honoring the old ABI-10 layout when running against an ABI-10
 * kernel.
 */
int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return ((struct kvm_run_abi10 *)run)->if_flag;
	return run->if_flag;
}
883
/*
 * Return nonzero when the vcpu can accept an injected interrupt,
 * honoring the old ABI-10 layout of struct kvm_run when needed.
 */
int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu)
{
	struct kvm_run *run = kvm->run[vcpu];

	if (kvm_abi == 10)
		return ((struct kvm_run_abi10 *)run)->ready_for_interrupt_injection;
	return run->ready_for_interrupt_injection;
}
892
int kvm_run(kvm_context_t kvm, int vcpu)
895
int fd = kvm->vcpu_fd[vcpu];
896
struct kvm_run *run = kvm->run[vcpu];
899
return kvm_run_abi10(kvm, vcpu);
902
if (!kvm->irqchip_in_kernel)
903
run->request_interrupt_window = try_push_interrupts(kvm);
904
r = pre_kvm_run(kvm, vcpu);
907
r = ioctl(fd, KVM_RUN, 0);
908
post_kvm_run(kvm, vcpu);
910
if (r == -1 && errno != EINTR && errno != EAGAIN) {
912
printf("kvm_run: %m\n");
916
r = handle_io_window(kvm);
920
switch (run->exit_reason) {
921
case KVM_EXIT_UNKNOWN:
922
fprintf(stderr, "unhandled vm exit: 0x%x vcpu_id %d\n",
923
(unsigned)run->hw.hardware_exit_reason, vcpu);
924
kvm_show_regs(kvm, vcpu);
927
case KVM_EXIT_FAIL_ENTRY:
928
fprintf(stderr, "kvm_run: failed entry, reason %u\n",
929
(unsigned)run->fail_entry.hardware_entry_failure_reason & 0xffff);
932
case KVM_EXIT_EXCEPTION:
933
fprintf(stderr, "exception %d (%x)\n",
936
kvm_show_regs(kvm, vcpu);
937
kvm_show_code(kvm, vcpu);
941
r = handle_io(kvm, run, vcpu);
944
r = handle_debug(kvm, vcpu);
947
r = handle_mmio(kvm, run);
950
r = handle_halt(kvm, vcpu);
952
case KVM_EXIT_IRQ_WINDOW_OPEN:
954
case KVM_EXIT_SHUTDOWN:
955
r = handle_shutdown(kvm, vcpu);
957
#ifdef KVM_EXIT_SET_TPR
958
case KVM_EXIT_SET_TPR:
962
fprintf(stderr, "unhandled vm exit: 0x%x\n", run->exit_reason);
963
kvm_show_regs(kvm, vcpu);
974
/*
 * Inject external interrupt @irq into @vcpu via KVM_INTERRUPT
 * (userspace-irqchip path).
 */
int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
{
	struct kvm_interrupt intr;

	intr.irq = irq;
	return ioctl(kvm->vcpu_fd[vcpu], KVM_INTERRUPT, &intr);
}
982
/* Install guest debug settings (breakpoints/single-step) for @vcpu. */
int kvm_guest_debug(kvm_context_t kvm, int vcpu, struct kvm_debug_guest *dbg)
{
	int fd = kvm->vcpu_fd[vcpu];

	return ioctl(fd, KVM_DEBUG_GUEST, dbg);
}
987
int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
989
struct kvm_signal_mask *sigmask;
993
r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, NULL);
998
sigmask = malloc(sizeof(*sigmask) + sizeof(*sigset));
1003
memcpy(sigmask->sigset, sigset, sizeof(*sigset));
1004
r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, sigmask);
1011
/* Nonzero when this VM uses the in-kernel PIC/IOAPIC irqchip. */
int kvm_irqchip_in_kernel(kvm_context_t kvm)
{
	return kvm->irqchip_in_kernel;
}