/*
 * xen_internal.c: direct access to Xen hypervisor level
 *
 * Copyright (C) 2005-2010 Red Hat, Inc.
 *
 * See COPYING.LIB for the License of this software
 *
 * Daniel Veillard <veillard@redhat.com>
 */
/* required for uint8_t, uint32_t, etc ... */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>

# include <sys/systeminfo.h>

# ifndef PRIV_XVM_CONTROL
#  define PRIV_XVM_CONTROL ((const char *)"xvm_control")
# endif

/* required for dom0_getdomaininfo_t */
#include <xen/dom0_ops.h>
#include <xen/version.h>

#ifdef HAVE_XEN_LINUX_PRIVCMD_H
# include <xen/linux/privcmd.h>
#else
# ifdef HAVE_XEN_SYS_PRIVCMD_H
#  include <xen/sys/privcmd.h>
# endif
#endif

/* required for shutdown flags */
#include <xen/sched.h>

#include "virterror_internal.h"
#include "datatypes.h"
#include "xen_driver.h"
#include "xen_hypervisor.h"
#include "xs_internal.h"
#include "stats_linux.h"
#include "block_stats.h"
#include "xend_internal.h"
#include "capabilities.h"

#define VIR_FROM_THIS VIR_FROM_XEN
/*
 * So far there are two versions of the structures usable for doing
 * hypervisor calls.
 */
typedef struct v0_hypercall_struct {
    unsigned long op;
    unsigned long arg[5];
} v0_hypercall_t;

#ifdef __linux__
# define XEN_V0_IOCTL_HYPERCALL_CMD \
    _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t))
/* the new one */
typedef struct v1_hypercall_struct
{
    uint64_t op;
    uint64_t arg[5];
} v1_hypercall_t;
# define XEN_V1_IOCTL_HYPERCALL_CMD \
    _IOC(_IOC_NONE, 'P', 0, sizeof(v1_hypercall_t))
typedef v1_hypercall_t hypercall_t;
#elif defined(__sun)
typedef privcmd_hypercall_t hypercall_t;
#else
# error "unsupported platform"
#endif
#ifndef __HYPERVISOR_sysctl
# define __HYPERVISOR_sysctl 35
#endif
#ifndef __HYPERVISOR_domctl
# define __HYPERVISOR_domctl 36
#endif

#ifdef WITH_RHEL5_API
# define SYS_IFACE_MIN_VERS_NUMA 3
#else
# define SYS_IFACE_MIN_VERS_NUMA 4
#endif

/* xen-unstable changeset 19788 removed MAX_VIRT_CPUS from public
 * headers. Its semantics were retained with XEN_LEGACY_MAX_VCPUS.
 * Ensure MAX_VIRT_CPUS is defined accordingly.
 */
#if !defined(MAX_VIRT_CPUS) && defined(XEN_LEGACY_MAX_VCPUS)
# define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS
#endif
static int xen_ioctl_hypercall_cmd = 0;
static int initialized = 0;
static int in_init = 0;
static int hv_version = 0;
static int hypervisor_version = 2;
static int sys_interface_version = -1;
static int dom_interface_version = -1;
static int kb_per_pages = 0;
/* Regular expressions used by xenHypervisorGetCapabilities, and
 * compiled once by xenHypervisorInit. Note that these are POSIX.2
 * extended regular expressions (regex(7)).
 */
static const char *flags_hvm_re = "^flags[[:blank:]]+:.* (vmx|svm)[[:space:]]";
static regex_t flags_hvm_rec;
static const char *flags_pae_re = "^flags[[:blank:]]+:.* pae[[:space:]]";
static regex_t flags_pae_rec;
static const char *xen_cap_re = "(xen|hvm)-[[:digit:]]+\\.[[:digit:]]+-(x86_32|x86_64|ia64|powerpc64)(p|be)?";
static regex_t xen_cap_rec;
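
/*
 * Illustration (not from the original file): sample inputs these
 * patterns match. A /proc/cpuinfo line such as
 *
 *     flags           : fpu vme de pse msr pae mce cx8 apic ... vmx ...
 *
 * matches flags_hvm_re (capturing "vmx") and flags_pae_re, while a
 * token such as "xen-3.0-x86_64" or "hvm-3.0-x86_32p" from
 * /sys/hypervisor/properties/capabilities matches xen_cap_re.
 */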
/*
 * The content of the structures for a getdomaininfolist system hypercall
 */
#ifndef DOMFLAGS_DYING
# define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.            */
# define DOMFLAGS_HVM       (1<<1) /* Domain is HVM                          */
# define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.            */
# define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.  */
# define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.    */
# define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.           */
# define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.     */
# define DOMFLAGS_CPUSHIFT       8
# define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code. */
# define DOMFLAGS_SHUTDOWNSHIFT 16
#endif
/*
 * These flags explain why a domain is in the "shutdown" state. Normally
 * they are defined in xen/sched.h.
 */
#ifndef SHUTDOWN_poweroff
# define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
# define SHUTDOWN_reboot   1 /* Clean up, kill, and then restart.          */
# define SHUTDOWN_suspend  2 /* Clean up, save suspend info, kill.         */
# define SHUTDOWN_crash    3 /* Tell controller we've crashed.             */
#endif
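
/*
 * Worked example (illustrative, not part of the original file): the
 * guest-supplied shutdown code shares the flags word with the other
 * DOMFLAGS bits, so given the flags from a getdomaininfo call (see
 * XEN_GETDOMAININFO_FLAGS further below) it is recovered by shifting
 * and masking:
 *
 *     unsigned int flags = XEN_GETDOMAININFO_FLAGS(dominfo);
 *     if (flags & DOMFLAGS_SHUTDOWN) {
 *         int reason = (flags >> DOMFLAGS_SHUTDOWNSHIFT)
 *                      & DOMFLAGS_SHUTDOWNMASK;
 *         switch (reason) {
 *         case SHUTDOWN_poweroff: ...
 *         case SHUTDOWN_reboot:   ...
 *         case SHUTDOWN_crash:    ...
 *         }
 *     }
 */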
#define XEN_V0_OP_GETDOMAININFOLIST 38
#define XEN_V1_OP_GETDOMAININFOLIST 38
#define XEN_V2_OP_GETDOMAININFOLIST 6

struct xen_v0_getdomaininfo {
    domid_t  domain;    /* the domain number */
    uint32_t flags;     /* flags, see before */
    uint64_t tot_pages; /* total number of pages used */
    uint64_t max_pages; /* maximum number of pages allowed */
    unsigned long shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    xen_domain_handle_t handle;
};
typedef struct xen_v0_getdomaininfo xen_v0_getdomaininfo;

struct xen_v2_getdomaininfo {
    domid_t  domain;    /* the domain number */
    uint32_t flags;     /* flags, see before */
    uint64_t tot_pages; /* total number of pages used */
    uint64_t max_pages; /* maximum number of pages allowed */
    uint64_t shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    xen_domain_handle_t handle;
};
typedef struct xen_v2_getdomaininfo xen_v2_getdomaininfo;

/* As of Hypervisor Call v2, DomCtl v5 we are now 8-byte aligned
   even on 32-bit archs when dealing with uint64_t */
#define ALIGN_64 __attribute__((aligned(8)))
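
/*
 * Why ALIGN_64 matters, as a sketch (illustrative, not part of the
 * original file): on 32-bit x86 a uint64_t is only 4-byte aligned by
 * default, while the 64-bit hypervisor lays out the same structure
 * with 8-byte alignment, so the two sides would disagree about field
 * offsets. Forcing the alignment keeps the ABI identical:
 *
 *     struct unpadded { uint32_t a; uint64_t b; };
 *     struct padded   { uint32_t a; uint64_t b ALIGN_64; };
 *
 * On i386, sizeof(struct unpadded) is 12 with b at offset 4, while
 * sizeof(struct padded) is 16 with b at offset 8, matching x86_64.
 */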
struct xen_v2d5_getdomaininfo {
    domid_t  domain;    /* the domain number */
    uint32_t flags;     /* flags, see before */
    uint64_t tot_pages ALIGN_64; /* total number of pages used */
    uint64_t max_pages ALIGN_64; /* maximum number of pages allowed */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    xen_domain_handle_t handle;
};
typedef struct xen_v2d5_getdomaininfo xen_v2d5_getdomaininfo;

struct xen_v2d6_getdomaininfo {
    domid_t  domain;    /* the domain number */
    uint32_t flags;     /* flags, see before */
    uint64_t tot_pages ALIGN_64; /* total number of pages used */
    uint64_t max_pages ALIGN_64; /* maximum number of pages allowed */
    uint64_t shr_pages ALIGN_64; /* number of shared pages */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    xen_domain_handle_t handle;
};
typedef struct xen_v2d6_getdomaininfo xen_v2d6_getdomaininfo;

union xen_getdomaininfo {
    struct xen_v0_getdomaininfo v0;
    struct xen_v2_getdomaininfo v2;
    struct xen_v2d5_getdomaininfo v2d5;
    struct xen_v2d6_getdomaininfo v2d6;
};
typedef union xen_getdomaininfo xen_getdomaininfo;

union xen_getdomaininfolist {
    struct xen_v0_getdomaininfo *v0;
    struct xen_v2_getdomaininfo *v2;
    struct xen_v2d5_getdomaininfo *v2d5;
    struct xen_v2d6_getdomaininfo *v2d6;
};
typedef union xen_getdomaininfolist xen_getdomaininfolist;
struct xen_v2_getschedulerid {
    uint32_t sched_id; /* Get Scheduler ID from Xen */
};
typedef struct xen_v2_getschedulerid xen_v2_getschedulerid;

union xen_getschedulerid {
    struct xen_v2_getschedulerid *v2;
};
typedef union xen_getschedulerid xen_getschedulerid;

struct xen_v2s4_availheap {
    uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
    uint32_t max_bitwidth; /* Largest address width (zero if don't care). */
    int32_t  node;         /* NUMA node (-1 for sum across all nodes). */
    uint64_t avail_bytes;  /* Bytes available in the specified region. */
};
typedef struct xen_v2s4_availheap xen_v2s4_availheap;

struct xen_v2s5_availheap {
    uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
    uint32_t max_bitwidth; /* Largest address width (zero if don't care). */
    int32_t  node;         /* NUMA node (-1 for sum across all nodes). */
    uint64_t avail_bytes ALIGN_64; /* Bytes available in the specified region. */
};
typedef struct xen_v2s5_availheap xen_v2s5_availheap;
#define XEN_GETDOMAININFOLIST_ALLOC(domlist, size)                  \
    (hypervisor_version < 2 ?                                       \
     (VIR_ALLOC_N(domlist.v0, (size)) == 0) :                       \
     (dom_interface_version >= 6 ?                                  \
      (VIR_ALLOC_N(domlist.v2d6, (size)) == 0) :                    \
      (dom_interface_version == 5 ?                                 \
       (VIR_ALLOC_N(domlist.v2d5, (size)) == 0) :                   \
       (VIR_ALLOC_N(domlist.v2, (size)) == 0))))

#define XEN_GETDOMAININFOLIST_FREE(domlist)                         \
    (hypervisor_version < 2 ?                                       \
     VIR_FREE(domlist.v0) :                                         \
     (dom_interface_version >= 6 ?                                  \
      VIR_FREE(domlist.v2d6) :                                      \
      (dom_interface_version == 5 ?                                 \
       VIR_FREE(domlist.v2d5) :                                     \
       VIR_FREE(domlist.v2))))

#define XEN_GETDOMAININFOLIST_CLEAR(domlist, size)                  \
    (hypervisor_version < 2 ?                                       \
     memset(domlist.v0, 0, sizeof(*domlist.v0) * size) :            \
     (dom_interface_version >= 6 ?                                  \
      memset(domlist.v2d6, 0, sizeof(*domlist.v2d6) * size) :       \
      (dom_interface_version == 5 ?                                 \
       memset(domlist.v2d5, 0, sizeof(*domlist.v2d5) * size) :      \
       memset(domlist.v2, 0, sizeof(*domlist.v2) * size))))

#define XEN_GETDOMAININFOLIST_DOMAIN(domlist, n)                    \
    (hypervisor_version < 2 ?                                       \
     domlist.v0[n].domain :                                         \
     (dom_interface_version >= 6 ?                                  \
      domlist.v2d6[n].domain :                                      \
      (dom_interface_version == 5 ?                                 \
       domlist.v2d5[n].domain :                                     \
       domlist.v2[n].domain)))

#define XEN_GETDOMAININFOLIST_UUID(domlist, n)                      \
    (hypervisor_version < 2 ?                                       \
     domlist.v0[n].handle :                                         \
     (dom_interface_version >= 6 ?                                  \
      domlist.v2d6[n].handle :                                      \
      (dom_interface_version == 5 ?                                 \
       domlist.v2d5[n].handle :                                     \
       domlist.v2[n].handle)))

#define XEN_GETDOMAININFOLIST_DATA(domlist)                         \
    (hypervisor_version < 2 ?                                       \
     (void*)(domlist->v0) :                                         \
     (dom_interface_version >= 6 ?                                  \
      (void*)(domlist->v2d6) :                                      \
      (dom_interface_version == 5 ?                                 \
       (void*)(domlist->v2d5) :                                     \
       (void*)(domlist->v2))))

#define XEN_GETDOMAININFO_SIZE                                      \
    (hypervisor_version < 2 ?                                       \
     sizeof(xen_v0_getdomaininfo) :                                 \
     (dom_interface_version >= 6 ?                                  \
      sizeof(xen_v2d6_getdomaininfo) :                              \
      (dom_interface_version == 5 ?                                 \
       sizeof(xen_v2d5_getdomaininfo) :                             \
       sizeof(xen_v2_getdomaininfo))))

#define XEN_GETDOMAININFO_CLEAR(dominfo)                            \
    (hypervisor_version < 2 ?                                       \
     memset(&(dominfo.v0), 0, sizeof(xen_v0_getdomaininfo)) :       \
     (dom_interface_version >= 6 ?                                  \
      memset(&(dominfo.v2d6), 0, sizeof(xen_v2d6_getdomaininfo)) :  \
      (dom_interface_version == 5 ?                                 \
       memset(&(dominfo.v2d5), 0, sizeof(xen_v2d5_getdomaininfo)) : \
       memset(&(dominfo.v2), 0, sizeof(xen_v2_getdomaininfo)))))

#define XEN_GETDOMAININFO_DOMAIN(dominfo)                           \
    (hypervisor_version < 2 ?                                       \
     dominfo.v0.domain :                                            \
     (dom_interface_version >= 6 ?                                  \
      dominfo.v2d6.domain :                                         \
      (dom_interface_version == 5 ?                                 \
       dominfo.v2d5.domain :                                        \
       dominfo.v2.domain)))

#define XEN_GETDOMAININFO_CPUTIME(dominfo)                          \
    (hypervisor_version < 2 ?                                       \
     dominfo.v0.cpu_time :                                          \
     (dom_interface_version >= 6 ?                                  \
      dominfo.v2d6.cpu_time :                                       \
      (dom_interface_version == 5 ?                                 \
       dominfo.v2d5.cpu_time :                                      \
       dominfo.v2.cpu_time)))

#define XEN_GETDOMAININFO_CPUCOUNT(dominfo)                         \
    (hypervisor_version < 2 ?                                       \
     dominfo.v0.nr_online_vcpus :                                   \
     (dom_interface_version >= 6 ?                                  \
      dominfo.v2d6.nr_online_vcpus :                                \
      (dom_interface_version == 5 ?                                 \
       dominfo.v2d5.nr_online_vcpus :                               \
       dominfo.v2.nr_online_vcpus)))

#define XEN_GETDOMAININFO_MAXCPUID(dominfo)                         \
    (hypervisor_version < 2 ?                                       \
     dominfo.v0.max_vcpu_id :                                       \
     (dom_interface_version >= 6 ?                                  \
      dominfo.v2d6.max_vcpu_id :                                    \
      (dom_interface_version == 5 ?                                 \
       dominfo.v2d5.max_vcpu_id :                                   \
       dominfo.v2.max_vcpu_id)))

#define XEN_GETDOMAININFO_FLAGS(dominfo)                            \
    (hypervisor_version < 2 ?                                       \
     dominfo.v0.flags :                                             \
     (dom_interface_version >= 6 ?                                  \
      dominfo.v2d6.flags :                                          \
      (dom_interface_version == 5 ?                                 \
       dominfo.v2d5.flags :                                         \
       dominfo.v2.flags)))

#define XEN_GETDOMAININFO_TOT_PAGES(dominfo)                        \
    (hypervisor_version < 2 ?                                       \
     dominfo.v0.tot_pages :                                         \
     (dom_interface_version >= 6 ?                                  \
      dominfo.v2d6.tot_pages :                                      \
      (dom_interface_version == 5 ?                                 \
       dominfo.v2d5.tot_pages :                                     \
       dominfo.v2.tot_pages)))

#define XEN_GETDOMAININFO_MAX_PAGES(dominfo)                        \
    (hypervisor_version < 2 ?                                       \
     dominfo.v0.max_pages :                                         \
     (dom_interface_version >= 6 ?                                  \
      dominfo.v2d6.max_pages :                                      \
      (dom_interface_version == 5 ?                                 \
       dominfo.v2d5.max_pages :                                     \
       dominfo.v2.max_pages)))

#define XEN_GETDOMAININFO_UUID(dominfo)                             \
    (hypervisor_version < 2 ?                                       \
     dominfo.v0.handle :                                            \
     (dom_interface_version >= 6 ?                                  \
      dominfo.v2d6.handle :                                         \
      (dom_interface_version == 5 ?                                 \
       dominfo.v2d5.handle :                                        \
       dominfo.v2.handle)))
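
/*
 * A minimal usage sketch for the accessors above together with
 * virXen_getdomaininfo() (defined further below); illustrative, not
 * part of the original file. Callers work against the version union
 * and let the macros select the right field; handle and domid stand
 * in for the caller's context:
 *
 *     xen_getdomaininfo dominfo;
 *
 *     XEN_GETDOMAININFO_CLEAR(dominfo);
 *     if (virXen_getdomaininfo(handle, domid, &dominfo) == 1) {
 *         unsigned long pages = XEN_GETDOMAININFO_TOT_PAGES(dominfo);
 *         uint64_t nanosecs   = XEN_GETDOMAININFO_CPUTIME(dominfo);
 *         ...
 *     }
 */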
/* Pin the buffer into RAM so it cannot be paged out while the
 * hypervisor is reading from or writing to it during a hypercall. */
static int
lock_pages(void *addr, size_t len)
{
    return (mlock(addr, len));
}

static int
unlock_pages(void *addr, size_t len)
{
    return (munlock(addr, len));
}
struct xen_v0_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v0_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v0_getdomaininfolistop xen_v0_getdomaininfolistop;

struct xen_v2_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v2_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v2_getdomaininfolistop xen_v2_getdomaininfolistop;

/* As of HV version 2, sysctl version 3 the *buffer pointer is 64-bit aligned */
struct xen_v2s3_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
#ifdef __BIG_ENDIAN__
    struct {
        int __pad[(sizeof (long long) - sizeof (struct xen_v2d5_getdomaininfo *)) / sizeof (int)];
        struct xen_v2d5_getdomaininfo *v;
    } buffer;
#else
    union {
        struct xen_v2d5_getdomaininfo *v;
        uint64_t pad ALIGN_64;
    } buffer;
#endif
    uint32_t  num_domains;
};
typedef struct xen_v2s3_getdomaininfolistop xen_v2s3_getdomaininfolistop;

struct xen_v0_domainop {
    domid_t   domain;
};
typedef struct xen_v0_domainop xen_v0_domainop;
/*
 * The information for a destroydomain system hypercall
 */
#define XEN_V0_OP_DESTROYDOMAIN 9
#define XEN_V1_OP_DESTROYDOMAIN 9
#define XEN_V2_OP_DESTROYDOMAIN 2

/*
 * The information for a pausedomain system hypercall
 */
#define XEN_V0_OP_PAUSEDOMAIN  10
#define XEN_V1_OP_PAUSEDOMAIN  10
#define XEN_V2_OP_PAUSEDOMAIN  3

/*
 * The information for an unpausedomain system hypercall
 */
#define XEN_V0_OP_UNPAUSEDOMAIN  11
#define XEN_V1_OP_UNPAUSEDOMAIN  11
#define XEN_V2_OP_UNPAUSEDOMAIN  4

/*
 * The information for a setmaxmem system hypercall
 */
#define XEN_V0_OP_SETMAXMEM  28
#define XEN_V1_OP_SETMAXMEM  28
#define XEN_V2_OP_SETMAXMEM  11

struct xen_v0_setmaxmem {
    domid_t  domain;
    uint64_t maxmem;
};
typedef struct xen_v0_setmaxmem xen_v0_setmaxmem;
typedef struct xen_v0_setmaxmem xen_v1_setmaxmem;

struct xen_v2_setmaxmem {
    uint64_t maxmem;
};
typedef struct xen_v2_setmaxmem xen_v2_setmaxmem;

struct xen_v2d5_setmaxmem {
    uint64_t maxmem ALIGN_64;
};
typedef struct xen_v2d5_setmaxmem xen_v2d5_setmaxmem;

/*
 * The information for a setmaxvcpu system hypercall
 */
#define XEN_V0_OP_SETMAXVCPU  41
#define XEN_V1_OP_SETMAXVCPU  41
#define XEN_V2_OP_SETMAXVCPU  15

struct xen_v0_setmaxvcpu {
    domid_t  domain;
    uint32_t maxvcpu;
};
typedef struct xen_v0_setmaxvcpu xen_v0_setmaxvcpu;
typedef struct xen_v0_setmaxvcpu xen_v1_setmaxvcpu;

struct xen_v2_setmaxvcpu {
    uint32_t maxvcpu;
};
typedef struct xen_v2_setmaxvcpu xen_v2_setmaxvcpu;
/*
 * The information for a setvcpumap system hypercall
 * Note that between versions 1 and 2 the limit of 64 physical CPUs
 * was lifted, hence the difference in structures
 */
#define XEN_V0_OP_SETVCPUMAP  20
#define XEN_V1_OP_SETVCPUMAP  20
#define XEN_V2_OP_SETVCPUMAP  9

struct xen_v0_setvcpumap {
    domid_t  domain;
    uint32_t vcpu;
    cpumap_t cpumap;
};
typedef struct xen_v0_setvcpumap xen_v0_setvcpumap;
typedef struct xen_v0_setvcpumap xen_v1_setvcpumap;

struct xen_v2_cpumap {
    uint8_t *bitmap;
    uint32_t nr_cpus;
};
struct xen_v2_setvcpumap {
    uint32_t vcpu;
    struct xen_v2_cpumap cpumap;
};
typedef struct xen_v2_setvcpumap xen_v2_setvcpumap;

/* HV version 2, Dom version 5 requires 64-bit alignment */
struct xen_v2d5_cpumap {
#ifdef __BIG_ENDIAN__
    struct {
        int __pad[(sizeof (long long) - sizeof (uint8_t *)) / sizeof (int)];
        uint8_t *v;
    } bitmap;
#else
    union {
        uint8_t *v;
        uint64_t pad ALIGN_64;
    } bitmap;
#endif
    uint32_t nr_cpus;
};
struct xen_v2d5_setvcpumap {
    uint32_t vcpu;
    struct xen_v2d5_cpumap cpumap;
};
typedef struct xen_v2d5_setvcpumap xen_v2d5_setvcpumap;
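
/*
 * A sketch of the bitmap these calls consume (illustrative, not part
 * of the original file): one bit per physical CPU, byte 0 bit 0 being
 * CPU 0. To pin a vcpu to CPUs 0-3 via the helper defined further
 * below (handle and domid stand in for the caller's context):
 *
 *     unsigned char map[8] = { 0x0f };
 *     virXen_setvcpumap(handle, domid, vcpu, map, sizeof(map));
 */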
/*
 * The information for a vcpuinfo system hypercall
 */
#define XEN_V0_OP_GETVCPUINFO  43
#define XEN_V1_OP_GETVCPUINFO  43
#define XEN_V2_OP_GETVCPUINFO  14

struct xen_v0_vcpuinfo {
    domid_t  domain;   /* owner's domain */
    uint32_t vcpu;     /* the vcpu number */
    uint8_t  online;   /* seen as on line */
    uint8_t  blocked;  /* blocked on event */
    uint8_t  running;  /* scheduled on CPU */
    uint64_t cpu_time; /* nanosecond of CPU used */
    uint32_t cpu;      /* current mapping */
    cpumap_t cpumap;   /* deprecated in V2 */
};
typedef struct xen_v0_vcpuinfo xen_v0_vcpuinfo;
typedef struct xen_v0_vcpuinfo xen_v1_vcpuinfo;

struct xen_v2_vcpuinfo {
    uint32_t vcpu;     /* the vcpu number */
    uint8_t  online;   /* seen as on line */
    uint8_t  blocked;  /* blocked on event */
    uint8_t  running;  /* scheduled on CPU */
    uint64_t cpu_time; /* nanosecond of CPU used */
    uint32_t cpu;      /* current mapping */
};
typedef struct xen_v2_vcpuinfo xen_v2_vcpuinfo;

struct xen_v2d5_vcpuinfo {
    uint32_t vcpu;     /* the vcpu number */
    uint8_t  online;   /* seen as on line */
    uint8_t  blocked;  /* blocked on event */
    uint8_t  running;  /* scheduled on CPU */
    uint64_t cpu_time ALIGN_64; /* nanosecond of CPU used */
    uint32_t cpu;      /* current mapping */
};
typedef struct xen_v2d5_vcpuinfo xen_v2d5_vcpuinfo;

/*
 * From V2 the pinning of a vcpu is read with a separate call
 */
#define XEN_V2_OP_GETVCPUMAP  25
typedef struct xen_v2_setvcpumap xen_v2_getvcpumap;
typedef struct xen_v2d5_setvcpumap xen_v2d5_getvcpumap;
/*
 * From V2 we get the scheduler information
 */
#define XEN_V2_OP_GETSCHEDULERID  4

/*
 * From V2 we get the available heap information
 */
#define XEN_V2_OP_GETAVAILHEAP  9

/*
 * From V2 we get the scheduler parameters
 */
#define XEN_V2_OP_SCHEDULER  16

/* Scheduler types. */
#define XEN_SCHEDULER_SEDF    4
#define XEN_SCHEDULER_CREDIT  5

/* get/set scheduler parameters */
#define XEN_DOMCTL_SCHEDOP_putinfo 0
#define XEN_DOMCTL_SCHEDOP_getinfo 1

struct xen_v2_setschedinfo {
    uint32_t sched_id;
    uint32_t cmd;
    union {
        struct xen_domctl_sched_sedf {
            uint64_t period ALIGN_64;
            uint64_t slice ALIGN_64;
            uint64_t latency ALIGN_64;
            uint32_t extratime;
            uint32_t weight;
        } sedf;
        struct xen_domctl_sched_credit {
            uint16_t weight;
            uint16_t cap;
        } credit;
    } u;
};
typedef struct xen_v2_setschedinfo xen_v2_setschedinfo;
typedef struct xen_v2_setschedinfo xen_v2_getschedinfo;
/*
 * The hypercall operation structures also have changed on
 * changeset 86d26e6ec89b
 */
/* the old structure */
struct xen_op_v0 {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        xen_v0_getdomaininfolistop getdomaininfolist;
        xen_v0_domainop            domain;
        xen_v0_setmaxmem           setmaxmem;
        xen_v0_setmaxvcpu          setmaxvcpu;
        xen_v0_setvcpumap          setvcpumap;
        xen_v0_vcpuinfo            getvcpuinfo;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v0 xen_op_v0;
typedef struct xen_op_v0 xen_op_v1;

/* the new structure for systems operations */
struct xen_op_v2_sys {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        xen_v2_getdomaininfolistop   getdomaininfolist;
        xen_v2s3_getdomaininfolistop getdomaininfolists3;
        xen_v2_getschedulerid        getschedulerid;
        xen_v2s4_availheap           availheap;
        xen_v2s5_availheap           availheap5;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v2_sys xen_op_v2_sys;

/* the new structure for domains operation */
struct xen_op_v2_dom {
    uint32_t cmd;
    uint32_t interface_version;
    domid_t  domain;
    union {
        xen_v2_setmaxmem    setmaxmem;
        xen_v2d5_setmaxmem  setmaxmemd5;
        xen_v2_setmaxvcpu   setmaxvcpu;
        xen_v2_setvcpumap   setvcpumap;
        xen_v2d5_setvcpumap setvcpumapd5;
        xen_v2_vcpuinfo     getvcpuinfo;
        xen_v2d5_vcpuinfo   getvcpuinfod5;
        xen_v2_getvcpumap   getvcpumap;
        xen_v2d5_getvcpumap getvcpumapd5;
        xen_v2_setschedinfo setschedinfo;
        xen_v2_getschedinfo getschedinfo;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v2_dom xen_op_v2_dom;
#ifdef __linux__
# define XEN_HYPERVISOR_SOCKET    "/proc/xen/privcmd"
# define HYPERVISOR_CAPABILITIES  "/sys/hypervisor/properties/capabilities"
#elif defined(__sun)
# define XEN_HYPERVISOR_SOCKET    "/dev/xen/privcmd"
#else
# error "unsupported platform"
#endif
static unsigned long xenHypervisorGetMaxMemory(virDomainPtr domain);

struct xenUnifiedDriver xenHypervisorDriver = {
    xenHypervisorOpen, /* open */
    xenHypervisorClose, /* close */
    xenHypervisorGetVersion, /* version */
    NULL, /* nodeGetInfo */
    xenHypervisorGetCapabilities, /* getCapabilities */
    xenHypervisorListDomains, /* listDomains */
    xenHypervisorNumOfDomains, /* numOfDomains */
    NULL, /* domainCreateXML */
    xenHypervisorPauseDomain, /* domainSuspend */
    xenHypervisorResumeDomain, /* domainResume */
    NULL, /* domainShutdown */
    NULL, /* domainReboot */
    xenHypervisorDestroyDomain, /* domainDestroy */
    xenHypervisorDomainGetOSType, /* domainGetOSType */
    xenHypervisorGetMaxMemory, /* domainGetMaxMemory */
    xenHypervisorSetMaxMemory, /* domainSetMaxMemory */
    NULL, /* domainSetMemory */
    xenHypervisorGetDomainInfo, /* domainGetInfo */
    NULL, /* domainSave */
    NULL, /* domainRestore */
    NULL, /* domainCoreDump */
    xenHypervisorPinVcpu, /* domainPinVcpu */
    xenHypervisorGetVcpus, /* domainGetVcpus */
    NULL, /* listDefinedDomains */
    NULL, /* numOfDefinedDomains */
    NULL, /* domainCreate */
    NULL, /* domainDefineXML */
    NULL, /* domainUndefine */
    NULL, /* domainAttachDeviceFlags */
    NULL, /* domainDetachDeviceFlags */
    NULL, /* domainUpdateDeviceFlags */
    NULL, /* domainGetAutostart */
    NULL, /* domainSetAutostart */
    xenHypervisorGetSchedulerType, /* domainGetSchedulerType */
    xenHypervisorGetSchedulerParameters, /* domainGetSchedulerParameters */
    xenHypervisorSetSchedulerParameters, /* domainSetSchedulerParameters */
};
#define virXenError(code, ...)                                  \
    virReportErrorHelper(NULL, VIR_FROM_XEN, code, __FILE__,    \
                         __FUNCTION__, __LINE__, __VA_ARGS__)

/**
 * virXenErrorFunc:
 * @error: the error number
 * @func: the function failing
 * @info: extra information string
 * @value: extra information number
 *
 * Handle an error at the xend daemon interface
 */
static void
virXenErrorFunc(virErrorNumber error, const char *func, const char *info,
                int value)
{
    char fullinfo[1000];
    const char *errmsg;

    if ((error == VIR_ERR_OK) || (in_init != 0))
        return;

    errmsg = virErrorMsg(error, info);
    if (func != NULL) {
        snprintf(fullinfo, 999, "%s: %s", func, info);
        fullinfo[999] = 0;
        virRaiseError(NULL, NULL, NULL, VIR_FROM_XEN, error, VIR_ERR_ERROR,
                      errmsg, fullinfo, NULL, value, 0, errmsg, fullinfo,
                      value);
    } else {
        virRaiseError(NULL, NULL, NULL, VIR_FROM_XEN, error, VIR_ERR_ERROR,
                      errmsg, info, NULL, value, 0, errmsg, info,
                      value);
    }
}
/**
 * xenHypervisorDoV0Op:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor operation through the old interface;
 * this leads to a hypervisor call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV0Op(int handle, xen_op_v0 *op)
{
    int ret;
    v0_hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = hv_version << 8;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking");
        return -1;
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl %d",
                    xen_ioctl_hypercall_cmd);
    }

    if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing");
        ret = -1;
    }

    if (ret < 0)
        return -1;

    return 0;
}
/**
 * xenHypervisorDoV1Op:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor v1 operation; this leads to a hypervisor call through
 * ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV1Op(int handle, xen_op_v1 *op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = DOM0_INTERFACE_VERSION;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking");
        return -1;
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl %d",
                    xen_ioctl_hypercall_cmd);
    }

    if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing");
        ret = -1;
    }

    if (ret < 0)
        return -1;

    return 0;
}
/**
 * xenHypervisorDoV2Sys:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor v2 system operation; this leads to a hypervisor
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Sys(int handle, xen_op_v2_sys *op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = sys_interface_version;
    hc.op = __HYPERVISOR_sysctl;
    hc.arg[0] = (unsigned long) op;

    if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking");
        return -1;
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " sys ioctl %d",
                    xen_ioctl_hypercall_cmd);
    }

    if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing");
        ret = -1;
    }

    if (ret < 0)
        return -1;

    return 0;
}
/**
 * xenHypervisorDoV2Dom:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor domain operation structure
 *
 * Do a hypervisor v2 domain operation; this leads to a hypervisor
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Dom(int handle, xen_op_v2_dom *op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = dom_interface_version;
    hc.op = __HYPERVISOR_domctl;
    hc.arg[0] = (unsigned long) op;

    if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking");
        return -1;
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl %d",
                    xen_ioctl_hypercall_cmd);
    }

    if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing");
        ret = -1;
    }

    if (ret < 0)
        return -1;

    return 0;
}
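
/*
 * A note on the ioctl request values used by the wrappers above
 * (illustrative, not part of the original file): _IOC(_IOC_NONE, 'P',
 * 0, size) packs the transfer direction, the magic character 'P', the
 * command number 0 and the argument size into a single request
 * number, so the request changes whenever the hypercall structure
 * size does. On a 32-bit Linux build, for instance,
 *
 *     _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t))   -- 24-byte arg
 *     _IOC(_IOC_NONE, 'P', 0, sizeof(v1_hypercall_t))   -- 48-byte arg
 *
 * differ, which is what lets xenHypervisorInit() probe which ABI the
 * running kernel expects.
 */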
/**
 * virXen_getdomaininfolist:
 * @handle: the hypervisor handle
 * @first_domain: first domain in the range
 * @maxids: maximum number of domains to list
 * @dominfos: output structures
 *
 * Do a low level hypercall to list existing domains information
 *
 * Returns the number of domains or -1 in case of failure
 */
static int
virXen_getdomaininfolist(int handle, int first_domain, int maxids,
                         xen_getdomaininfolist *dominfos)
{
    int ret = -1;

    if (lock_pages(XEN_GETDOMAININFOLIST_DATA(dominfos),
                   XEN_GETDOMAININFO_SIZE * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking");
        return -1;
    }
    if (hypervisor_version > 1) {
        xen_op_v2_sys op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETDOMAININFOLIST;

        if (sys_interface_version < 3) {
            op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolist.max_domains = maxids;
            op.u.getdomaininfolist.buffer = dominfos->v2;
            op.u.getdomaininfolist.num_domains = maxids;
        } else {
            op.u.getdomaininfolists3.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolists3.max_domains = maxids;
            op.u.getdomaininfolists3.buffer.v = dominfos->v2d5;
            op.u.getdomaininfolists3.num_domains = maxids;
        }
        ret = xenHypervisorDoV2Sys(handle, &op);

        if (ret == 0) {
            if (sys_interface_version < 3)
                ret = op.u.getdomaininfolist.num_domains;
            else
                ret = op.u.getdomaininfolists3.num_domains;
        }
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV1Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV0Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    }
    if (unlock_pages(XEN_GETDOMAININFOLIST_DATA(dominfos),
                     XEN_GETDOMAININFO_SIZE * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " release");
        ret = -1;
    }
    return ret;
}
static int
virXen_getdomaininfo(int handle, int first_domain,
                     xen_getdomaininfo *dominfo)
{
    xen_getdomaininfolist dominfos;

    if (hypervisor_version < 2) {
        dominfos.v0 = &(dominfo->v0);
    } else {
        dominfos.v2 = &(dominfo->v2);
    }

    return virXen_getdomaininfolist(handle, first_domain, 1, &dominfos);
}
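
/*
 * Usage sketch for the two helpers above (illustrative, not part of
 * the original file): print the IDs of up to 128 running domains.
 * handle stands in for the privcmd file descriptor opened in
 * xenHypervisorOpen():
 *
 *     xen_getdomaininfolist domlist;
 *     int i, n;
 *
 *     if (!XEN_GETDOMAININFOLIST_ALLOC(domlist, 128))
 *         return -1;
 *     XEN_GETDOMAININFOLIST_CLEAR(domlist, 128);
 *     n = virXen_getdomaininfolist(handle, 0, 128, &domlist);
 *     for (i = 0; i < n; i++)
 *         printf("domain %d\n",
 *                (int) XEN_GETDOMAININFOLIST_DOMAIN(domlist, i));
 *     XEN_GETDOMAININFOLIST_FREE(domlist);
 */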
/**
 * xenHypervisorGetSchedulerType:
 * @domain: pointer to the Xen Hypervisor block
 * @nparams: where to return the number of scheduler parameters
 *
 * Do a low level hypercall to get the scheduler type
 *
 * Returns the scheduler name or NULL in case of failure
 */
char *
xenHypervisorGetSchedulerType(virDomainPtr domain, int *nparams)
{
    char *schedulertype = NULL;
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "domain or conn is NULL", 0);
        return NULL;
    }

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "priv->handle invalid", 0);
        return NULL;
    }
    if (domain->id < 0) {
        virXenError(VIR_ERR_OPERATION_INVALID,
                    "%s", _("domain is not running"));
        return NULL;
    }

    /*
     * Support only dom_interface_version >=5
     * (Xen3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
    if (dom_interface_version < 5) {
        virXenErrorFunc(VIR_ERR_NO_XEN, __FUNCTION__,
                        "unsupported in dom interface < 5", 0);
        return NULL;
    }

    if (hypervisor_version > 1) {
        xen_op_v2_sys op;
        int ret;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op);
        if (ret < 0)
            return NULL;

        switch (op.u.getschedulerid.sched_id) {
        case XEN_SCHEDULER_SEDF:
            schedulertype = strdup("sedf");
            if (schedulertype == NULL)
                virReportOOMError();
            if (nparams)
                *nparams = 6;
            break;
        case XEN_SCHEDULER_CREDIT:
            schedulertype = strdup("credit");
            if (schedulertype == NULL)
                virReportOOMError();
            if (nparams)
                *nparams = 2;
            break;
        default:
            break;
        }
    }

    return schedulertype;
}

static const char *str_weight = "weight";
static const char *str_cap    = "cap";
/**
 * xenHypervisorGetSchedulerParameters:
 * @domain: pointer to the Xen Hypervisor block
 * @params: pointer to scheduler parameters;
 *          this memory area should be allocated before calling
 * @nparams: the number of scheduler parameters,
 *           as returned by xenHypervisorGetSchedulerType()
 *
 * Do a low level hypercall to get scheduler parameters
 *
 * Returns 0 on success, or -1 in case of failure
 */
int
xenHypervisorGetSchedulerParameters(virDomainPtr domain,
                                    virSchedParameterPtr params, int *nparams)
{
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "domain or conn is NULL", 0);
        return -1;
    }

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "priv->handle invalid", 0);
        return -1;
    }
    if (domain->id < 0) {
        virXenError(VIR_ERR_OPERATION_INVALID,
                    "%s", _("domain is not running"));
        return -1;
    }

    /*
     * Support only dom_interface_version >=5
     * (Xen3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
    if (dom_interface_version < 5) {
        virXenErrorFunc(VIR_ERR_NO_XEN, __FUNCTION__,
                        "unsupported in dom interface < 5", 0);
        return -1;
    }

    if (hypervisor_version > 1) {
        xen_op_v2_sys op_sys;
        xen_op_v2_dom op_dom;
        int ret;

        memset(&op_sys, 0, sizeof(op_sys));
        op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret < 0)
            return -1;

        switch (op_sys.u.getschedulerid.sched_id) {
        case XEN_SCHEDULER_SEDF:
            /* TODO: Implement for Xen/SEDF */
            break;
        case XEN_SCHEDULER_CREDIT:
            memset(&op_dom, 0, sizeof(op_dom));
            op_dom.cmd = XEN_V2_OP_SCHEDULER;
            op_dom.domain = (domid_t) domain->id;
            op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
            op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
            ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
            if (ret < 0)
                return -1;

            if (virStrcpyStatic(params[0].field, str_weight) == NULL) {
                virXenError(VIR_ERR_INTERNAL_ERROR,
                            "Weight %s too big for destination", str_weight);
                return -1;
            }
            params[0].type = VIR_DOMAIN_SCHED_FIELD_UINT;
            params[0].value.ui = op_dom.u.getschedinfo.u.credit.weight;

            if (virStrcpyStatic(params[1].field, str_cap) == NULL) {
                virXenError(VIR_ERR_INTERNAL_ERROR,
                            "Cap %s too big for destination", str_cap);
                return -1;
            }
            params[1].type = VIR_DOMAIN_SCHED_FIELD_UINT;
            params[1].value.ui = op_dom.u.getschedinfo.u.credit.cap;

            *nparams = 2;
            break;
        default:
            virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                            "Unknown scheduler", op_sys.u.getschedulerid.sched_id);
            return -1;
        }
    }

    return 0;
}
/**
 * xenHypervisorSetSchedulerParameters:
 * @domain: pointer to the Xen Hypervisor block
 * @params: pointer to scheduler setting parameters
 * @nparams: the number of scheduler setting parameters
 *
 * Do a low level hypercall to set scheduler parameters
 *
 * Returns 0 on success, or -1 in case of failure
 */
int
xenHypervisorSetSchedulerParameters(virDomainPtr domain,
                                    virSchedParameterPtr params, int nparams)
{
    int i;
    unsigned int val;
    xenUnifiedPrivatePtr priv;
    char buf[256];

    if (domain->conn == NULL) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "domain or conn is NULL", 0);
        return -1;
    }

    if ((nparams == 0) || (params == NULL)) {
        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                        "No parameters given", 0);
        return -1;
    }

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "priv->handle invalid", 0);
        return -1;
    }
    if (domain->id < 0) {
        virXenError(VIR_ERR_OPERATION_INVALID,
                    "%s", _("domain is not running"));
        return -1;
    }

    /*
     * Support only dom_interface_version >=5
     * (Xen3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
    if (dom_interface_version < 5) {
        virXenErrorFunc(VIR_ERR_NO_XEN, __FUNCTION__,
                        "unsupported in dom interface < 5", 0);
        return -1;
    }

    if (hypervisor_version > 1) {
        xen_op_v2_sys op_sys;
        xen_op_v2_dom op_dom;
        int ret;

        memset(&op_sys, 0, sizeof(op_sys));
        op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret == -1) return -1;

        switch (op_sys.u.getschedulerid.sched_id) {
        case XEN_SCHEDULER_SEDF:
            /* TODO: Implement for Xen/SEDF */
            break;
        case XEN_SCHEDULER_CREDIT: {
            memset(&op_dom, 0, sizeof(op_dom));
            op_dom.cmd = XEN_V2_OP_SCHEDULER;
            op_dom.domain = (domid_t) domain->id;
            op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
            op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_putinfo;

            /*
             * credit scheduler parameters:
             * the following values do not change the parameters
             */
            op_dom.u.getschedinfo.u.credit.weight = 0;
            op_dom.u.getschedinfo.u.credit.cap    = (uint16_t)~0U;

            for (i = 0; i < nparams; i++) {
                memset(&buf, 0, sizeof(buf));
                if (STREQ (params[i].field, str_weight) &&
                    params[i].type == VIR_DOMAIN_SCHED_FIELD_UINT) {
                    val = params[i].value.ui;
                    if ((val < 1) || (val > USHRT_MAX)) {
                        snprintf(buf, sizeof(buf), _("Credit scheduler weight parameter (%d) is out of range (1-65535)"), val);
                        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__, buf, val);
                        return -1;
                    }
                    op_dom.u.getschedinfo.u.credit.weight = val;
                } else if (STREQ (params[i].field, str_cap) &&
                           params[i].type == VIR_DOMAIN_SCHED_FIELD_UINT) {
                    val = params[i].value.ui;
                    if (val >= USHRT_MAX) {
                        snprintf(buf, sizeof(buf), _("Credit scheduler cap parameter (%d) is out of range (0-65534)"), val);
                        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__, buf, val);
                        return -1;
                    }
                    op_dom.u.getschedinfo.u.credit.cap = val;
                } else {
                    virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                                    "Credit scheduler accepts 'cap' and 'weight' integer parameters",
                                    0);
                    return -1;
                }
            }

            ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
            if (ret < 0)
                return -1;
            break;
        }
        default:
            virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                            "Unknown scheduler", op_sys.u.getschedulerid.sched_id);
            return -1;
        }
    }

    return 0;
}
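/*
 * Through the public libvirt API the two functions above back
 * virDomainGetSchedulerType() and virDomain{Get,Set}SchedulerParameters().
 * A minimal caller sketch (illustrative, not part of the original
 * file; dom stands in for a connected virDomainPtr):
 *
 *     virSchedParameter params[2];
 *     int nparams = 0;
 *     char *type = virDomainGetSchedulerType(dom, &nparams);
 *
 *     if (type && STREQ(type, "credit") && nparams == 2 &&
 *         virDomainGetSchedulerParameters(dom, params, &nparams) == 0) {
 *         params[0].value.ui = 512;   (weight; the credit default is 256)
 *         virDomainSetSchedulerParameters(dom, params, nparams);
 *     }
 */
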
int
xenHypervisorDomainBlockStats (virDomainPtr dom,
                               const char *path,
                               struct _virDomainBlockStats *stats)
{
#ifdef __linux__
    xenUnifiedPrivatePtr priv;
    int ret;

    priv = (xenUnifiedPrivatePtr) dom->conn->privateData;
    xenUnifiedLock(priv);
    /* Need to lock because it hits the xenstore handle :-( */
    ret = xenLinuxDomainBlockStats (priv, dom, path, stats);
    xenUnifiedUnlock(priv);
    return ret;
#else
    virXenErrorFunc(VIR_ERR_NO_SUPPORT, __FUNCTION__,
                    "block statistics not supported on this platform",
                    0);
    return -1;
#endif
}
/* Paths have the form vif<domid>.<n> (this interface checks that
 * <domid> is the real domain ID and returns an error if not).
 *
 * In future we may allow you to query bridge stats (virbrX or
 * xenbrX), but that will probably be through a separate
 * virNetwork interface, as yet not decided.
 */
int
xenHypervisorDomainInterfaceStats (virDomainPtr dom,
                                   const char *path,
                                   struct _virDomainInterfaceStats *stats)
{
#ifdef __linux__
    int rqdomid, device;

    /* Verify that the vif requested is one belonging to the current
     * domain.
     */
    if (sscanf (path, "vif%d.%d", &rqdomid, &device) != 2) {
        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                        "invalid path, should be vif<domid>.<n>.", 0);
        return -1;
    }
    if (rqdomid != dom->id) {
        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                        "invalid path, vif<domid> should match this domain ID", 0);
        return -1;
    }

    return linuxDomainInterfaceStats(path, stats);
#else
    virXenErrorFunc(VIR_ERR_NO_SUPPORT, __FUNCTION__,
                    "/proc/net/dev: Interface not found", 0);
    return -1;
#endif
}
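/*
 * Caller sketch via the public API (illustrative, not part of the
 * original file; dom stands in for a virDomainPtr whose ID is 1):
 *
 *     virDomainInterfaceStatsStruct st;
 *
 *     if (virDomainInterfaceStats(dom, "vif1.0", &st, sizeof(st)) == 0)
 *         printf("rx_bytes=%lld tx_bytes=%lld\n",
 *                (long long)st.rx_bytes, (long long)st.tx_bytes);
 */
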
/**
 * virXen_pausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to pause the domain
 *
 * Returns 0, or -1 in case of failure
 */
static int
virXen_pausedomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_PAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return ret;
}
/**
 * virXen_unpausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to unpause the domain
 *
 * Returns 0, or -1 in case of failure
 */
static int
virXen_unpausedomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_UNPAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return ret;
}
/**
 * virXen_destroydomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to destroy the domain
 *
 * Returns 0, or -1 in case of failure
 */
static int
virXen_destroydomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_DESTROYDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return ret;
}
/**
 * virXen_setmaxmem:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @memory: the amount of memory in kilobytes
 *
 * Do a low level hypercall to change the max memory amount
 *
 * Returns 0, or -1 in case of failure
 */
static int
virXen_setmaxmem(int handle, int id, unsigned long memory)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETMAXMEM;
        op.domain = (domid_t) id;
        if (dom_interface_version < 5)
            op.u.setmaxmem.maxmem = memory;
        else
            op.u.setmaxmemd5.maxmem = memory;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return ret;
}
/**
 * virXen_setmaxvcpus:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpus: the number of vcpus
 *
 * Do a low level hypercall to change the maximum number of vcpus
 *
 * Returns 0, or -1 in case of failure
 */
static int
virXen_setmaxvcpus(int handle, int id, unsigned int vcpus)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETMAXVCPU;
        op.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return ret;
}
/**
 * virXen_setvcpumap:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to map
 * @cpumap: the bitmap for this vcpu
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to change the pinning for a vcpu
 *
 * Returns 0, or -1 in case of failure
 */
static int
virXen_setvcpumap(int handle, int id, unsigned int vcpu,
                  unsigned char * cpumap, int maplen)
{
    int ret = -1;
    unsigned char *new = NULL;
    unsigned char *bitmap = NULL;
    uint32_t nr_cpus;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        if (lock_pages(cpumap, maplen) < 0) {
            virXenError(VIR_ERR_XEN_CALL, " locking");
            return -1;
        }
        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETVCPUMAP;
        op.domain = (domid_t) id;

        /* The memory allocated for cpumap must be at least
         * sizeof(uint64_t) bytes for Xen, and nr_cpus must then be
         * sizeof(uint64_t) * 8 */
        if (maplen < 8) {
            if (VIR_ALLOC_N(new, sizeof(uint64_t)) < 0) {
                virReportOOMError();
                return -1;
            }
            memcpy(new, cpumap, maplen);
            bitmap = new;
            nr_cpus = sizeof(uint64_t) * 8;
        } else {
            bitmap = cpumap;
            nr_cpus = maplen * 8;
        }

        if (dom_interface_version < 5) {
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap.bitmap = bitmap;
            op.u.setvcpumap.cpumap.nr_cpus = nr_cpus;
        } else {
            op.u.setvcpumapd5.vcpu = vcpu;
            op.u.setvcpumapd5.cpumap.bitmap.v = bitmap;
            op.u.setvcpumapd5.cpumap.nr_cpus = nr_cpus;
        }
        ret = xenHypervisorDoV2Dom(handle, &op);
        VIR_FREE(new);

        if (unlock_pages(cpumap, maplen) < 0) {
            virXenError(VIR_ERR_XEN_CALL, " release");
            ret = -1;
        }
    } else {
        cpumap_t xen_cpumap; /* limited to 64 CPUs in old hypervisors */
        uint64_t *pm = &xen_cpumap;
        int j;

        if ((maplen > (int)sizeof(cpumap_t)) || (sizeof(cpumap_t) & 7))
            return -1;

        memset(pm, 0, sizeof(cpumap_t));
        for (j = 0; j < maplen; j++)
            *(pm + (j / 8)) |= cpumap[j] << (8 * (j & 7));

        if (hypervisor_version == 1) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV1Op(handle, &op);
        } else if (hypervisor_version == 0) {
            xen_op_v0 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV0Op(handle, &op);
        }
    }
    return ret;
}
/**
 * virXen_getvcpusinfo:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to query
 * @ipt: where to store the vcpu information
 * @cpumap: the bitmap for this vcpu
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to get the vcpu information and its
 * current pinning
 *
 * Returns 0, or -1 in case of failure
 */
static int
virXen_getvcpusinfo(int handle, int id, unsigned int vcpu, virVcpuInfoPtr ipt,
                    unsigned char *cpumap, int maplen)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETVCPUINFO;
        op.domain = (domid_t) id;
        if (dom_interface_version < 5)
            op.u.getvcpuinfo.vcpu = (uint16_t) vcpu;
        else
            op.u.getvcpuinfod5.vcpu = (uint16_t) vcpu;
        ret = xenHypervisorDoV2Dom(handle, &op);

        if (ret < 0)
            return -1;

        if (dom_interface_version < 5) {
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
        } else {
            if (op.u.getvcpuinfod5.online) {
                if (op.u.getvcpuinfod5.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfod5.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfod5.cpu_time;
            ipt->cpu = op.u.getvcpuinfod5.online ? (int)op.u.getvcpuinfod5.cpu : -1;
        }
        if ((cpumap != NULL) && (maplen > 0)) {
            if (lock_pages(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " locking");
                return -1;
            }
            memset(cpumap, 0, maplen);
            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V2_OP_GETVCPUMAP;
            op.domain = (domid_t) id;
            if (dom_interface_version < 5) {
                op.u.getvcpumap.vcpu = vcpu;
                op.u.getvcpumap.cpumap.bitmap = cpumap;
                op.u.getvcpumap.cpumap.nr_cpus = maplen * 8;
            } else {
                op.u.getvcpumapd5.vcpu = vcpu;
                op.u.getvcpumapd5.cpumap.bitmap.v = cpumap;
                op.u.getvcpumapd5.cpumap.nr_cpus = maplen * 8;
            }
            ret = xenHypervisorDoV2Dom(handle, &op);
            if (unlock_pages(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " release");
                ret = -1;
            }
        }
    } else {
        int mapl = maplen;
        int cpu;

        if (maplen > (int)sizeof(cpumap_t))
            mapl = (int)sizeof(cpumap_t);

        if (hypervisor_version == 1) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV1Op(handle, &op);
            if (ret < 0)
                return -1;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        } else if (hypervisor_version == 0) {
            xen_op_v0 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV0Op(handle, &op);
            if (ret < 0)
                return -1;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        }
    }
    return ret;
}
/**
 * xenHypervisorInit:
 *
 * Initialize the hypervisor layer. Try to detect the kind of interface
 * used, i.e. pre or post changeset 10277
 */
int
xenHypervisorInit(void)
{
    int fd, ret, cmd, errcode;
    hypercall_t hc;
    v0_hypercall_t v0_hc;
    xen_getdomaininfo info;
    virVcpuInfoPtr ipt = NULL;

    if (initialized) {
        if (hypervisor_version == -1)
            return -1;
        return 0;
    }
    initialized = 1;
    in_init = 1;

    /* Compile regular expressions used by xenHypervisorGetCapabilities.
     * Note that errors here are really internal errors since these
     * regexps should never fail to compile.
     */
    errcode = regcomp (&flags_hvm_rec, flags_hvm_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &flags_hvm_rec, error, sizeof error);
        regfree (&flags_hvm_rec);
        virXenError(VIR_ERR_INTERNAL_ERROR, "%s", error);
        in_init = 0;
        return -1;
    }
    errcode = regcomp (&flags_pae_rec, flags_pae_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &flags_pae_rec, error, sizeof error);
        regfree (&flags_pae_rec);
        regfree (&flags_hvm_rec);
        virXenError(VIR_ERR_INTERNAL_ERROR, "%s", error);
        in_init = 0;
        return -1;
    }
    errcode = regcomp (&xen_cap_rec, xen_cap_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &xen_cap_rec, error, sizeof error);
        regfree (&xen_cap_rec);
        regfree (&flags_pae_rec);
        regfree (&flags_hvm_rec);
        virXenError(VIR_ERR_INTERNAL_ERROR, "%s", error);
        in_init = 0;
        return -1;
    }

    /* Xen hypervisor version detection begins. */
    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        hypervisor_version = -1;
        return -1;
    }
    fd = ret;

    /*
     * The size of the hypervisor call block changed July 2006;
     * this detects whether we are using the new or old hypercall_t
     * structure
     */
    hc.op = __HYPERVISOR_xen_version;
    hc.arg[0] = (unsigned long) XENVER_version;

    cmd = IOCTL_PRIVCMD_HYPERCALL;
    ret = ioctl(fd, cmd, (unsigned long) &hc);

    if ((ret != -1) && (ret != 0)) {
        DEBUG("Using new hypervisor call: %X", ret);
        hv_version = ret;
        xen_ioctl_hypercall_cmd = cmd;
        goto detect_v2;
    }

    /*
     * check if the old hypercall is actually working
     */
    v0_hc.op = __HYPERVISOR_xen_version;
    v0_hc.arg[0] = (unsigned long) XENVER_version;
    cmd = _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t));
    ret = ioctl(fd, cmd, (unsigned long) &v0_hc);
    if ((ret != -1) && (ret != 0)) {
        DEBUG("Using old hypervisor call: %X", ret);
        hv_version = ret;
        xen_ioctl_hypercall_cmd = cmd;
        hypervisor_version = 0;
        goto done;
    }

    /*
     * we failed to make any hypercall
     */
    hypervisor_version = -1;
    virXenError(VIR_ERR_XEN_CALL, " ioctl %lu",
                (unsigned long) IOCTL_PRIVCMD_HYPERCALL);
    close(fd);
    in_init = 0;
    return -1;

 detect_v2:
    /*
     * The hypercalls were refactored into 3 different sections in August 2006;
     * try to detect if we are running a version post 3.0.2 with the new ones
     */
    hypervisor_version = 2;

    if (VIR_ALLOC(ipt) < 0) {
        virReportOOMError();
        return -1;
    }
    /* Currently consider RHEL5.0 Fedora7, xen-3.1, and xen-unstable */
    sys_interface_version = 2; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* RHEL 5.0 */
        dom_interface_version = 3; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0) {
            DEBUG0("Using hypervisor call v2, sys ver2 dom ver3");
            goto done;
        }
        /* Fedora 7 */
        dom_interface_version = 4; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0) {
            DEBUG0("Using hypervisor call v2, sys ver2 dom ver4");
            goto done;
        }
    }

    sys_interface_version = 3; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* xen-3.1 */
        dom_interface_version = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0) {
            DEBUG0("Using hypervisor call v2, sys ver3 dom ver5");
            goto done;
        }
    }

    sys_interface_version = 4; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* Fedora 8 */
        dom_interface_version = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0) {
            DEBUG0("Using hypervisor call v2, sys ver4 dom ver5");
            goto done;
        }
    }

    sys_interface_version = 6; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* Xen 3.2, Fedora 9 */
        dom_interface_version = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0) {
            DEBUG0("Using hypervisor call v2, sys ver6 dom ver5");
            goto done;
        }
    }

    /* Xen 4.0 */
    sys_interface_version = 7; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        dom_interface_version = 6; /* XEN_DOMCTL_INTERFACE_VERSION */
        DEBUG0("Using hypervisor call v2, sys ver7 dom ver6");
        goto done;
    }

    hypervisor_version = 1;
    sys_interface_version = -1;
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        DEBUG0("Using hypervisor call v1");
        goto done;
    }

    /*
     * we failed to make the getdomaininfolist hypercall
     */
    DEBUG0("Failed to find any Xen hypervisor method");
    hypervisor_version = -1;
    virXenError(VIR_ERR_XEN_CALL, " ioctl %lu",
                (unsigned long)IOCTL_PRIVCMD_HYPERCALL);
    close(fd);
    in_init = 0;
    VIR_FREE(ipt);
    return -1;

 done:
    close(fd);
    in_init = 0;
    VIR_FREE(ipt);
    return 0;
}
/**
 * xenHypervisorOpen:
 * @conn: pointer to the connection block
 * @name: URL for the target, NULL for local
 * @flags: combination of virDrvOpenFlag(s)
 *
 * Connects to the Xen hypervisor.
 *
 * Returns 0 in case of success, or -1 in case of error.
 */
int
xenHypervisorOpen(virConnectPtr conn,
                  virConnectAuthPtr auth ATTRIBUTE_UNUSED,
                  int flags ATTRIBUTE_UNUSED)
{
    int ret;
    xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;

    if (initialized == 0)
        if (xenHypervisorInit() == -1)
            return -1;

    priv->handle = -1;

    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        virXenError(VIR_ERR_NO_XEN, "%s", XEN_HYPERVISOR_SOCKET);
        return -1;
    }

    priv->handle = ret;

    return 0;
}
/**
 * xenHypervisorClose:
 * @conn: pointer to the connection block
 *
 * Close the connection to the Xen hypervisor.
 *
 * Returns 0 in case of success or -1 in case of error.
 */
int
xenHypervisorClose(virConnectPtr conn)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;

    if (priv->handle < 0)
        return -1;

    ret = close(priv->handle);
    if (ret < 0)
        return -1;

    return 0;
}
/**
 * xenHypervisorGetVersion:
 * @conn: pointer to the connection block
 * @hvVer: where to store the version
 *
 * Call the hypervisor to extract its own internal API version
 *
 * Returns 0 in case of success, -1 in case of error
 */
int
xenHypervisorGetVersion(virConnectPtr conn, unsigned long *hvVer)
{
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || hvVer == NULL)
        return -1;
    *hvVer = (hv_version >> 16) * 1000000 + (hv_version & 0xFFFF) * 1000;
    return 0;
}
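
/*
 * Worked example of the encoding above (illustrative, not part of the
 * original file): the hypervisor reports its major version in the
 * high 16 bits of hv_version and the minor in the low 16 bits, which
 * libvirt flattens to major * 1,000,000 + minor * 1,000. For Xen 3.1:
 *
 *     hv_version == 0x00030001
 *     *hvVer     == 3 * 1000000 + 1 * 1000 == 3001000
 */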
static virCapsPtr
xenHypervisorBuildCapabilities(virConnectPtr conn,
                               const char *hostmachine,
                               int host_pae,
                               const char *hvm_type,
                               struct guest_arch *guest_archs,
                               int nr_guest_archs)
{
    virCapsPtr caps;
    int i;
    int hv_major = hv_version >> 16;
    int hv_minor = hv_version & 0xFFFF;

    if ((caps = virCapabilitiesNew(hostmachine, 1, 1)) == NULL)
        goto no_memory;

    virCapabilitiesSetMacPrefix(caps, (unsigned char[]){ 0x00, 0x16, 0x3e });

    if (hvm_type && STRNEQ(hvm_type, "") &&
        virCapabilitiesAddHostFeature(caps, hvm_type) < 0)
        goto no_memory;
    if (host_pae &&
        virCapabilitiesAddHostFeature(caps, "pae") < 0)
        goto no_memory;

    if (virCapabilitiesAddHostMigrateTransport(caps,
                                               "xenmigr") < 0)
        goto no_memory;

    if (sys_interface_version >= SYS_IFACE_MIN_VERS_NUMA) {
        if (xenDaemonNodeGetTopology(conn, caps) != 0) {
            virCapabilitiesFree(caps);
            return NULL;
        }
    }

    for (i = 0; i < nr_guest_archs; ++i) {
        virCapsGuestPtr guest;
        char const *const xen_machines[] = {guest_archs[i].hvm ? "xenfv" : "xenpv"};
        virCapsGuestMachinePtr *machines;

        if ((machines = virCapabilitiesAllocMachines(xen_machines, 1)) == NULL)
            goto no_memory;

        if ((guest = virCapabilitiesAddGuest(caps,
                                             guest_archs[i].hvm ? "hvm" : "xen",
                                             guest_archs[i].model,
                                             guest_archs[i].bits,
                                             (STREQ(hostmachine, "x86_64") ?
                                              "/usr/lib64/xen/bin/qemu-dm" :
                                              "/usr/lib/xen/bin/qemu-dm"),
                                             (guest_archs[i].hvm ?
                                              "/usr/lib/xen/boot/hvmloader" :
                                              NULL),
                                             1,
                                             machines)) == NULL) {
            virCapabilitiesFreeMachines(machines, 1);
            goto no_memory;
        }

        if (virCapabilitiesAddGuestDomain(guest,
                                          "xen", NULL, NULL, 0, NULL) == NULL)
            goto no_memory;

        if (guest_archs[i].pae &&
            virCapabilitiesAddGuestFeature(guest,
                                           "pae", 1, 0) == NULL)
            goto no_memory;

        if (guest_archs[i].nonpae &&
            virCapabilitiesAddGuestFeature(guest,
                                           "nonpae", 1, 0) == NULL)
            goto no_memory;

        if (guest_archs[i].ia64_be &&
            virCapabilitiesAddGuestFeature(guest,
                                           "ia64_be", 1, 0) == NULL)
            goto no_memory;

        if (guest_archs[i].hvm) {
            if (virCapabilitiesAddGuestFeature(guest,
                                               "acpi", 1, 1) == NULL)
                goto no_memory;

            /* In Xen 3.1.0, APIC is always on and can't be toggled */
            if (virCapabilitiesAddGuestFeature(guest,
                                               "apic", 1,
                                               (hv_major > 3 &&
                                                hv_minor > 0 ? 0 : 1)) == NULL)
                goto no_memory;
        }
    }

    caps->defaultConsoleTargetType = VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_XEN;

    return caps;

 no_memory:
    virCapabilitiesFree(caps);
    return NULL;
}
get_cpu_flags(virConnectPtr conn, const char **hvm, int *pae, int *longmode)
    uint32_t r_eax, r_ebx, r_ecx, r_edx;

    /* returns -1, errno 22 (EINVAL) if in 32-bit mode */
    *longmode = (sysinfo(SI_ARCHITECTURE_64, tmpbuf, sizeof(tmpbuf)) != -1);

    if ((fd = open("/dev/cpu/self/cpuid", O_RDONLY)) == -1 ||
        pread(fd, &regs, sizeof(regs), 0) != sizeof(regs)) {
        virReportSystemError(errno, "%s", _("could not read CPU flags"));
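    /* Note on the vendor-string checks below: CPUID returns the vendor
     * string in EBX, EDX, ECX order, while the regs struct is laid out
     * eax/ebx/ecx/edx. Reading 12 bytes starting at &regs.r_ebx therefore
     * yields "Auth" + "cAMD" + "enti" for "AuthenticAMD", and
     * "Genu" + "ntel" + "ineI" for "GenuineIntel", which is why the
     * comparison strings look scrambled. */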
    if (STREQLEN((const char *)&regs.r_ebx, "AuthcAMDenti", 12)) {
        if (pread(fd, &regs, sizeof(regs), 0x80000001) == sizeof(regs)) {
            /* Read secure virtual machine bit (bit 2 of ECX feature ID) */
            if ((regs.r_ecx >> 2) & 1) {
            if ((regs.r_edx >> 6) & 1)
    } else if (STREQLEN((const char *)&regs.r_ebx, "GenuntelineI", 12)) {
        if (pread(fd, &regs, sizeof(regs), 0x00000001) == sizeof(regs)) {
            /* Read VMXE feature bit (bit 5 of ECX feature ID) */
            if ((regs.r_ecx >> 5) & 1)
            if ((regs.r_edx >> 6) & 1)
xenHypervisorMakeCapabilitiesSunOS(virConnectPtr conn)
    struct guest_arch guest_arches[32];
    virCapsPtr caps = NULL;
    struct utsname utsname;

    if (!get_cpu_flags(conn, &hvm, &pae, &longmode))

    /* Really, this never fails - look at the man-page. */

    guest_arches[i].model = "i686";
    guest_arches[i].bits = 32;
    guest_arches[i].hvm = 0;
    guest_arches[i].pae = pae;
    guest_arches[i].nonpae = !pae;
    guest_arches[i].ia64_be = 0;

    guest_arches[i].model = "x86_64";
    guest_arches[i].bits = 64;
    guest_arches[i].hvm = 0;
    guest_arches[i].pae = 0;
    guest_arches[i].nonpae = 0;
    guest_arches[i].ia64_be = 0;

    if (hvm[0] != '\0') {
        guest_arches[i].model = "i686";
        guest_arches[i].bits = 32;
        guest_arches[i].hvm = 1;
        guest_arches[i].pae = pae;
        guest_arches[i].nonpae = 1;
        guest_arches[i].ia64_be = 0;

        guest_arches[i].model = "x86_64";
        guest_arches[i].bits = 64;
        guest_arches[i].hvm = 1;
        guest_arches[i].pae = 0;
        guest_arches[i].nonpae = 0;
        guest_arches[i].ia64_be = 0;

    if ((caps = xenHypervisorBuildCapabilities(conn,
                                               guest_arches, i)) == NULL)
        virReportOOMError();
 * xenHypervisorMakeCapabilitiesInternal:
 * @conn: pointer to the connection block
 * @hostmachine: the host machine type, as reported by uname
 * @cpuinfo: file handle containing /proc/cpuinfo data, or NULL
 * @capabilities: file handle containing /sys/hypervisor/properties/capabilities data, or NULL
 *
 * Return the capabilities of this hypervisor.
xenHypervisorMakeCapabilitiesInternal(virConnectPtr conn,
                                      const char *hostmachine,
                                      FILE *cpuinfo, FILE *capabilities)
    char line[1024], *str, *token;
    char *saveptr = NULL;
    char hvm_type[4] = ""; /* "vmx" or "svm" (or "" if not in CPU). */
    struct guest_arch guest_archs[32];
    int nr_guest_archs = 0;
    virCapsPtr caps = NULL;

    memset(guest_archs, 0, sizeof(guest_archs));
    /* /proc/cpuinfo: flags: Intel calls HVM "vmx", AMD calls it "svm".
     * It's not clear if this will work on IA64, let alone other
     * architectures and non-Linux. (XXX)
     */
    while (fgets (line, sizeof line, cpuinfo)) {
        if (regexec (&flags_hvm_rec, line, sizeof(subs)/sizeof(regmatch_t), subs, 0) == 0
            && subs[0].rm_so != -1) {
            if (virStrncpy(hvm_type,
                           &line[subs[1].rm_so],
                           subs[1].rm_eo-subs[1].rm_so,
                           sizeof(hvm_type)) == NULL)
        } else if (regexec (&flags_pae_rec, line, 0, NULL, 0) == 0)
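    /* For reference, the kind of /proc/cpuinfo line matched above, with the
     * first capture group of flags_hvm_re extracting "vmx" (Intel) or
     * "svm" (AMD); the flag list here is illustrative, not exhaustive:
     *
     *   flags : fpu vme de pse tsc msr pae mce ... vmx ... ht syscall
     */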
    /* Most of the useful info is in /sys/hypervisor/properties/capabilities
     * which is documented in the code in xen-unstable.hg/xen/arch/.../setup.c.
     *
     * It is a space-separated list of supported guest architectures.
     *
     * For x86:
     *    TYP-VER-ARCH[p]
     *    ^   ^   ^    ^
     *    |   |   |    +-- PAE supported
     *    |   |   +------- x86_32 or x86_64
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     *
     * For PPC this file appears to be always empty (?)
     *
     * For IA64:
     *    TYP-VER-ARCH[be]
     *    ^   ^   ^    ^
     *    |   |   |    +-- Big-endian supported
     *    |   |   +------- always "ia64"
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
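     *
     * Illustrative examples (typical values, not an exhaustive list): a
     * 64-bit x86 host with HVM support might report
     *   xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32 hvm-3.0-x86_32p hvm-3.0-x86_64
     * and an IA64 host might report "xen-3.0-ia64 xen-3.0-ia64be".
     */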
    /* Expecting one line in this file - ignore any more. */
    if ((capabilities) && (fgets (line, sizeof line, capabilities))) {
        /* Split the line into tokens. strtok_r is OK here because we "own"
         * this buffer. Parse out the features from each token.
         */
        for (str = line, nr_guest_archs = 0;
             nr_guest_archs < sizeof guest_archs / sizeof guest_archs[0]
                 && (token = strtok_r (str, " ", &saveptr)) != NULL;

            if (regexec (&xen_cap_rec, token, sizeof subs / sizeof subs[0],
                int hvm = STRPREFIX(&token[subs[1].rm_so], "hvm");
                int bits, pae = 0, nonpae = 0, ia64_be = 0;

                if (STRPREFIX(&token[subs[2].rm_so], "x86_32")) {
                    if (subs[3].rm_so != -1 &&
                        STRPREFIX(&token[subs[3].rm_so], "p"))
                else if (STRPREFIX(&token[subs[2].rm_so], "x86_64")) {
                else if (STRPREFIX(&token[subs[2].rm_so], "ia64")) {
                    if (subs[3].rm_so != -1 &&
                        STRPREFIX(&token[subs[3].rm_so], "be"))
                else if (STRPREFIX(&token[subs[2].rm_so], "powerpc64")) {
                /* XXX surely no other Xen archs exist */

                /* Search for existing matching (model,hvm) tuple */
                for (i = 0 ; i < nr_guest_archs ; i++) {
                    if (STREQ(guest_archs[i].model, model) &&
                        guest_archs[i].hvm == hvm) {

                /* Too many arch flavours - highly unlikely ! */
                if (i >= ARRAY_CARDINALITY(guest_archs))

                /* Didn't find a match, so create a new one */
                if (i == nr_guest_archs)

                guest_archs[i].model = model;
                guest_archs[i].bits = bits;
                guest_archs[i].hvm = hvm;

                /* Careful not to overwrite a previous positive
                   setting with a negative one here - some archs
                   can do both pae & non-pae, but Xen reports
                   capabilities separately, so we're merging archs */
                    guest_archs[i].pae = pae;
                    guest_archs[i].nonpae = nonpae;
                    guest_archs[i].ia64_be = ia64_be;
    if ((caps = xenHypervisorBuildCapabilities(conn,
                                               nr_guest_archs)) == NULL)
        virReportOOMError();

    virCapabilitiesFree(caps);
 * xenHypervisorMakeCapabilities:
 *
 * Return the capabilities of this hypervisor.
xenHypervisorMakeCapabilities(virConnectPtr conn)
    return xenHypervisorMakeCapabilitiesSunOS(conn);

    FILE *cpuinfo, *capabilities;
    struct utsname utsname;

    /* Really, this never fails - look at the man-page. */

    cpuinfo = fopen ("/proc/cpuinfo", "r");
    if (cpuinfo == NULL) {
        if (errno != ENOENT) {
            virReportSystemError(errno,
                                 _("cannot read file %s"),

    capabilities = fopen ("/sys/hypervisor/properties/capabilities", "r");
    if (capabilities == NULL) {
        if (errno != ENOENT) {
            virReportSystemError(errno,
                                 _("cannot read file %s"),
                                 "/sys/hypervisor/properties/capabilities");

    caps = xenHypervisorMakeCapabilitiesInternal(conn,

    fclose(capabilities);
 * xenHypervisorGetCapabilities:
 * @conn: pointer to the connection block
 *
 * Return the capabilities of this hypervisor.
xenHypervisorGetCapabilities (virConnectPtr conn)
    xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;

    if (!(xml = virCapabilitiesFormatXML(priv->caps))) {
        virReportOOMError();
 * xenHypervisorNumOfDomains:
 * @conn: pointer to the connection block
 *
 * Provides the number of active domains.
 *
 * Returns the number of domains found or -1 in case of error
xenHypervisorNumOfDomains(virConnectPtr conn)
    xen_getdomaininfolist dominfos;
    static int last_maxids = 2;
    int maxids = last_maxids;
    xenUnifiedPrivatePtr priv;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)

    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virReportOOMError();

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    XEN_GETDOMAININFOLIST_FREE(dominfos);

    /* Can't possibly have more than 65,000 concurrent guests
     * so limit how many times we try, to avoid increasing
     * without bound & thus allocating all of system memory !
     * XXX I'll regret this comment in a few years time ;-)
     */
    if (nbids == maxids) {
        if (maxids < 65000) {
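            /* The buffer was filled completely, so the listing may have
             * been truncated: grow maxids and query again, bounded by the
             * 65000 cap above (the exact growth step is elided here;
             * doubling is assumed). */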
    if ((nbids < 0) || (nbids > maxids))

 * xenHypervisorListDomains:
 * @conn: pointer to the connection block
 * @ids: array to collect the list of IDs of active domains
 * @maxids: size of @ids
 *
 * Collect the list of active domains, and store their IDs in @ids
 *
 * Returns the number of domains found or -1 in case of error
xenHypervisorListDomains(virConnectPtr conn, int *ids, int maxids)
    xen_getdomaininfolist dominfos;
    xenUnifiedPrivatePtr priv;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 ||
        (ids == NULL) || (maxids < 0))

    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virReportOOMError();

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
    memset(ids, 0, maxids * sizeof(int));

    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    XEN_GETDOMAININFOLIST_FREE(dominfos);

    if ((nbids < 0) || (nbids > maxids)) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);

    for (i = 0; i < nbids; i++) {
        ids[i] = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);

    XEN_GETDOMAININFOLIST_FREE(dominfos);
xenHypervisorDomainGetOSType (virDomainPtr dom)
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    char *ostype = NULL;

    priv = (xenUnifiedPrivatePtr) dom->conn->privateData;
    if (priv->handle < 0) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        _("domain shut off or invalid"), 0);

    /* HVs earlier than 3.1.0 don't include the HVM flag in the guest's status */
    if (hypervisor_version < 2 ||
        dom_interface_version < 4) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        _("unsupported in dom interface < 4"), 0);

    XEN_GETDOMAININFO_CLEAR(dominfo);

    if (virXen_getdomaininfo(priv->handle, dom->id, &dominfo) < 0) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        _("cannot get domain details"), 0);

    if (XEN_GETDOMAININFO_DOMAIN(dominfo) != dom->id) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        _("cannot get domain details"), 0);

    if (XEN_GETDOMAININFO_FLAGS(dominfo) & DOMFLAGS_HVM)
        ostype = strdup("hvm");
        ostype = strdup("linux");

        virReportOOMError();
xenHypervisorHasDomain(virConnectPtr conn,
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)

    XEN_GETDOMAININFO_CLEAR(dominfo);

    if (virXen_getdomaininfo(priv->handle, id, &dominfo) < 0)

    if (XEN_GETDOMAININFO_DOMAIN(dominfo) != id)
xenHypervisorLookupDomainByID(virConnectPtr conn,
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)

    XEN_GETDOMAININFO_CLEAR(dominfo);

    if (virXen_getdomaininfo(priv->handle, id, &dominfo) < 0)

    if (XEN_GETDOMAININFO_DOMAIN(dominfo) != id)

    xenUnifiedLock(priv);
    name = xenStoreDomainGetName(conn, id);
    xenUnifiedUnlock(priv);

    ret = virGetDomain(conn, name, XEN_GETDOMAININFO_UUID(dominfo));
xenHypervisorLookupDomainByUUID(virConnectPtr conn,
                                const unsigned char *uuid)
    xen_getdomaininfolist dominfos;
    xenUnifiedPrivatePtr priv;
    int maxids = 100, nids, i, id;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)

    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virReportOOMError();

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

    nids = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

        XEN_GETDOMAININFOLIST_FREE(dominfos);

    /* Can't possibly have more than 65,000 concurrent guests
     * so limit how many times we try, to avoid increasing
     * without bound & thus allocating all of system memory !
     * XXX I'll regret this comment in a few years time ;-)
     */
    if (nids == maxids) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        if (maxids < 65000) {

    for (i = 0 ; i < nids ; i++) {
        if (memcmp(XEN_GETDOMAININFOLIST_UUID(dominfos, i), uuid, VIR_UUID_BUFLEN) == 0) {
            id = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);

    XEN_GETDOMAININFOLIST_FREE(dominfos);

    xenUnifiedLock(priv);
    name = xenStoreDomainGetName(conn, id);
    xenUnifiedUnlock(priv);

    ret = virGetDomain(conn, name, uuid);
 * xenHypervisorGetMaxVcpus:
 *
 * Returns the maximum number of virtual CPUs supported by Xen.
xenHypervisorGetMaxVcpus(virConnectPtr conn,
                         const char *type ATTRIBUTE_UNUSED)
    xenUnifiedPrivatePtr priv;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)

    return MAX_VIRT_CPUS;
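    /* With the legacy x86 guest ABI, MAX_VIRT_CPUS is typically 32; this is
     * a hypervisor-wide limit, so the type argument is deliberately ignored. */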
 * xenHypervisorGetDomMaxMemory:
 * @conn: connection data
 * @id: the domain ID
 *
 * Retrieve the maximum amount of physical memory allocated to a domain.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
xenHypervisorGetDomMaxMemory(virConnectPtr conn, int id)
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
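        /* e.g. with 4096-byte pages kb_per_pages is 4, so a domain whose
         * max_pages is 262144 reports 262144 * 4 = 1048576 KiB (1 GiB). */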
        if (kb_per_pages <= 0)

    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))

    return((unsigned long) XEN_GETDOMAININFO_MAX_PAGES(dominfo) * kb_per_pages);
 * xenHypervisorGetMaxMemory:
 * @domain: a domain object or NULL
 *
 * Retrieve the maximum amount of physical memory allocated to a
 * domain. If domain is NULL, then this gets the amount of memory reserved
 * to Domain0, i.e. the domain where the application runs.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
static unsigned long ATTRIBUTE_NONNULL (1)
xenHypervisorGetMaxMemory(virDomainPtr domain)
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)

    return(xenHypervisorGetDomMaxMemory(domain->conn, domain->id));
 * xenHypervisorGetDomInfo:
 * @conn: connection data
 * @id: the domain ID
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
xenHypervisorGetDomInfo(virConnectPtr conn, int id, virDomainInfoPtr info)
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    uint32_t domain_flags, domain_state, domain_shutdown_cause;

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || info == NULL)

    memset(info, 0, sizeof(virDomainInfo));
    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))

    domain_flags = XEN_GETDOMAININFO_FLAGS(dominfo);
    domain_flags &= ~DOMFLAGS_HVM; /* Mask out HVM flag */
    domain_state = domain_flags & 0xFF; /* Mask out high bits */
    switch (domain_state) {
        case DOMFLAGS_DYING:
            info->state = VIR_DOMAIN_SHUTDOWN;

        case DOMFLAGS_SHUTDOWN:
            /* The domain is shut down. Determine the cause. */
            domain_shutdown_cause = domain_flags >> DOMFLAGS_SHUTDOWNSHIFT;
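            /* For example, with the usual Xen layout (DOMFLAGS_SHUTDOWNSHIFT
             * of 16 and SHUTDOWN_crash of 3), flags of
             * (3 << 16) | DOMFLAGS_SHUTDOWN decode to VIR_DOMAIN_CRASHED. */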
            switch (domain_shutdown_cause) {
                case SHUTDOWN_crash:
                    info->state = VIR_DOMAIN_CRASHED;

                    info->state = VIR_DOMAIN_SHUTOFF;

        case DOMFLAGS_PAUSED:
            info->state = VIR_DOMAIN_PAUSED;

        case DOMFLAGS_BLOCKED:
            info->state = VIR_DOMAIN_BLOCKED;

        case DOMFLAGS_RUNNING:
            info->state = VIR_DOMAIN_RUNNING;

            info->state = VIR_DOMAIN_NOSTATE;

    /*
     * The API reports CPU time in nanoseconds and memory sizes as page
     * counts; pass the time through unchanged and convert the page counts
     * to kilobytes.
     */
    info->cpuTime = XEN_GETDOMAININFO_CPUTIME(dominfo);
    info->memory = XEN_GETDOMAININFO_TOT_PAGES(dominfo) * kb_per_pages;
    info->maxMem = XEN_GETDOMAININFO_MAX_PAGES(dominfo);
    if (info->maxMem != UINT_MAX)
        info->maxMem *= kb_per_pages;
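    /* A max_pages value of UINT_MAX appears to be the hypervisor's
     * "unlimited" sentinel, which is why it is left unscaled above
     * rather than multiplied into an overflow. */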
    info->nrVirtCpu = XEN_GETDOMAININFO_CPUCOUNT(dominfo);
 * xenHypervisorGetDomainInfo:
 * @domain: pointer to the domain block
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
xenHypervisorGetDomainInfo(virDomainPtr domain, virDomainInfoPtr info)
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || info == NULL ||

    return(xenHypervisorGetDomInfo(domain->conn, domain->id, info));
 * xenHypervisorNodeGetCellsFreeMemory:
 * @conn: pointer to the hypervisor connection
 * @freeMems: pointer to the array of unsigned long long
 * @startCell: index of first cell to return freeMems info on.
 * @maxCells: Maximum number of cells for which freeMems information can
 *
 * This call returns the amount of free memory in one or more NUMA cells.
 * The @freeMems array must be allocated by the caller and will be filled
 * with the amount of free memory in bytes for each cell requested,
 * starting with startCell (in freeMems[0]), up to either
 * (startCell + maxCells), or the number of additional cells in the node,
 * whichever is smaller.
 *
 * Returns the number of entries filled in freeMems, or -1 in case of error.
xenHypervisorNodeGetCellsFreeMemory(virConnectPtr conn, unsigned long long *freeMems,
                                    int startCell, int maxCells)
    xen_op_v2_sys op_sys;
    xenUnifiedPrivatePtr priv;

        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                        "invalid argument", 0);

    priv = conn->privateData;

    if (priv->nbNodeCells < 0) {
        virXenErrorFunc(VIR_ERR_XEN_CALL, __FUNCTION__,
                        "cannot determine actual number of cells", 0);

    if ((maxCells < 1) || (startCell >= priv->nbNodeCells)) {
        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                        "invalid argument", 0);

    /*
     * Support only sys_interface_version >= 4
     */
    if (sys_interface_version < SYS_IFACE_MIN_VERS_NUMA) {
        virXenErrorFunc(VIR_ERR_XEN_CALL, __FUNCTION__,
                        "unsupported in sys interface < 4", 0);

    if (priv->handle < 0) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "priv->handle invalid", 0);

    memset(&op_sys, 0, sizeof(op_sys));
    op_sys.cmd = XEN_V2_OP_GETAVAILHEAP;

    for (i = startCell, j = 0; (i < priv->nbNodeCells) && (j < maxCells); i++, j++) {
        if (sys_interface_version >= 5)
            op_sys.u.availheap5.node = i;
            op_sys.u.availheap.node = i;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);

        if (sys_interface_version >= 5)
            freeMems[j] = op_sys.u.availheap5.avail_bytes;
            freeMems[j] = op_sys.u.availheap.avail_bytes;
 * xenHypervisorPauseDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to pause the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
xenHypervisorPauseDomain(virDomainPtr domain)
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)

    ret = virXen_pausedomain(priv->handle, domain->id);
 * xenHypervisorResumeDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to resume the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
xenHypervisorResumeDomain(virDomainPtr domain)
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)

    ret = virXen_unpausedomain(priv->handle, domain->id);
 * xenHypervisorDestroyDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to destroy the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
xenHypervisorDestroyDomain(virDomainPtr domain)
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)

    ret = virXen_destroydomain(priv->handle, domain->id);
 * xenHypervisorSetMaxMemory:
 * @domain: pointer to the domain block
 * @memory: the max memory size in kilobytes.
 *
 * Do a hypervisor call to change the maximum amount of memory used
 *
 * Returns 0 in case of success, -1 in case of error.
xenHypervisorSetMaxMemory(virDomainPtr domain, unsigned long memory)
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)

    ret = virXen_setmaxmem(priv->handle, domain->id, memory);
 * xenHypervisorSetVcpus:
 * @domain: pointer to domain object
 * @nvcpus: the new number of virtual CPUs for this domain
 *
 * Dynamically change the number of virtual CPUs used by the domain.
 *
 * Returns 0 in case of success, -1 in case of failure.
xenHypervisorSetVcpus(virDomainPtr domain, unsigned int nvcpus)
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0 || nvcpus < 1)

    ret = virXen_setmaxvcpus(priv->handle, domain->id, nvcpus);
 * xenHypervisorPinVcpu:
 * @domain: pointer to domain object
 * @vcpu: virtual CPU number
 * @cpumap: pointer to a bit map of real CPUs (in 8-bit bytes)
 * @maplen: length of cpumap in bytes
 *
 * Dynamically change the real CPUs which can be allocated to a virtual CPU.
 *
 * Returns 0 in case of success, -1 in case of failure.
xenHypervisorPinVcpu(virDomainPtr domain, unsigned int vcpu,
                     unsigned char *cpumap, int maplen)
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || (domain->id < 0) ||
        (cpumap == NULL) || (maplen < 1))

    ret = virXen_setvcpumap(priv->handle, domain->id, vcpu,
 * xenHypervisorGetVcpus:
 * @domain: pointer to domain object, or NULL for Domain0
 * @info: pointer to an array of virVcpuInfo structures (OUT)
 * @maxinfo: number of structures in info array
 * @cpumaps: pointer to a bit map of real CPUs for all vcpus of this domain (in 8-bit bytes) (OUT)
 *      If cpumaps is NULL, then no cpumap information is returned by the API.
 *      It's assumed there are <maxinfo> cpumaps in the cpumaps array.
 *      The memory allocated to cpumaps must be (maxinfo * maplen) bytes
 *      (ie: calloc(maxinfo, maplen)).
 *      One cpumap inside cpumaps has the format described in virDomainPinVcpu() API.
 * @maplen: number of bytes in one cpumap, from 1 up to size of CPU map in
 *      underlying virtualization system (Xen...).
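 *      For example, on a host with 8 physical CPUs a maplen of 1 suffices
 *      (one bit per physical CPU), so for a 2-vcpu guest cpumaps would be
 *      calloc(2, 1): one byte of CPU mask per virtual CPU.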
 * Extract information about virtual CPUs of domain, store it in info array
 * and also in cpumaps if this pointer isn't NULL.
 *
 * Returns the number of info filled in case of success, -1 in case of failure.
xenHypervisorGetVcpus(virDomainPtr domain, virVcpuInfoPtr info, int maxinfo,
                      unsigned char *cpumaps, int maplen)
    xen_getdomaininfo dominfo;
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || (domain->id < 0) ||
        (info == NULL) || (maxinfo < 1) ||
        (sizeof(cpumap_t) & 7)) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        _("domain shut off or invalid"), 0);

    if ((cpumaps != NULL) && (maplen < 1)) {
        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                        "invalid argument", 0);

    /* first get the number of virtual CPUs in this domain */
    XEN_GETDOMAININFO_CLEAR(dominfo);
    ret = virXen_getdomaininfo(priv->handle, domain->id,

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id)) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        _("cannot get domain details"), 0);

    nbinfo = XEN_GETDOMAININFO_CPUCOUNT(dominfo) + 1;
    if (nbinfo > maxinfo) nbinfo = maxinfo;

    if (cpumaps != NULL)
        memset(cpumaps, 0, maxinfo * maplen);

    for (i = 0, ipt = info; i < nbinfo; i++, ipt++) {
        if ((cpumaps != NULL) && (i < maxinfo)) {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      (unsigned char *)VIR_GET_CPUMAP(cpumaps, maplen, i),
                virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                                _("cannot get VCPUs info"), 0);

            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                                _("cannot get VCPUs info"), 0);
 * xenHypervisorGetVcpuMax:
 *
 * Returns the maximum number of virtual CPUs supported for
 * the guest VM. If the guest is inactive, this is the maximum
 * number of CPUs defined by Xen. If the guest is running, this reflects
 * the maximum number of virtual CPUs the guest was booted with.
xenHypervisorGetVcpuMax(virDomainPtr domain)
    xen_getdomaininfo dominfo;
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0)

    /* inactive domain */
    if (domain->id < 0) {
        maxcpu = MAX_VIRT_CPUS;

        XEN_GETDOMAININFO_CLEAR(dominfo);
        ret = virXen_getdomaininfo(priv->handle, domain->id,

        if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id))

        maxcpu = XEN_GETDOMAININFO_MAXCPUID(dominfo) + 1;
 * xenHavePrivilege()
 *
 * Return true if the current process should be able to connect to Xen.
    return priv_ineffect(PRIV_XVM_CONTROL);

    return access(XEN_HYPERVISOR_SOCKET, R_OK) == 0;