/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "exec/gdbstub.h"
#include "exec/address-spaces.h"
#include "qapi-event.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)
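/*
 * Most of the VM-level tunables in this file go through the KVM device
 * attribute interface: a struct kvm_device_attr names a group (e.g.
 * KVM_S390_VM_MEM_CTRL), an attribute within that group, and a userspace
 * address, and is passed to the KVM_GET_DEVICE_ATTR/KVM_SET_DEVICE_ATTR vm
 * ioctls. KVM_HAS_DEVICE_ATTR (wrapped by kvm_vm_check_attr()) probes
 * whether the kernel understands a given group/attribute pair.
 */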
#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_TIMEREVENT                 0x288
#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_OPEREXC                    0x2c
#define ICPT_IO                         0x40
#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and in addition NR_LOCAL_IRQS interrupts
 */
#define VCPU_IRQ_BUF_SIZE (sizeof(struct kvm_s390_irq) * \
                           (max_cpus + NR_LOCAL_IRQS))
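/*
 * Sizing rationale (illustrative): at most one emergency signal per
 * possible source CPU can be pending for a vcpu at any time, so max_cpus
 * kvm_s390_irq slots plus NR_LOCAL_IRQS slots for the remaining local
 * interrupt types always suffice to hold a vcpu's saved interrupt state.
 */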
static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_DEVICE_CTRL,
    KVM_CAP_LAST_INFO
};

static QemuMutex qemu_sigp_mutex;

static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
static int cap_s390_irq;
static int cap_ri;

static void *legacy_s390_alloc(size_t size, uint64_t *align);
static int kvm_s390_query_mem_limit(KVMState *s, uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
}
int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(s, hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
}
static bool kvm_s390_cmma_available(void)
{
    static bool initialized, value;

    if (!initialized) {
        initialized = true;
        value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
                kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
    }
    return value;
}

void kvm_s390_cmma_reset(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    if (mem_path || !kvm_s390_cmma_available()) {
        return;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}
static void kvm_s390_enable_cmma(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_enable_cmma(rc);
}

static void kvm_s390_set_attr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CRYPTO,
        .attr  = attr,
    };

    int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);

    if (ret) {
        error_report("Failed to set crypto device attribute %lu: %s",
                     attr, strerror(-ret));
    }
}
static void kvm_s390_init_aes_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

static void kvm_s390_init_dea_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

void kvm_s390_crypto_reset(void)
{
    if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
        kvm_s390_init_aes_kw();
        kvm_s390_init_dea_kw();
    }
}
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);

    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
    if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
        cap_ri = 1;
    }

    qemu_mutex_init(&qemu_sigp_mutex);

    return 0;
}
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE);
    return 0;
}
void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all) */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
    }
}
static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}
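/*
 * With KVM_CAP_SYNC_REGS, the kernel mirrors register blocks in the shared
 * kvm_run page: kvm_valid_regs advertises which blocks the kernel keeps up
 * to date for userspace, and userspace sets bits in kvm_dirty_regs to tell
 * the kernel which blocks it modified and must be reloaded on the next
 * KVM_RUN. This avoids a separate ioctl per register class on every exit.
 */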
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS*/
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0].ll;
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1].ll;
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = get_freg(env, i)->ll;
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = get_freg(env, i)->ll;
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers*/
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}
int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0].ll = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1].ll = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            get_freg(env, i)->ll = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            get_freg(env, i)->ll = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
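/*
 * The s390 TOD clock is wider than 64 bits; KVM therefore splits it into
 * KVM_S390_VM_TOD_LOW (the classic 64-bit TOD value) and
 * KVM_S390_VM_TOD_HIGH (the epoch extension byte), which is why both
 * helpers above issue two device-attribute ioctls back to back.
 */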
/**
 * kvm_s390_mem_op:
 * @addr:     the logical start address in guest memory
 * @ar:       the access register number
 * @hostbuf:  buffer in host memory. NULL = do only checks w/o copying
 * @len:      length that should be transferred
 * @is_write: true = write, false = read
 * Returns:   0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
    };
    int ret;

    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret));
    }
    return ret;
}
/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than an system defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align)
{
    void *mem;

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}
static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        DPRINTF("KVM: will use 4-byte sw breakpoints.\n");
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        DPRINTF("KVM: will use 2-byte sw breakpoints.\n");
    }
}
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}
static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }
    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }
    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}
void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}
static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}
static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    static bool use_flic = true;
    int r;

    if (use_flic) {
        r = kvm_s390_inject_flic(irq);
        if (r == -ENOSYS) {
            use_flic = false;
        }
        if (!r) {
            return;
        }
    }
    __kvm_s390_floating_interrupt(irq);
}

void kvm_s390_service_interrupt(uint32_t parm)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_SERVICE,
        .u.ext.ext_params = parm,
    };

    kvm_s390_floating_interrupt(&irq);
}
static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}
static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}
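/*
 * Worked example (illustrative values): with ipb = 0xb801ff00, base2 is
 * 0xb, DL = (ipb & 0x0fff0000) >> 16 = 0x801 and DH = (ipb & 0xff00) << 4
 * = 0xff000, giving the 20-bit displacement 0xff801. Since bit 0x80000 is
 * set, adding 0xfff00000 sign-extends it to 0xfffff801, i.e. -2047 after
 * the (long)(int) cast.
 */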
static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return clp_service_call(cpu, r2);
}
static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcilg_service_call(cpu, r1, r2);
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcistg_service_call(cpu, r1, r2);
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run, &ar);

    return stpcifc_service_call(cpu, r1, fiba, ar);
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    /* body lost in this copy; accepting and doing nothing is assumed here */
    return 0;
}
static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return rpcit_service_call(cpu, r1, r2);
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;
    uint8_t ar;

    cpu_synchronize_state(CPU(cpu));
    gaddr = get_base_disp_rsy(cpu, run, &ar);

    return pcistb_service_call(cpu, r1, r3, gaddr, ar);
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run, &ar);

    return mpcifc_service_call(cpu, r1, fiba, ar);
}
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}
static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        ret = 0;
    }

    return ret;
}

static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
    }
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}
static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}
typedef struct SigpInfo {
    uint64_t param;
    int cc;
    uint64_t *status_reg;
} SigpInfo;

static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}
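/*
 * SIGP orders complete with a condition code: SIGP_CC_ORDER_CODE_ACCEPTED
 * (0), SIGP_CC_STATUS_STORED (1), SIGP_CC_BUSY (2) or
 * SIGP_CC_NOT_OPERATIONAL (3). When status is stored, only the low 32 bits
 * of the caller's status register are replaced, which is why
 * set_sigp_status() masks with 0xffffffff00000000ULL first.
 */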
static void sigp_start(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    /* disabled wait - sleeping in user space */
    if (cs->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
    } else {
        /* execute the stop function */
        cpu->env.sigp_order = SIGP_STOP;
        kvm_s390_vcpu_interrupt(cpu, &irq);
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
#define ADTL_SAVE_AREA_SIZE 1024
static int kvm_s390_store_adtl_status(S390CPU *cpu, hwaddr addr)
{
    void *mem;
    hwaddr len = ADTL_SAVE_AREA_SIZE;

    mem = cpu_physical_memory_map(addr, &len, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (len != ADTL_SAVE_AREA_SIZE) {
        cpu_physical_memory_unmap(mem, len, 1, 0);
        return -EFAULT;
    }

    memcpy(mem, &cpu->env.vregs, 512);

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}

#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
#define SAVE_AREA_SIZE 512
static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    uint64_t ckc = cpu->env.ckc >> 8;
    void *mem;
    int i;
    hwaddr len = SAVE_AREA_SIZE;

    mem = cpu_physical_memory_map(addr, &len, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (len != SAVE_AREA_SIZE) {
        cpu_physical_memory_unmap(mem, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    for (i = 0; i < 16; ++i) {
        *((uint64_t *)mem + i) = get_freg(&cpu->env, i)->ll;
    }
    memcpy(mem + 128, &cpu->env.regs, 128);
    memcpy(mem + 256, &cpu->env.psw, 16);
    memcpy(mem + 280, &cpu->env.psa, 4);
    memcpy(mem + 284, &cpu->env.fpc, 4);
    memcpy(mem + 292, &cpu->env.todpr, 4);
    memcpy(mem + 296, &cpu->env.cputm, 8);
    memcpy(mem + 304, &ckc, 8);
    memcpy(mem + 320, &cpu->env.aregs, 64);
    memcpy(mem + 384, &cpu->env.cregs, 128);

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}
static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    /* disabled wait - sleeping in user space */
    if (s390_cpu_get_state(cpu) == CPU_STATE_OPERATING && cs->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
    }

    switch (s390_cpu_get_state(cpu)) {
    case CPU_STATE_OPERATING:
        cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
        kvm_s390_vcpu_interrupt(cpu, &irq);
        /* store will be performed when handling the stop intercept */
        break;
    case CPU_STATE_STOPPED:
        /* already stopped, just store the status */
        cpu_synchronize_state(cs);
        kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint32_t address = si->param & 0x7ffffe00u;

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu_synchronize_state(cs);

    if (kvm_s390_store_status(cpu, address, false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    if (!s390_has_feat(S390_FEAT_VECTOR)) {
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    /* parameter must be aligned to 1024-byte boundary */
    if (si->param & 0x3ff) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    cpu_synchronize_state(cs);

    if (kvm_s390_store_adtl_status(cpu, si->param)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    switch (s390_cpu_get_state(cpu)) {
    case CPU_STATE_STOPPED:
        /* the restart irq has to be delivered prior to any other pending irq */
        cpu_synchronize_state(cs);
        do_restart_interrupt(&cpu->env);
        s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
        break;
    case CPU_STATE_OPERATING:
        kvm_s390_vcpu_interrupt(cpu, &irq);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

int kvm_s390_cpu_restart(S390CPU *cpu)
{
    SigpInfo si = {};

    run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}
static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    SigpInfo *si = arg.host_ptr;

    cpu_synchronize_state(cs);
    scc->initial_cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    SigpInfo *si = arg.host_ptr;

    cpu_synchronize_state(cs);
    scc->cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint32_t addr = si->param & 0x7fffe000u;

    cpu_synchronize_state(cs);

    if (!address_space_access_valid(&address_space_memory, addr,
                                    sizeof(struct LowCore), false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu->env.psa = addr;
    cpu_synchronize_post_init(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo si = {
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (dst_cpu == NULL) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET &&
        order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    switch (order) {
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STORE_ADTL_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
        break;
    }

    return si.cc;
}
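/*
 * run_on_cpu() executes the helper synchronously on the target vcpu thread
 * and only returns once it has run, so it is safe for si to live on this
 * function's stack and for si.cc to be read afterwards.
 */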
static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
                                 uint64_t *status_reg)
{
    CPUState *cur_cs;
    S390CPU *cur_cpu;

    /* due to the BQL, we are the only active cpu */
    CPU_FOREACH(cur_cs) {
        cur_cpu = S390_CPU(cur_cs);
        if (cur_cpu->env.sigp_order != 0) {
            return SIGP_CC_BUSY;
        }
        cpu_synchronize_state(cur_cs);
        /* all but the current one have to be stopped */
        if (cur_cpu != cpu &&
            s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
            *status_reg &= 0xffffffff00000000ULL;
            *status_reg |= SIGP_STAT_INCORRECT_STATE;
            return SIGP_CC_STATUS_STORED;
        }
    }

    switch (param & 0xff) {
    case SIGP_MODE_ESA_S390:
        /* not supported */
        return SIGP_CC_NOT_OPERATIONAL;
    case SIGP_MODE_Z_ARCH_TRANS_ALL_PSW:
    case SIGP_MODE_Z_ARCH_TRANS_CUR_PSW:
        CPU_FOREACH(cur_cs) {
            cur_cpu = S390_CPU(cur_cs);
            cur_cpu->env.pfault_token = -1UL;
        }
        break;
    default:
        *status_reg &= 0xffffffff00000000ULL;
        *status_reg |= SIGP_STAT_INVALID_PARAMETER;
        return SIGP_CC_STATUS_STORED;
    }

    return SIGP_CC_ORDER_CODE_ACCEPTED;
}
#define SIGP_ORDER_MASK 0x000000ff

static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;
    uint64_t *status_reg;
    uint64_t param;
    S390CPU *dst_cpu = NULL;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL)
        & SIGP_ORDER_MASK;
    status_reg = &env->regs[r1];
    param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];

    if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
        ret = SIGP_CC_BUSY;
        goto out;
    }

    switch (order) {
    case SIGP_SET_ARCH:
        ret = sigp_set_architecture(cpu, param, status_reg);
        break;
    default:
        /* all other sigp orders target a single vcpu */
        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
        ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
    }
    qemu_mutex_unlock(&qemu_sigp_mutex);

out:
    trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
                            dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);

    if (ret >= 0) {
        setcc(cpu, ret);
        return 0;
    }

    return ret;
}
static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        r = 0;
        enter_pgmcheck(cpu, 0x0001);
    }

    return r;
}
static bool is_special_wait_psw(CPUState *cs)
{
    /* signal quiesce */
    return cs->kvm_run->psw_addr == 0xfffUL;
}
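/*
 * Loading a disabled-wait PSW with address 0xfff is the conventional way
 * for s390 guests to signal an orderly shutdown request ("quiesce") to the
 * hypervisor, so it is treated as a shutdown rather than a guest panic.
 */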
static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
{
    CPUState *cs = CPU(cpu);

    error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
                 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
                 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
    s390_cpu_halt(cpu);
    qemu_system_guest_panicked();
}
static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, "program interrupt",
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, "external interrupt",
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        cpu_synchronize_state(cs);
        if (s390_cpu_halt(cpu) == 0) {
            if (is_special_wait_psw(cs)) {
                qemu_system_shutdown_request();
            } else {
                qemu_system_guest_panicked();
            }
        }
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
            qemu_system_shutdown_request();
        }
        if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
            kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
                                  true);
        }
        cpu->env.sigp_order = 0;
        r = EXCP_HALTED;
        break;
    case ICPT_OPEREXC:
        /* currently only instr 0x0000 after enabled via capability */
        r = handle_sw_breakpoint(cpu, run);
        if (r == -ENOENT) {
            enter_pgmcheck(cpu, PGM_OPERATION);
            r = 0;
        }
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}
static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    cpu_synchronize_state(cs);

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
                                  run->s390_tsch.subchannel_nr,
                                  run->s390_tsch.io_int_parm,
                                  run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}
static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
    struct sysib_322 sysib;
    int del;

    if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
        return;
    }
    /* Shift the stack of Extended Names to prepare for our own data */
    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
    /* First virt level, that doesn't provide Ext Names delimits stack. It is
     * assumed it's not capable of managing Extended Names for lower levels.
     */
    for (del = 1; del < sysib.count; del++) {
        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
            break;
        }
    }
    if (del < sysib.count) {
        memset(sysib.ext_names[del], 0,
               sizeof(sysib.ext_names[0]) * (sysib.count - del));
    }
    /* Insert short machine name in EBCDIC, padded with blanks */
    if (qemu_name) {
        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
                                                    strlen(qemu_name)));
    }
    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
    memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0]));
    /* If hypervisor specifies zero Extended Name in STSI322 SYSIB, it's
     * considered by s390 as not capable of providing any Extended Name.
     * Therefore if no name was specified on qemu invocation, we go with the
     * same "KVMguest" default, which KVM has filled into short name field.
     */
    if (qemu_name) {
        strncpy((char *)sysib.ext_names[0], qemu_name,
                sizeof(sysib.ext_names[0]));
    } else {
        strcpy((char *)sysib.ext_names[0], "KVMguest");
    }
    /* Insert UUID */
    memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));

    s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
}
static int handle_stsi(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    switch (run->s390_stsi.fc) {
    case 3:
        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
            return 0;
        }
        /* Only sysib 3.2.2 needs post-handling for now. */
        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
        return 0;
    default:
        return 0;
    }
}
static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    qemu_mutex_lock_iothread();

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_reipl_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_S390_STSI:
        ret = handle_stsi(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
    qemu_mutex_unlock_iothread();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    struct kvm_s390_irq irq = {
        .u.io.subchannel_id = subchannel_id,
        .u.io.subchannel_nr = subchannel_nr,
        .u.io.io_int_parm = io_int_parm,
        .u.io.io_int_word = io_int_word,
    };

    if (io_int_word & IO_INT_WORD_AI) {
        irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
    } else {
        irq.type = KVM_S390_INT_IO(0, (subchannel_id & 0xff00) >> 8,
                                      (subchannel_id & 0x0006),
                                      subchannel_nr);
    }
    kvm_s390_floating_interrupt(&irq);
}
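/*
 * KVM_S390_INT_IO() encodes where the interrupt originates: adapter
 * (floating) I/O interrupts use the "AI" variant, while classic subchannel
 * interrupts encode the cssid/ssid and subchannel number taken from the
 * identifiers above.
 */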
static uint64_t build_channel_report_mcic(void)
{
    uint64_t mcic;

    /* subclass: indicate channel report pending */
    mcic = MCIC_SC_CP |
    /* subclass modifiers: none */
    /* storage errors: none */
    /* validity bits: no damage */
        MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP |
        MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR |
        MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC;
    if (s390_has_feat(S390_FEAT_VECTOR)) {
        mcic |= MCIC_VB_VR;
    }
    return mcic;
}

void kvm_s390_crw_mchk(void)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_MCHK,
        .u.mchk.cr14 = 1 << 28,
        .u.mchk.mcic = build_channel_report_mcic(),
    };
    kvm_s390_floating_interrupt(&irq);
}
void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
        KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}

int kvm_s390_get_memslot_count(KVMState *s)
{
    return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
}

int kvm_s390_get_ri(void)
{
    return cap_ri;
}
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}
void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
    struct kvm_s390_irq_state irq_state;
    CPUState *cs = CPU(cpu);
    int32_t bytes;

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return;
    }

    irq_state.buf = (uint64_t) cpu->irqstate;
    irq_state.len = VCPU_IRQ_BUF_SIZE;

    bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
    if (bytes < 0) {
        cpu->irqstate_saved_size = 0;
        error_report("Migration of interrupt state failed");
        return;
    }

    cpu->irqstate_saved_size = bytes;
}

int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_irq_state irq_state;
    int r;

    if (cpu->irqstate_saved_size == 0) {
        return 0;
    }

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return -ENOSYS;
    }

    irq_state.buf = (uint64_t) cpu->irqstate;
    irq_state.len = cpu->irqstate_saved_size;

    r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
    if (r) {
        error_report("Setting interrupt state failed %d", r);
    }
    return r;
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    S390PCIBusDevice *pbdev;
    uint32_t idx = data >> ZPCI_MSI_VEC_BITS;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    pbdev = s390_pci_find_dev_by_idx(idx);
    if (!pbdev) {
        DPRINTF("add_msi_route no dev\n");
        return -ENODEV;
    }

    pbdev->routes.adapter.ind_offset = vec;

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
static inline int test_bit_inv(long nr, const unsigned long *addr)
{
    return test_bit(BE_BIT_NR(nr), addr);
}

static inline void set_bit_inv(long nr, unsigned long *addr)
{
    set_bit(BE_BIT_NR(nr), addr);
}
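/*
 * s390 documentation numbers bits MSB-first (bit 0 is the most significant),
 * while test_bit()/set_bit() use LSB-first numbering; BE_BIT_NR() converts
 * between the two so the KVM feature bitmaps below can be addressed with
 * architecture bit numbers.
 */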
static int query_cpu_subfunc(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
        .addr = (uint64_t) &prop,
    };
    int rc;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    /*
     * We're going to add all subfunctions now, if the corresponding feature
     * is available that unlocks the query functions.
     */
    s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    return 0;
}
static int configure_cpu_subfunc(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
        .addr = (uint64_t) &prop,
    };

    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                           KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
        /* hardware support might be missing, IBC will handle most of this */
        return 0;
    }

    s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
        prop.ptff[0] |= 0x80; /* query is always available */
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        prop.kmac[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        prop.kmc[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        prop.km[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        prop.kimd[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
        prop.klmd[0] |= 0x80; /* query is always available */
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
        prop.pckmo[0] |= 0x80; /* query is always available */
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        prop.kmctr[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        prop.kmf[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        prop.kmo[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
        prop.pcc[0] |= 0x80; /* query is always available */
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
        prop.ppno[0] |= 0x80; /* query is always available */
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
static int kvm_to_feat[][2] = {
    { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
    { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
    { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
    { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
    { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
    { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
    { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
    { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
    { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
    { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
    { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
    { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
    { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
};
static int query_cpu_feat(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
        .addr = (uint64_t) &prop,
    };
    int rc;
    int i;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_bit_inv(kvm_to_feat[i][0], (unsigned long *)prop.feat)) {
            set_bit(kvm_to_feat[i][1], features);
        }
    }
    return 0;
}

static int configure_cpu_feat(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
        .addr = (uint64_t) &prop,
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_bit(kvm_to_feat[i][1], features)) {
            set_bit_inv(kvm_to_feat[i][0], (unsigned long *)prop.feat);
        }
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
bool kvm_s390_cpu_models_supported(void)
{
    if (!cpu_model_allowed()) {
        /* compatibility machines interfere with the cpu model */
        return false;
    }
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_SUBFUNC);
}
void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_machine prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE,
        .addr = (uint64_t) &prop,
    };
    uint16_t unblocked_ibc = 0, cpu_type = 0;
    int rc;

    memset(model, 0, sizeof(*model));

    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }

    /* query the basic cpu model properties */
    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
        return;
    }

    cpu_type = cpuid_type(prop.cpuid);
    if (has_ibc(prop.ibc)) {
        model->lowest_ibc = lowest_ibc(prop.ibc);
        unblocked_ibc = unblocked_ibc(prop.ibc);
    }
    model->cpu_id = cpuid_id(prop.cpuid);
    model->cpu_ver = 0xff;

    /* get supported cpu features indicated via STFL(E) */
    s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
                             (uint8_t *) prop.fac_mask);
    /* dat-enhancement facility 2 has no bit but was introduced with stfle */
    if (test_bit(S390_FEAT_STFLE, model->features)) {
        set_bit(S390_FEAT_DAT_ENH_2, model->features);
    }
    /* get supported cpu features indicated e.g. via SCLP */
    rc = query_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU features: %d", rc);
        return;
    }
    /* get supported cpu subfunctions indicated via query / test bit */
    rc = query_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
        return;
    }

    /* with cpu model support, CMM is only indicated if really available */
    if (kvm_s390_cmma_available()) {
        set_bit(S390_FEAT_CMM, model->features);
    }

    if (s390_known_cpu_type(cpu_type)) {
        /* we want the exact model, even if some features are missing */
        model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc), NULL);
    } else {
        /* model unknown, e.g. too new - search using features */
        model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc),
                                       model->features);
    }
    if (!model->def) {
        error_setg(errp, "KVM: host CPU model could not be identified");
        return;
    }
    /* strip of features that are not part of the maximum model */
    bitmap_and(model->features, model->features, model->def->full_feat,
               S390_FEAT_MAX);
}
void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_processor prop = {
        .fac_list = { 0 },
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR,
        .addr = (uint64_t) &prop,
    };
    int rc;

    if (!model) {
        /* compatibility handling if cpu models are disabled */
        if (kvm_s390_cmma_available() && !mem_path) {
            kvm_s390_enable_cmma();
        }
        return;
    }
    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }
    prop.cpuid = s390_cpuid_from_cpu_model(model);
    prop.ibc = s390_ibc_from_cpu_model(model);
    /* configure cpu features indicated via STFL(e) */
    s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
                         (uint8_t *) prop.fac_list);
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
        return;
    }
    /* configure cpu features indicated e.g. via SCLP */
    rc = configure_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
        return;
    }
    /* configure cpu subfunctions indicated via query / test bit */
    rc = configure_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
        return;
    }
    /* enable CMM via CMMA - disable on hugetlbfs */
    if (test_bit(S390_FEAT_CMM, model->features)) {
        if (mem_path) {
            error_report("Warning: CMM will not be enabled because it is not "
                         "compatible to hugetlbfs.");
        } else {
            kvm_s390_enable_cmma();
        }
    }
}