/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21
#include "qemu/osdep.h"
22
#include "qapi/error.h"
24
#include "exec/gdbstub.h"
25
#include "qemu/timer.h"
26
#include "exec/exec-all.h"
27
#include "exec/cpu_ldst.h"
28
#include "hw/s390x/ioinst.h"
29
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

/*
 * Debug tracing: when DEBUG_S390 is defined, DPRINTF logs either to
 * stderr (and the qemu log if separate) or to the qemu log only;
 * otherwise it compiles to a no-op.
 * NOTE(review): the #else/#endif structure was restored from the three
 * visible DPRINTF definitions — verify against the original file.
 */
#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
51
#ifndef CONFIG_USER_ONLY
52
void s390x_tod_timer(void *opaque)
54
S390CPU *cpu = opaque;
55
CPUS390XState *env = &cpu->env;
57
env->pending_int |= INTERRUPT_TOD;
58
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
61
void s390x_cpu_timer(void *opaque)
63
S390CPU *cpu = opaque;
64
CPUS390XState *env = &cpu->env;
66
env->pending_int |= INTERRUPT_CPUTIMER;
67
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
71
/*
 * Create (but do not realize) an S390CPU object for @cpu_model.
 * The model string may carry a ",feature,..." suffix which is split off
 * and handed to the CPU class feature parser exactly once per process
 * (features_parsed latch).  Returns NULL and sets @errp on error.
 * NOTE(review): braces, local declarations and the error/cleanup paths
 * were reconstructed from context — confirm against the original.
 */
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    static bool features_parsed;
    char *name, *features;
    const char *typename;
    ObjectClass *oc;
    CPUClass *cc;

    name = g_strdup(cpu_model);
    /* Split "model,feat1,feat2" — everything after the first comma is
       feature syntax for parse_features(). */
    features = strchr(name, ',');
    if (features) {
        features[0] = 0;
        features++;
    }

    oc = cpu_class_by_name(TYPE_S390_CPU, name);
    if (!oc) {
        error_setg(errp, "Unknown CPU definition \'%s\'", name);
        g_free(name);
        return NULL;
    }
    typename = object_class_get_name(oc);

    if (!features_parsed) {
        features_parsed = true;
        cc = CPU_CLASS(oc);
        cc->parse_features(typename, features, errp);
    }
    g_free(name);

    if (*errp) {
        return NULL;
    }
    return S390_CPU(CPU(object_new(typename)));
}
107
/*
 * Create, number and realize a new CPU.  On any failure the partially
 * constructed object is unreferenced, the error is propagated to @errp
 * and NULL is returned.
 */
S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}
132
S390CPU *cpu_s390x_init(const char *cpu_model)
136
/* Use to track CPU ID for linux-user only */
137
static int64_t next_cpu_id;
139
cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
141
error_report_err(err);
146
#if defined(CONFIG_USER_ONLY)
148
/* User-only build: there is nothing to deliver; just clear the
   exception index so cpu_loop handles everything in userspace. */
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}
153
/*
 * User-only MMU fault: every fault becomes an addressing program
 * exception; returns 1 (fault) unconditionally.
 */
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}
166
#else /* !CONFIG_USER_ONLY */
168
/* Ensure to exit the TB after this call! */
169
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
171
CPUState *cs = CPU(s390_env_get_cpu(env));
173
cs->exception_index = EXCP_PGM;
174
env->int_pgm_code = code;
175
env->int_pgm_ilen = ilen;
178
/*
 * System-mode MMU fault handler: translate @orig_vaddr via the DAT
 * tables for the ASC implied by @mmu_idx, check the result against RAM
 * size, and install the mapping in the TLB.
 * Returns 0 on success, 1 when an exception was triggered.
 * NOTE(review): braces, the 31-bit address masking and the return
 * statements were reconstructed from context — confirm.
 */
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode: addresses wrap at 2 GiB. */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
            __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
220
/*
 * Debugger (gdbstub) page lookup: translate @vaddr using the current
 * PSW ASC without raising exceptions; returns -1 if untranslatable.
 */
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}
239
/* Debugger address lookup: translate the page, then re-apply the
   intra-page offset. */
hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}
251
/*
 * Load a new PSW (mask + address).  Unpacks the condition code from
 * mask bits 18-19 into env->cc_op, recomputes PER watchpoints when the
 * PER mask bit toggles, and halts the CPU on a wait-state PSW (shutting
 * down if no CPU remains runnable).
 */
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    env->cc_op = (mask >> 44) & 3;

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}
275
/* Return the PSW mask with the lazily-evaluated condition code folded
   back into bits 18-19. */
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                         env->cc_vr);

    r &= ~PSW_MASK_CC;
    assert(!(env->cc_op & ~3));
    r |= (uint64_t)env->cc_op << 44;

    return r;
}
291
/* Map the per-CPU lowcore (prefix area at env->psa) for read/write.
   Aborts if the full LowCore cannot be mapped contiguously.
   Caller must release with cpu_unmap_lowcore(). */
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}
306
/* Release a lowcore mapping obtained from cpu_map_lowcore(). */
static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
311
/* Deliver a restart interrupt: save the current PSW to the restart-old
   slot in lowcore and load the restart-new PSW. */
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
328
/*
 * Deliver a program interrupt: resolve the instruction length (possibly
 * reading the opcode when it was deferred), merge any pending PER event,
 * store code/ilen/old PSW into lowcore and load the program-new PSW.
 * NOTE(review): the switch skeleton on ILEN_LATER/ILEN_LATER_INC was
 * reconstructed from the two visible get_ilen() calls — confirm.
 */
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        /* Length not known at trigger time: decode it now. */
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        /* As above, but the PSW must also advance past the insn. */
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
376
/* Deliver a supervisor-call interrupt: store code/ilen and the old PSW
   (advanced past the SVC insn) into lowcore, load the svc-new PSW, then
   chain a pending PER event as a program interrupt. */
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}
403
#define VIRTIO_SUBCODE_64 0x0D00
405
static void do_ext_interrupt(CPUS390XState *env)
407
S390CPU *cpu = s390_env_get_cpu(env);
412
if (!(env->psw.mask & PSW_MASK_EXT)) {
413
cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
416
if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
417
cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
420
q = &env->ext_queue[env->ext_index];
421
lowcore = cpu_map_lowcore(env);
423
lowcore->ext_int_code = cpu_to_be16(q->code);
424
lowcore->ext_params = cpu_to_be32(q->param);
425
lowcore->ext_params2 = cpu_to_be64(q->param64);
426
lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
427
lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
428
lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
429
mask = be64_to_cpu(lowcore->external_new_psw.mask);
430
addr = be64_to_cpu(lowcore->external_new_psw.addr);
432
cpu_unmap_lowcore(lowcore);
435
if (env->ext_index == -1) {
436
env->pending_int &= ~INTERRUPT_EXT;
439
DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
440
env->psw.mask, env->psw.addr);
442
load_psw(env, mask, addr);
445
/*
 * Deliver at most one queued I/O interrupt: scan all interruption
 * subclasses, deliver the first queued interrupt whose ISC is enabled
 * in CR6, and clear the pending-I/O flag only when every queue that
 * remains is empty or delivered.
 * NOTE(review): the loop/guard skeleton (continue paths, found/disable
 * bookkeeping) was reconstructed from the visible statements — this is
 * the least certain block; confirm carefully against the original.
 */
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            /* Nothing queued for this subclass. */
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            /* Subclass masked by CR6: keep it pending. */
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            /* More interrupts remain queued for this subclass. */
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
510
/*
 * Deliver a (CRW-pending) machine-check interrupt: dump the full
 * register state into the lowcore save areas, store the fixed MCIC
 * words and old PSW, and load the mcck-new PSW.  Silently returns when
 * CRW machine checks are disabled in CR14 bit 28.
 * NOTE(review): braces, local declarations, the q->type check and the
 * mchk_index decrement were reconstructed from context — confirm.
 */
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    /* Fixed machine-check interruption code words. */
    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
573
/*
 * Top-level interrupt dispatcher (system emulation): pick a pending
 * interrupt class in priority order (machine check, external, I/O)
 * when no exception is already set, then deliver via the class-specific
 * handler.  Clears CPU_INTERRUPT_HARD once nothing remains pending.
 * NOTE(review): braces and the switch-case labels were reconstructed
 * from the visible handler calls — confirm.
 */
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);

    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
639
/* cpu-exec hook: deliver a pending hard interrupt when external
   interrupts are enabled in the PSW; returns true when one was taken. */
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
653
/*
 * Rebuild the CPU watchpoints that implement PER storage-alteration
 * events from CR9 (event mask) and CR10/CR11 (address range).
 */
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed. First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchoint spanning the whole memory range, so
           split it in two parts.   */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}
692
/*
 * Debug-exception hook: when a PER (BP_CPU) watchpoint fired, record
 * the PER event in env, drop the watchpoints, and re-execute the
 * instruction so the PER program exception is raised on the retry.
 */
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently no way to detect the address space used
           to trigger the watchpoint. For now just consider it is the
           current default ASC. This turn to be true except when MVCP
           and MVCS instrutions are not used.  */
        env->per_perc_atmid |= env->psw.mask & (PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}
721
#endif /* CONFIG_USER_ONLY */