/*
 * S390x MMU related functions
 *
 * Copyright (c) 2011 Alexander Graf
 * Copyright (c) 2015 Thomas Huth, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "hw/s390x/storage-keys.h"

/* #define DEBUG_S390 */
/* #define DEBUG_S390_PTE */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef DEBUG_S390_PTE
#define PTE_DPRINTF DPRINTF
#else
#define PTE_DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Fetch/store bits in the translation exception code: */
#define FS_READ  0x800
#define FS_WRITE 0x400

static void trigger_access_exception(CPUS390XState *env, uint32_t type,
                                     uint32_t ilen, uint64_t tec)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    if (kvm_enabled()) {
        kvm_s390_access_exception(cpu, type, tec);
    } else {
        CPUState *cs = CPU(cpu);
        stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
        trigger_pgm_exception(env, type, ilen);
    }
}
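
/*
 * The translation exception code (TEC) stored into the lowcore combines
 * the failing virtual address with a fetch/store indication
 * (FS_READ/FS_WRITE), the PSW address space control shifted into the two
 * low bits and, for protection exceptions, the protection indication bit
 * (the "| 4" in trigger_prot_fault() below).
 */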

static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
                               uint64_t asc, int rw, bool exc)
{
    uint64_t tec;

    tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | 4 | asc >> 46;

    DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);

    if (!exc) {
        return;
    }

    trigger_access_exception(env, PGM_PROTECTION, ILEN_LATER_INC, tec);
}

static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
                               uint32_t type, uint64_t asc, int rw, bool exc)
{
    int ilen = ILEN_LATER;
    uint64_t tec;

    tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | asc >> 46;

    DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);

    if (!exc) {
        return;
    }

    /* Code accesses have an undefined ilc. */
    if (rw == MMU_INST_FETCH) {
        ilen = 2;
    }

    trigger_access_exception(env, type, ilen, tec);
}

/**
 * Translate real address to absolute (= physical)
 * address by taking care of the prefix mapping.
 */
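/*
 * Prefixing swaps the first 8KB of absolute storage with the 8KB block
 * at the prefix address (env->psa), so that each CPU can have its own
 * lowcore at real address 0.
 */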
static target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
{
    if (raddr < 0x2000) {
        return raddr + env->psa;    /* Map the lowcore. */
    } else if (raddr >= env->psa && raddr < env->psa + 0x2000) {
        return raddr - env->psa;    /* Map the 0 page. */
    }
    return raddr;
}

/* Decode page table entry (normal 4KB page) */
static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
                             uint64_t asc, uint64_t pt_entry,
                             target_ulong *raddr, int *flags, int rw, bool exc)
{
    if (pt_entry & _PAGE_INVALID) {
        DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, pt_entry);
        trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw, exc);
        return -1;
    }
    if (pt_entry & _PAGE_RES0) {
        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
        return -1;
    }
    if (pt_entry & _PAGE_RO) {
        *flags &= ~PAGE_WRITE;
    }

    *raddr = pt_entry & _ASCE_ORIGIN;

    PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, pt_entry);

    return 0;
}

#define VADDR_PX    0xff000         /* Page index bits */
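/*
 * The page index is an 8-bit field, i.e. a page table has 256 entries of
 * 8 bytes each, so (vaddr & VADDR_PX) >> 9 directly yields the byte
 * offset of the page table entry within the table.
 */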

/* Decode segment table entry */
static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
                                 uint64_t asc, uint64_t st_entry,
                                 target_ulong *raddr, int *flags, int rw,
                                 bool exc)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t origin, offs, pt_entry;

    if (st_entry & _SEGMENT_ENTRY_RO) {
        *flags &= ~PAGE_WRITE;
    }

    if ((st_entry & _SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
        /* Decode EDAT1 segment frame absolute address (1MB page) */
        *raddr = (st_entry & 0xfffffffffff00000ULL) | (vaddr & 0xfffff);
        PTE_DPRINTF("%s: SEG=0x%" PRIx64 "\n", __func__, st_entry);
        return 0;
    }

    /* Look up 4KB page entry */
    origin = st_entry & _SEGMENT_ENTRY_ORIGIN;
    offs = (vaddr & VADDR_PX) >> 9;
    pt_entry = ldq_phys(cs->as, origin + offs);
    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                __func__, origin, offs, pt_entry);
    return mmu_translate_pte(env, vaddr, asc, pt_entry, raddr, flags, rw, exc);
}
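
/*
 * Each region/segment table level translates 11 bits of the virtual
 * address with 8-byte table entries, so the byte offset into a table is
 * ((vaddr >> (20 + 11 * level / 4)) & 0x7ff) * 8, which the walk below
 * folds into the equivalent (vaddr >> (17 + 11 * level / 4)) & 0x3ff8.
 */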

/* Decode region table entries */
static int mmu_translate_region(CPUS390XState *env, target_ulong vaddr,
                                uint64_t asc, uint64_t entry, int level,
                                target_ulong *raddr, int *flags, int rw,
                                bool exc)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t origin, offs, new_entry;
    const int pchks[4] = {
        PGM_SEGMENT_TRANS, PGM_REG_THIRD_TRANS,
        PGM_REG_SEC_TRANS, PGM_REG_FIRST_TRANS
    };

    PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, entry);

    origin = entry & _REGION_ENTRY_ORIGIN;
    offs = (vaddr >> (17 + 11 * level / 4)) & 0x3ff8;

    new_entry = ldq_phys(cs->as, origin + offs);
    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                __func__, origin, offs, new_entry);

    if ((new_entry & _REGION_ENTRY_INV) != 0) {
        DPRINTF("%s: invalid region\n", __func__);
        trigger_page_fault(env, vaddr, pchks[level / 4], asc, rw, exc);
        return -1;
    }

    if ((new_entry & _REGION_ENTRY_TYPE_MASK) != level) {
        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
        return -1;
    }

    if (level == _ASCE_TYPE_SEGMENT) {
        return mmu_translate_segment(env, vaddr, asc, new_entry, raddr, flags,
                                     rw, exc);
    }

    /* Check region table offset and length */
    offs = (vaddr >> (28 + 11 * (level - 4) / 4)) & 3;
    if (offs < ((new_entry & _REGION_ENTRY_TF) >> 6)
        || offs > (new_entry & _REGION_ENTRY_LENGTH)) {
        DPRINTF("%s: invalid offset or len (%lx)\n", __func__, new_entry);
        trigger_page_fault(env, vaddr, pchks[level / 4 - 1], asc, rw, exc);
        return -1;
    }

    if ((env->cregs[0] & CR0_EDAT) && (new_entry & _REGION_ENTRY_RO)) {
        *flags &= ~PAGE_WRITE;
    }

    /* yet another region */
    return mmu_translate_region(env, vaddr, asc, new_entry, level - 4,
                                raddr, flags, rw, exc);
}
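
/*
 * An address space control element (ASCE) provides the origin of the
 * top-level translation table together with its designation type (i.e.
 * how many table levels have to be walked) and its length. The checks
 * below verify that the virtual address fits into the address range
 * that the ASCE type can actually map before starting the walk.
 */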
static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
                              uint64_t asc, uint64_t asce, target_ulong *raddr,
                              int *flags, int rw, bool exc)
{
    int level;
    int r;

    if (asce & _ASCE_REAL_SPACE) {
        /* direct mapping */
        *raddr = vaddr;
        return 0;
    }

    level = asce & _ASCE_TYPE_MASK;
    switch (level) {
    case _ASCE_TYPE_REGION1:
        if ((vaddr >> 62) > (asce & _ASCE_TABLE_LENGTH)) {
            trigger_page_fault(env, vaddr, PGM_REG_FIRST_TRANS, asc, rw, exc);
            return -1;
        }
        break;
    case _ASCE_TYPE_REGION2:
        if (vaddr & 0xffe0000000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffe0000000000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
            return -1;
        }
        if ((vaddr >> 51 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
            trigger_page_fault(env, vaddr, PGM_REG_SEC_TRANS, asc, rw, exc);
            return -1;
        }
        break;
    case _ASCE_TYPE_REGION3:
        if (vaddr & 0xfffffc0000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xfffffc0000000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
            return -1;
        }
        if ((vaddr >> 40 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
            trigger_page_fault(env, vaddr, PGM_REG_THIRD_TRANS, asc, rw, exc);
            return -1;
        }
        break;
    case _ASCE_TYPE_SEGMENT:
        if (vaddr & 0xffffffff80000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffffffff80000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
            return -1;
        }
        if ((vaddr >> 29 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
            trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw, exc);
            return -1;
        }
        break;
    }

    r = mmu_translate_region(env, vaddr, asc, asce, level, raddr, flags, rw,
                             exc);
    if (rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE)) {
        trigger_prot_fault(env, vaddr, asc, rw, exc);
        return -1;
    }

    return r;
}

/**
 * Translate a virtual (logical) address into a physical (absolute) address.
 * @param vaddr  the virtual address
 * @param rw     0 = read, 1 = write, 2 = code fetch
 * @param asc    address space control (one of the PSW_ASC_* modes)
 * @param raddr  the translated address is stored to this pointer
 * @param flags  the PAGE_READ/WRITE/EXEC flags are stored to this pointer
 * @param exc    true = inject a program check if a fault occurred
 * @return       0 if the translation was successful, -1 if a fault occurred
 */
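/*
 * Note that a successful translation also updates the storage key of the
 * target page: the reference bit is set for readable pages and the change
 * bit for writable ones (see the skeyclass get/set calls below).
 */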
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags, bool exc)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    int r = -1;
    uint8_t key;

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    vaddr &= TARGET_PAGE_MASK;

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        *raddr = vaddr;
        r = 0;
        goto out;
    }

    switch (asc) {
    case PSW_ASC_PRIMARY:
        PTE_DPRINTF("%s: asc=primary\n", __func__);
        r = mmu_translate_asce(env, vaddr, asc, env->cregs[1], raddr, flags,
                               rw, exc);
        break;
    case PSW_ASC_HOME:
        PTE_DPRINTF("%s: asc=home\n", __func__);
        r = mmu_translate_asce(env, vaddr, asc, env->cregs[13], raddr, flags,
                               rw, exc);
        break;
    case PSW_ASC_SECONDARY:
        PTE_DPRINTF("%s: asc=secondary\n", __func__);
        /*
         * Instruction: Primary
         * Data: Secondary
         */
        if (rw == MMU_INST_FETCH) {
            r = mmu_translate_asce(env, vaddr, PSW_ASC_PRIMARY, env->cregs[1],
                                   raddr, flags, rw, exc);
            *flags &= ~(PAGE_READ | PAGE_WRITE);
        } else {
            r = mmu_translate_asce(env, vaddr, PSW_ASC_SECONDARY, env->cregs[7],
                                   raddr, flags, rw, exc);
            *flags &= ~(PAGE_EXEC);
        }
        break;
    case PSW_ASC_ACCREG:
    default:
        hw_error("guest switched to unknown asc mode\n");
        break;
    }

 out:
    /* Convert real address -> absolute address */
    *raddr = mmu_real2abs(env, *raddr);

    if (r == 0 && *raddr < ram_size) {
        if (skeyclass->get_skeys(ss, *raddr / TARGET_PAGE_SIZE, 1, &key)) {
            trace_get_skeys_nonzero(r);
            return 0;
        }

        if (*flags & PAGE_READ) {
            key |= SK_R;
        }

        if (*flags & PAGE_WRITE) {
            key |= SK_C;
        }

        if (skeyclass->set_skeys(ss, *raddr / TARGET_PAGE_SIZE, 1, &key)) {
            trace_set_skeys_nonzero(r);
            return 0;
        }
    }

    return r;
}

/**
 * lowprot_enabled: Check whether low-address protection is enabled
 */
static bool lowprot_enabled(const CPUS390XState *env)
{
    if (!(env->cregs[0] & CR0_LOWPROT)) {
        return false;
    }
    if (!(env->psw.mask & PSW_MASK_DAT)) {
        return true;
    }

    /* Check the private-space control bit */
    switch (env->psw.mask & PSW_MASK_ASC) {
    case PSW_ASC_PRIMARY:
        return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
    case PSW_ASC_SECONDARY:
        return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
    case PSW_ASC_HOME:
        return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
    default:
        /* We don't support access register mode */
        error_report("unsupported addressing mode");
        exit(1);
    }
}

/**
 * translate_pages: Translate a set of consecutive logical page addresses
 * to absolute addresses
 */
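/*
 * Note: low-address protection guards the first 512 bytes of the two
 * lowcore pages against writes, i.e. the ranges 0-511 and 4096-4607.
 */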
static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
                           target_ulong *pages, bool is_write)
{
    bool lowprot = is_write && lowprot_enabled(&cpu->env);
    uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC;
    CPUS390XState *env = &cpu->env;
    int ret, i, pflags;

    for (i = 0; i < nr_pages; i++) {
        /* Low-address protection? */
        if (lowprot && (addr < 512 || (addr >= 4096 && addr < 4096 + 512))) {
            trigger_access_exception(env, PGM_PROTECTION, ILEN_LATER_INC, 0);
            return -EACCES;
        }
        ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, true);
        if (ret) {
            return ret;
        }
        if (!address_space_access_valid(&address_space_memory, pages[i],
                                        TARGET_PAGE_SIZE, is_write)) {
            program_interrupt(env, PGM_ADDRESSING, 0);
            return -EFAULT;
        }
        addr += TARGET_PAGE_SIZE;
    }

    return 0;
}

/**
 * s390_cpu_virt_mem_rw:
 * @laddr:     the logical start address
 * @ar:        the access register number
 * @hostbuf:   buffer in host memory. NULL = do only checks w/o copying
 * @len:       length that should be transferred
 * @is_write:  true = write, false = read
 * Returns:    0 on success, non-zero if an exception occurred
 *
 * Copy from/to guest memory using logical addresses. Note that we inject a
 * program interrupt in case there is an error while accessing the memory.
 */
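/*
 * Under KVM the memory op ioctl is tried first; if it is unavailable or
 * fails (negative return value), we fall back to the software
 * page-by-page translation below.
 */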
int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                         int len, bool is_write)
{
    int currlen, nr_pages, i;
    target_ulong *pages;
    int ret;

    if (kvm_enabled()) {
        ret = kvm_s390_mem_op(cpu, laddr, ar, hostbuf, len, is_write);
        if (ret >= 0) {
            return ret;
        }
    }

    nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1) >> TARGET_PAGE_BITS)
               + 1;
    pages = g_malloc(nr_pages * sizeof(*pages));

    ret = translate_pages(cpu, laddr, nr_pages, pages, is_write);
    if (ret == 0 && hostbuf != NULL) {
        /* Copy data by stepping through the area page by page */
        for (i = 0; i < nr_pages; i++) {
            currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
            cpu_physical_memory_rw(pages[i] | (laddr & ~TARGET_PAGE_MASK),
                                   hostbuf, currlen, is_write);