#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"

#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size);
#endif

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    stfq_le_p(buf, env->vfp.regs[reg]);
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    env->vfp.regs[reg] = ldfq_le_p(buf);
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
        env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
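
/* Editor's note (not in the original source): the register numbering in the
 * two helpers above feeds the GDB XML descriptions registered further down.
 * For a VFP3 core nregs == 32, so gdb regs 0..31 are D0..D31 and 32..34 are
 * FPSID/FPSCR/FPEXC -- 35 registers, matching the "arm-vfp3.xml" count passed
 * to gdb_register_coprocessor(). With NEON, the 16 quadword aliases push the
 * total to 51; plain VFP is 16 + 3 = 19.
 */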

static int raw_read(CPUARMState *env, const ARMCPRegInfo *ri,
    if (ri->type & ARM_CP_64BIT) {
        *value = CPREG_FIELD64(env, ri);
        *value = CPREG_FIELD32(env, ri);

static int raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
    if (ri->type & ARM_CP_64BIT) {
        CPREG_FIELD64(env, ri) = value;
        CPREG_FIELD32(env, ri) = value;

static bool read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
    /* Raw read of a coprocessor register (as needed for migration, etc)
     * return true on success, false if the read is impossible for some reason.
     */
    if (ri->type & ARM_CP_CONST) {
    } else if (ri->raw_readfn) {
        return (ri->raw_readfn(env, ri, v) == 0);
    } else if (ri->readfn) {
        return (ri->readfn(env, ri, v) == 0);
    if (ri->type & ARM_CP_64BIT) {
        *v = CPREG_FIELD64(env, ri);
        *v = CPREG_FIELD32(env, ri);

static bool write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Return true on success, false if the write is impossible for some reason.
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     */
    if (ri->type & ARM_CP_CONST) {
    } else if (ri->raw_writefn) {
        return (ri->raw_writefn(env, ri, v) == 0);
    } else if (ri->writefn) {
        return (ri->writefn(env, ri, v) == 0);
    if (ri->type & ARM_CP_64BIT) {
        CPREG_FIELD64(env, ri) = v;
        CPREG_FIELD32(env, ri) = v;

bool write_cpustate_to_list(ARMCPU *cpu)
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        ri = get_arm_cp_reginfo(cpu, regidx);
        if (ri->type & ARM_CP_NO_MIGRATE) {
        if (!read_raw_cp_reg(&cpu->env, ri, &v)) {
        cpu->cpreg_values[i] = v;

bool write_list_to_cpustate(ARMCPU *cpu)
    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;
        ri = get_arm_cp_reginfo(cpu, regidx);
        if (ri->type & ARM_CP_NO_MIGRATE) {
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        if (!write_raw_cp_reg(&cpu->env, ri, v) ||
            !read_raw_cp_reg(&cpu->env, ri, &readback) ||

static void add_cpreg_to_list(gpointer key, gpointer opaque)
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;
    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu, regidx);
    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;

static void count_cpreg(gpointer key, gpointer opaque)
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;
    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu, regidx);
    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_array_len++;

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
    GList **plist = udata;
    *plist = g_list_prepend(*plist, key);

void init_cpreg_list(ARMCPU *cpu)
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);
    keys = g_list_sort(keys, cpreg_key_compare);
    cpu->cpreg_array_len = 0;
    g_list_foreach(keys, count_cpreg, cpu);
    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;
    g_list_foreach(keys, add_cpreg_to_list, cpu);
    assert(cpu->cpreg_array_len == arraylen);
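
/* Editor's note (illustrative sketch, not in the original source): a typical
 * migration cycle round-trips through the helpers above --
 * init_cpreg_list() builds the sorted (index, value) arrays once at cpu
 * realize time, write_cpustate_to_list() snapshots cp15 state from cpu->env
 * before device state is sent, and write_list_to_cpustate() (with its
 * write-then-readback check) restores it on the destination.
 */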

static int dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    env->cp15.c3 = value;
    tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */

static int fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    if (env->cp15.c13_fcse != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        env->cp15.c13_fcse = value;

static int contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        env->cp15.c13_context = value;

static int tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
    /* Invalidate all (TLBIALL) */

static int tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);

static int tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
    /* Invalidate by ASID (TLBIASID) */
    tlb_flush(env, value == 0);

static int tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);
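
/* Editor's note (not in the original source): in the architectural TLBIMVA
 * encoding the MVA lives in bits [31:12] of the written value and the ASID
 * in bits [7:0]. Because QEMU's TLB here is not tagged by ASID, the helpers
 * above only use the page address (value & TARGET_PAGE_MASK) or fall back
 * to a full flush.
 */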

static const ARMCPRegInfo cp_reginfo[] = {
    /* DBGDIDR: just RAZ. In particular this means the "debug architecture
     * version" bits will read as a reserved value, which should cause
     * Linux to not try to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15,
      .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
      .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    /* ??? This covers not just the impdef TLB lockdown registers but also
     * some v7VMSA registers relating to TEX remap, so it is overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_MIGRATE },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,

static int cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    if (env->cp15.c1_coproc != value) {
        env->cp15.c1_coproc = value;
        /* ??? Is this safe when called from within a TB? */

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
      .resetvalue = 0, .writefn = cpacr_write },

static int pmreg_read(CPUARMState *env, const ARMCPRegInfo *ri,
    /* Generic performance monitor register read function for where
     * user access may be allowed by PMUSERENR.
     */
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    *value = CPREG_FIELD32(env, ri);

static int pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

static int pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    env->cp15.c9_pmcnten |= value;

static int pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    env->cp15.c9_pmcnten &= ~value;

static int pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    env->cp15.c9_pmovsr &= ~value;

static int pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    env->cp15.c9_pmxevtyper = value & 0xff;

static int pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    env->cp15.c9_pmuserenr = value & 1;

static int pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
    /* We have no event counters so only the C bit can be changed */
    env->cp15.c9_pminten |= value;

static int pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    env->cp15.c9_pminten &= ~value;

static int ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri,
    ARMCPU *cpu = arm_env_get_cpu(env);
    *value = cpu->ccsidr[env->cp15.c0_cssel];

static int csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    env->cp15.c0_cssel = value & 0xf;
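
/* Editor's note (not in the original source): CSSELR and CCSIDR work as a
 * pair -- software writes a level/type selector to CSSELR and then reads the
 * matching cache geometry from CCSIDR. E.g. writing 0 selects the L1
 * data/unified cache and writing 1 (InD bit set) selects the L1 instruction
 * cache, which is why ccsidr_read() above indexes cpu->ccsidr[] with
 * c0_cssel.
 */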

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .readfn = pmreg_read, .writefn = pmcntenset_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .readfn = pmreg_read, .writefn = pmcntenclr_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .readfn = pmreg_read, .writefn = pmovsr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write },
    /* Unimplemented so WI. Strictly speaking write accesses in PL0 should
     */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI. XXX should respect PMUSERENR.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Unimplemented, RAZ/WI. XXX PMUSERENR */
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .readfn = pmreg_read, .writefn = pmxevtyper_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. XXX PMUSERENR */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0, .writefn = pmintenclr_write, },
    { .name = "CCSIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
    { .name = "CSSELR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
      .writefn = csselr_write, .resetvalue = 0 },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },

static int teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)

static int teehbr_read(CPUARMState *env, const ARMCPRegInfo *ri,
    /* This is a helper function because the user access rights
     * depend on the value of the TEECR.
     */
    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
    *value = env->teehbr;

static int teehbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .resetvalue = 0, .raw_readfn = raw_read, .raw_writefn = raw_write,
      .readfn = teehbr_read, .writefn = teehbr_write },

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c13_tls1),
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.c13_tls2),
    { .name = "TPIDRPRW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 4,
      .fieldoffset = offsetof(CPUARMState, cp15.c13_tls3),

#ifndef CONFIG_USER_ONLY

static uint64_t gt_get_countervalue(CPUARMState *env)
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
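
/* Editor's note (illustrative, assuming GTIMER_SCALE == 16, its value in
 * cpu.h at the time of writing): the counter above advances at
 * 1e9 / 16 = 62.5 MHz, which is also the frequency the CNTFRQ reset value
 * below advertises.
 */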

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
    /* Timer enabled: calculate and set current ISTATUS, irq, and
     * reset timer to when ISTATUS next has to change
     */
    uint64_t count = gt_get_countervalue(&cpu->env);
    /* Note that this must be unsigned 64 bit arithmetic: */
    int istatus = count >= gt->cval;
    gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
    qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                 (istatus && !(gt->ctl & 2)));
    /* Next transition is when count rolls back over to zero */
    nexttick = UINT64_MAX;
    /* Next transition is when we hit cval */
    /* Note that the desired next expiry time might be beyond the
     * signed-64-bit range of a QEMUTimer -- in this case we just
     * set the timer for as far in the future as possible. When the
     * timer expires we will reset the timer for any remaining period.
     */
    if (nexttick > INT64_MAX / GTIMER_SCALE) {
        nexttick = INT64_MAX / GTIMER_SCALE;
    timer_mod(cpu->gt_timer[timeridx], nexttick);
    /* Timer disabled: ISTATUS and timer output always clear */
    qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
    timer_del(cpu->gt_timer[timeridx]);
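
/* Editor's note (worked example, not in the original source): the
 * deposit32() call above rewrites only the ISTATUS bit (bit 2) of CNTx_CTL.
 * With gt->ctl == 1 (ENABLE set, IMASK clear) and istatus == 1,
 * deposit32(1, 2, 1, 1) yields 0b101 == 5, and the irq line is then raised
 * because ISTATUS is set while IMASK (bit 1) is clear.
 */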

static int gt_cntfrq_read(CPUARMState *env, const ARMCPRegInfo *ri,
    /* Not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
    if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
    *value = env->cp15.c14_cntfrq;

static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->opc1 & 1;
    timer_del(cpu->gt_timer[timeridx]);

static int gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri,
    int timeridx = ri->opc1 & 1;
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
    *value = gt_get_countervalue(env);

static int gt_cval_read(CPUARMState *env, const ARMCPRegInfo *ri,
    int timeridx = ri->opc1 & 1;
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
    *value = env->cp15.c14_timer[timeridx].cval;

static int gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
    int timeridx = ri->opc1 & 1;
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);

static int gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
    int timeridx = ri->crm & 1;
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
    *value = (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                        gt_get_countervalue(env));

static int gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
    int timeridx = ri->crm & 1;
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
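
/* Editor's note (worked example, not in the original source): TVAL is a
 * signed 32-bit downcounter view of CVAL, so the pair of helpers above
 * maintain TVAL == (int32_t)(CVAL - count). For example, writing
 * TVAL = 0xffffffff (i.e. -1 after sextract64) when the counter reads 100
 * sets CVAL = 99, a deadline already in the past, so the next
 * gt_recalc_timer() sees ISTATUS set immediately.
 */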

static int gt_ctl_read(CPUARMState *env, const ARMCPRegInfo *ri,
    int timeridx = ri->crm & 1;
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
    *value = env->cp15.c14_timer[timeridx].ctl;

static int gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->crm & 1;
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
    env->cp15.c14_timer[timeridx].ctl = value & 3;
    if ((oldval ^ value) & 1) {
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (oldval & 4) && !(value & 2));

void arm_gt_ptimer_cb(void *opaque)
    ARMCPU *cpu = opaque;
    gt_recalc_timer(cpu, GTIMER_PHYS);

void arm_gt_vtimer_cb(void *opaque)
    ARMCPU *cpu = opaque;
    gt_recalc_timer(cpu, GTIMER_VIRT);

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
      .readfn = gt_cntfrq_read, .raw_readfn = raw_read,
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .cp = 15, .crn = 14, .crm = 1, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_ctl_read, .writefn = gt_ctl_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write,
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_ctl_read, .writefn = gt_ctl_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write,
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .readfn = gt_cval_read, .writefn = gt_cval_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write,
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .readfn = gt_cval_read, .writefn = gt_cval_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write,

/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio
 * outputs, so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {

static int par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        env->cp15.c7_par = value;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        env->cp15.c7_par = value & 0xfffff6ff;
        env->cp15.c7_par = value & 0xfffff1ff;

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

/* Return true if extended addresses are enabled, ie this is an
 * LPAE implementation and we are using the long-descriptor translation
 * table format because the TTBCR EAE bit is set.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
    return arm_feature(env, ARM_FEATURE_LPAE)
        && (env->cp15.c2_control & (1U << 31));

static int ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    target_ulong page_size;
    int ret, is_user = ri->opc2 & 2;
    int access_type = ri->opc2 & 1;
    /* Other states are only available with TrustZone */
    ret = get_phys_addr(env, value, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        uint64_t par64 = (1 << 11); /* LPAE bit always set */
        par64 |= phys_addr & ~0xfffULL;
        /* We don't set the ATTR or SH fields in the PAR. */
        par64 |= (ret & 0x3f) << 1; /* FS */
        /* Note that S2WLK and FSTAGE are always zero, because we don't
         * implement virtualization and therefore there can't be a stage 2
         */
        env->cp15.c7_par = par64;
        env->cp15.c7_par_hi = par64 >> 32;
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        /* We do not set any attribute bits in the PAR */
        if (page_size == (1 << 24)
            && arm_feature(env, ARM_FEATURE_V7)) {
            env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
            env->cp15.c7_par = phys_addr & 0xfffff000;
        env->cp15.c7_par = ((ret & (1 << 10)) >> 5) |
                           ((ret & (1 << 12)) >> 6) |
                           ((ret & 0xf) << 1) | 1;
        env->cp15.c7_par_hi = 0;
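
/* Editor's note (worked example, not in the original source): for the
 * short-descriptor failure encoding above, a section translation fault
 * returns ret == 0x5, so PAR becomes ((0x5 & 0xf) << 1) | 1 == 0xb: the
 * F bit (bit 0) set and FS[3:0] == 0b0101 in bits [4:1], with the bits
 * derived from ret bits 10 and 12 both clear.
 */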

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c7_par),
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },

/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
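
/* Editor's note (worked example, not in the original source): with mask
 * starting at 3 and shifting left two bits per iteration (the
 * initialisation is elided above), these two loops round-trip:
 * extended_mpu_ap_bits(0x06) == 0x12 (regions 0 and 1 get 4-bit fields 0x2
 * and 0x1), and simple_mpu_ap_bits(0x12) == 0x06 again.
 */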

static int pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
    env->cp15.c5_data = extended_mpu_ap_bits(value);

static int pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri,
    *value = simple_mpu_ap_bits(env->cp15.c5_data);

static int pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
    env->cp15.c5_insn = extended_mpu_ap_bits(value);

static int pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri,
    *value = simple_mpu_ap_bits(env->cp15.c5_insn);

static int arm946_prbs_read(CPUARMState *env, const ARMCPRegInfo *ri,
    *value = env->cp15.c6_region[ri->crm];

static int arm946_prbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
    env->cp15.c6_region[ri->crm] = value;

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0,
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0,
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS", .cp = 15, .crn = 6, .crm = CP_ANY, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW,
      .readfn = arm946_prbs_read, .writefn = arm946_prbs_write, },

static int vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
    int maskshift = extract32(value, 0, 3);
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
    /* Note that we always calculate c2_mask and c2_base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TTBCR fields are used differently
     * and the c2_mask and c2_base_mask values are meaningless.
     */
    env->cp15.c2_control = value;
    env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);

static int vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
    return vmsa_ttbcr_raw_write(env, ri, value);

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
    env->cp15.c2_base_mask = 0xffffc000u;
    env->cp15.c2_control = 0;
    env->cp15.c2_mask = 0;

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "TTBR0", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_base0), .resetvalue = 0, },
    { .name = "TTBR1", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_base1), .resetvalue = 0, },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_ttbcr_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
    { .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data),

static int omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;

static int omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
    env->cp15.c15_threadid = value & 0xffff;

static int omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);

static int omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_MIGRATE,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },

static int xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
    if (env->cp15.c15_cpar != value) {
        /* Changes cp0 to cp13 behavior, so needs a TB flush. */
        env->cp15.c15_cpar = value;

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },

static int mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri,
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t mpidr = cs->cpu_index;
    /* We don't support setting cluster ID ([8..11])
     * so these bits always RAZ.
     */
    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, A9UP.) However we do
         * not currently model any of those cores.
         */
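
/* Editor's note (worked example, not in the original source): on a 4-core
 * v7MP system, CPU 2 reads MPIDR as 0x80000002 -- cpu_index in Aff0 and
 * bit 31 (the multiprocessing format bit) set, with the U bit (30) clear
 * because the core is part of a cluster.
 */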

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },

static int par64_read(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
    *value = ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;

static int par64_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    env->cp15.c7_par_hi = value >> 32;
    env->cp15.c7_par = value;

static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
    env->cp15.c7_par_hi = 0;
    env->cp15.c7_par = 0;

static int ttbr064_read(CPUARMState *env, const ARMCPRegInfo *ri,
    *value = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;

static int ttbr064_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
    env->cp15.c2_base0_hi = value >> 32;
    env->cp15.c2_base0 = value;

static int ttbr064_write(CPUARMState *env, const ARMCPRegInfo *ri,
    /* Writes to the 64 bit format TTBRs may change the ASID */
    return ttbr064_raw_write(env, ri, value);

static void ttbr064_reset(CPUARMState *env, const ARMCPRegInfo *ri)
    env->cp15.c2_base0_hi = 0;
    env->cp15.c2_base0 = 0;

static int ttbr164_read(CPUARMState *env, const ARMCPRegInfo *ri,
    *value = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;

static int ttbr164_write(CPUARMState *env, const ARMCPRegInfo *ri,
    env->cp15.c2_base1_hi = value >> 32;
    env->cp15.c2_base1 = value;

static void ttbr164_reset(CPUARMState *env, const ARMCPRegInfo *ri)
    env->cp15.c2_base1_hi = 0;
    env->cp15.c2_base1 = 0;

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1: the override is because these clash with the rather
     * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
     */
    { .name = "AMAIR0", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT,
      .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr064_read,
      .writefn = ttbr064_write, .raw_writefn = ttbr064_raw_write,
      .resetfn = ttbr064_reset },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr164_read,
      .writefn = ttbr164_write, .resetfn = ttbr164_reset },

static int vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    CPREG_FIELD32(env, ri) = value & ~0x1f;

static const ARMCPRegInfo trustzone_cp_reginfo[] = {
    /* Dummy implementations of registers; we don't enforce the
     * 'secure mode only' access checks. TODO: revisit as part of
     * proper fake-trustzone support.
     */
    { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
    { .name = "SDER", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sedbg),
    { .name = "NSACR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_nseac),
    { .name = "VBAR", .cp = 15, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
    { .name = "MVBAR", .cp = 15, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c12_mvbar),
      .writefn = vbar_write, .resetvalue = 0 },

static int sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    env->cp15.c1_sys = value;
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */

void register_cp_regs_for_features(ARMCPU *cpu)
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
    define_arm_cp_regs(cpu, cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            { .name = "ID_PFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr1 },
            { .name = "ID_DFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            /* 6..7 are as yet unallocated and must RAZ */
            { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
            { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,

        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement no event counters.
         */
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .readfn = pmreg_read, .writefn = pmcr_write,
            .raw_readfn = raw_read, .raw_writefn = raw_write,
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_MPU)) {
        /* These are the MPU registers prior to PMSAv6. Any new
         * PMSA core later than the ARM946 will require that we
         * implement the PMSAv6 or PMSAv7 registers, which are
         * completely different.
         */
        assert(!arm_feature(env, ARM_FEATURE_V6));
        define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
        define_arm_cp_regs(cpu, trustzone_cp_reginfo);

    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    ARMCPRegInfo id_cp_reginfo[] = {
        /* Note that the MIDR isn't a simple constant register because
         * of the TI925 behaviour where writes to another register can
         * cause the MIDR value to change.
         *
         * Unimplemented registers in the c15 0 0 0 space default to
         * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
         * and friends override accordingly.
         */
        { .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .resetvalue = cpu->midr,
          .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
          .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
          .type = ARM_CP_OVERRIDE },
        { .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
        { .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        { .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
        { .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        { .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        { .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        { .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        { .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
          .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    ARMCPRegInfo crn0_wi_reginfo = {
        .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
        .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
        .type = ARM_CP_NOP | ARM_CP_OVERRIDE
    if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
        arm_feature(env, ARM_FEATURE_STRONGARM)) {
        /* Register the blanket "writes ignored" value first to cover the
         * whole space. Then update the specific ID registers to allow write
         * access, so that they ignore writes rather than causing them to
         */
        define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
        for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
    define_arm_cp_regs(cpu, id_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr = {
            .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1,
            .access = PL1_RW, .type = ARM_CP_CONST,
            .resetvalue = cpu->reset_auxcr
        define_one_arm_cp_reg(cpu, &auxcr);
    /* Generic registers whose values depend on the implementation */
    ARMCPRegInfo sctlr = {
        .name = "SCTLR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
        .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
        .raw_writefn = raw_write,
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        /* Normally we would always end the TB on an SCTLR write, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache. Imitate this behaviour.
         */
        sctlr.type |= ARM_CP_SUPPRESS_TB_END;
    define_one_arm_cp_reg(cpu, &sctlr);
1785
ARMCPU *cpu_arm_init(const char *cpu_model)
1790
oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
1794
cpu = ARM_CPU(object_new(object_class_get_name(oc)));
1796
/* TODO this should be set centrally, once possible */
1797
object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1802
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
1804
CPUState *cs = CPU(cpu);
1805
CPUARMState *env = &cpu->env;
1807
if (arm_feature(env, ARM_FEATURE_NEON)) {
1808
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
1809
51, "arm-neon.xml", 0);
1810
} else if (arm_feature(env, ARM_FEATURE_VFP3)) {
1811
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
1812
35, "arm-vfp3.xml", 0);
1813
} else if (arm_feature(env, ARM_FEATURE_VFP)) {
1814
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
1815
19, "arm-vfp.xml", 0);
1819
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n", name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
#endif
}
1872
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
1903
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     */
    int crm, opc1, opc2;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert(r->fieldoffset || r->readfn);
        }
        if (r->access & PL3_W) {
            assert(r->fieldoffset || r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                uint32_t *key = g_new(uint32_t, 1);
                ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
                int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
                *key = ENCODE_CP_REG(r->cp, is64, r->crn, crm, opc1, opc2);
                r2->opaque = opaque;
                /* Make sure reginfo passed to helpers for wildcarded regs
                 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
                 */
                r2->crm = crm;
                r2->opc1 = opc1;
                r2->opc2 = opc2;
                /* By convention, for wildcarded registers only the first
                 * entry is used for migration; the others are marked as
                 * NO_MIGRATE so we don't try to transfer the register
                 * multiple times. Special registers (ie NOP/WFI) are
                 * never migratable.
                 */
                if ((r->type & ARM_CP_SPECIAL) ||
                    ((r->crm == CP_ANY) && crm != 0) ||
                    ((r->opc1 == CP_ANY) && opc1 != 0) ||
                    ((r->opc2 == CP_ANY) && opc2 != 0)) {
                    r2->type |= ARM_CP_NO_MIGRATE;
                }
                /* Overriding of an existing definition must be explicitly
                 * requested.
                 */
                if (!(r->type & ARM_CP_OVERRIDE)) {
                    ARMCPRegInfo *oldreg;
                    oldreg = g_hash_table_lookup(cpu->cp_regs, key);
                    if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
                        fprintf(stderr, "Register redefined: cp=%d %d bit "
                                "crn=%d crm=%d opc1=%d opc2=%d, "
                                "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                                r2->crn, r2->crm, r2->opc1, r2->opc2,
                                oldreg->name, r2->name);
                        g_assert_not_reached();
                    }
                }
                g_hash_table_insert(cpu->cp_regs, key, r2);
            }
        }
    }
}
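
/* Illustrative sketch (not part of the original source): a hypothetical
 * wildcarded definition and the keys it expands to. Given
 *   { .cp = 15, .crn = 13, .crm = CP_ANY, .opc1 = 0, .opc2 = 0, ... }
 * the loops above insert one hashtable entry per crm value 0..15, each
 * keyed by ENCODE_CP_REG(15, 0, 13, crm, 0, 0); by the convention
 * described above, every expanded entry except crm == 0 is additionally
 * marked ARM_CP_NO_MIGRATE.
 */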
1990
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpu->cp_regs, &encoded_cp);
}

int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
    return 0;
}

int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
{
    /* Helper coprocessor read function for read-as-zero registers */
    *value = 0;
    return 0;
}
2019
static int bad_mode_switch(CPUARMState *env, int mode)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        return 0;
    default:
        return 1;
    }
}
2039
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16);
}

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
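
/* Worked example (illustrative values, not from the original source):
 * with env->NF = 0x80000000 (N set), env->ZF = 5 (non-zero, so Z clear),
 * env->CF = 1, env->VF = 0, env->QF = 0, env->GE = 0x5, env->thumb = 1,
 * env->condexec_bits = 0 and uncached_cpsr holding SVC mode (0x13),
 * cpsr_read() returns
 *   0x80000000 | (1 << 29) | (0x5 << 16) | (1 << 5) | 0x13 == 0xa0050033.
 */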
2089
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    x =  ((x & 0xff000000) >> 24)
       | ((x & 0x00ff0000) >> 8)
       | ((x & 0x0000ff00) << 8)
       | ((x & 0x000000ff) << 24);
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}
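
/* Worked examples (illustrative): rbit reverses the bit order of its
 * argument, so rbit(0x00000001) == 0x80000000 and
 * rbit(0xf0000000) == 0x0000000f; the three stages above swap bytes,
 * then nibbles, then the bits within each nibble.
 */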
2142
#if defined(CONFIG_USER_ONLY)

void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception_index = -1;
}

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
                              int mmu_idx)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

/* These should probably raise undefined insn exceptions. */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}

#else
2196
/* Map CPU modes onto saved register banks. */
int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    case ARM_CPU_MODE_SMC:
        return 6;
    }
    hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
}

void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
2247
static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}

/* Switch to V7M main or process stack pointer. */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}

static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack. */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers. */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment. */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode. However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch. */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer. */
}
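
/* For reference, the v7M exception frame popped above has this layout
 * (ascending addresses from the stack pointer at exception entry):
 *   SP+0:  r0    SP+4:  r1    SP+8:  r2    SP+12: r3
 *   SP+16: r12   SP+20: lr    SP+24: pc    SP+28: xPSR
 * which mirrors the push order used in arm_v7m_cpu_do_interrupt() below.
 */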
2304
/* Exception names for debug logging; note that not all of these
 * precisely correspond to architectural exceptions.
 */
static const char * const excnames[] = {
    [EXCP_UDEF] = "Undefined Instruction",
    [EXCP_SWI] = "SVC",
    [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
    [EXCP_DATA_ABORT] = "Data Abort",
    [EXCP_IRQ] = "IRQ",
    [EXCP_FIQ] = "FIQ",
    [EXCP_BKPT] = "Breakpoint",
    [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
    [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
    [EXCP_STREX] = "QEMU intercept of STREX",
};

static inline void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
2335
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    arm_log_exception(env->exception_index);

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it. */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising. */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        /* The PC already points to the next instruction. */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    /* Align stack pointer. */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set. */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode. */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
2416
/* Handle a CPU exception. */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    assert(!IS_M(env));

    arm_log_exception(env->exception_index);

    /* TODO: Vectored interrupt controller. */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt. */
            if (env->thumb) {
                mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
                    & 0xff;
            } else {
                mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security. */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction. */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall. */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        env->cp15.c5_insn = 2;
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->cp15.c5_insn, env->cp15.c6_insn);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->cp15.c5_data, env->cp15.c6_data);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        if (semihosting_enabled) {
            cpu_abort(env, "SMC handling under semihosting not implemented\n");
            return;
        }
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SMC) {
            env->cp15.c1_scr &= ~1;
        }
        offset = env->thumb ? 2 : 0;
        new_mode = ARM_CPU_MODE_SMC;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }
    if (arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
        if (new_mode == ARM_CPU_MODE_SMC ||
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SMC) {
            addr += env->cp15.c12_mvbar;
        } else {
            if (env->cp15.c1_sys & (1 << 13)) {
                addr += 0xffff0000;
            } else {
                addr += env->cp15.c12_vbar;
            }
        }
    } else {
        /* High vectors. */
        if (env->cp15.c1_sys & (1 << 13)) {
            addr += 0xffff0000;
        }
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits. */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set. */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
2562
/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted. */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
    int prot_ro;

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    if (access_type == 1)
        prot_ro = 0;
    else
        prot_ro = PAGE_READ;

    switch (ap) {
    case 0:
        if (access_type == 1)
            return 0;
        switch ((env->cp15.c1_sys >> 8) & 3) {
        case 1:
            return is_user ? 0 : PAGE_READ;
        case 2:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return prot_ro;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : prot_ro;
    case 6:
        return prot_ro;
    case 7:
        if (!arm_feature (env, ARM_FEATURE_V6K))
            return 0;
        return prot_ro;
    default:
        abort();
    }
}
2615
static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1 & 0xffffc000;
    else
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}
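
/* Worked example (illustrative): for address 0x12345678, bits [31:20]
 * are 0x123, so the byte offset into the 16KB level 1 table is
 * ((0x12345678 >> 18) & 0x3ffc) == 0x123 * 4 == 0x48c.
 */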
2628
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault. */
        else
            code = 11; /* Page domain fault. */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            /* 1k subpages are selected by address bits [11:10], not [15:14] */
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page. */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault. */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault. */
        goto do_fault;
    }
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
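
/* Worked example of the short-format FSR value returned above
 * (illustrative): a section domain fault (code 9) for domain 3 yields
 * 9 | (3 << 4) == 0x39, i.e. the domain in bits [7:4] and the fault
 * status in bits [3:0].
 */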
2723
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1) {
            code = 9; /* Section domain fault. */
        } else {
            code = 11; /* Page domain fault. */
        }
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !is_user) {
            xn = 1;
        }
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit. */
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
            /* Access flag fault. */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault. */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
2835
/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 */
typedef enum {
    translation_fault = 1,
    access_fault = 2,
    permission_fault = 3,
} MMUFaultType;

static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
                              int access_type, int is_user,
                              hwaddr *phys_ptr, int *prot,
                              target_ulong *page_size_ptr)
{
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level = 1;
    uint32_t epd;
    uint32_t tsz;
    uint64_t ttbr;
    int ttbr_select;
    int n;
    hwaddr descaddr;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    uint32_t t0sz = extract32(env->cp15.c2_control, 0, 3);
    uint32_t t1sz = extract32(env->cp15.c2_control, 16, 3);
    if (t0sz && !extract32(address, 32 - t0sz, t0sz)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (t1sz && !extract32(~address, 32 - t1sz, t1sz)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
        epd = extract32(env->cp15.c2_control, 7, 1);
        tsz = t0sz;
    } else {
        ttbr = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
        epd = extract32(env->cp15.c2_control, 23, 1);
        tsz = t1sz;
    }

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss */
        goto do_fault;
    }

    /* If the region is small enough we will skip straight to a 2nd level
     * lookup. This affects the number of bits of the address used in
     * combination with the TTBR to find the first descriptor. ('n' here
     * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
     * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
     */
    if (tsz > 1) {
        level = 2;
        n = 14 - tsz;
    } else {
        n = 5 - tsz;
    }

    /* Clear the vaddr bits which aren't part of the within-region address,
     * so that we don't have to special case things when calculating the
     * first descriptor address.
     */
    if (tsz) {
        address &= (0xffffffffU >> tsz);
    }

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 40);
    descaddr &= ~((1ULL << n) - 1);

    tableattrs = 0;
    for (;;) {
        uint64_t descriptor;

        descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
        descriptor = ldq_phys(descaddr);
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & 0xfffffff000ULL;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1 << (39 - (9 * level)));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor and merge with table attrs */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        /* Since we're always in the Non-secure state, NSTable is ignored. */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }
    fault_type = permission_fault;
    if (is_user && !(attrs & (1 << 4))) {
        /* Unprivileged access not enabled */
        goto do_fault;
    }
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    if (attrs & (1 << 12) || (!is_user && (attrs & (1 << 11)))) {
        /* XN or PXN */
        if (access_type == 2) {
            goto do_fault;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (attrs & (1 << 5)) {
        /* Write access forbidden */
        if (access_type == 1) {
            goto do_fault;
        }
        *prot &= ~PAGE_WRITE;
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return 0;

do_fault:
    /* Long-descriptor format IFSR/DFSR value */
    return (1 << 9) | (fault_type << 2) | level;
}
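
/* Worked example of the TTBR region selection above (illustrative):
 * with t0sz == 2 and t1sz == 2, TTBR0 covers 0x00000000..0x3fffffff
 * (top two address bits zero), TTBR1 covers 0xc0000000..0xffffffff
 * (top two bits one), and anything in between takes the Translation
 * fault path.
 */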
3013
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
                             int access_type, int is_user,
                             hwaddr *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}
3074
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
 * prot and page_size are not filled in, and the return value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for MPU based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @is_user: 0 for privileged access, 1 for user
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 */
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension. */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (extended_addresses_enabled(env)) {
        return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
                                  prot, page_size);
    } else if (env->cp15.c1_sys & (1 << 23)) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}
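
/* Illustrative call with hypothetical values: translating a user-mode
 * read of address 0x8000 would look like
 *   get_phys_addr(env, 0x8000, 0, 1, &phys, &prot, &sz)
 * where access_type 0 means read and is_user 1 means unprivileged;
 * cpu_arm_handle_mmu_fault() below is the real caller.
 */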
3128
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
                              int access_type, int mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page. */
        phys_addr &= ~(hwaddr)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}

hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;

    ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);

    if (ret != 0) {
        return -1;
    }

    return phys_addr;
}
3178
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
3196
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only. */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
3233
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly. */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_I;
        else
            env->uncached_cpsr &= ~CPSR_I;
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_F;
        else
            env->uncached_cpsr &= ~CPSR_F;
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only. */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}

#endif
3302
/* Note that signed overflow is undefined in C. The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc. */

/* Signed saturating arithmetic. */

/* Perform 16-bit signed saturating addition. */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition. */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction. */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction. */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
3376
/* Unsigned saturating arithmetic. */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
3419
/* Signed modulo arithmetic. */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
3446
/* Unsigned modulo arithmetic. */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
3484
/* Halved signed arithmetic. */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic. */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
3510
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences. */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction. */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
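
/* Worked example (illustrative): with GE flags 0b0101 the mask becomes
 * 0x00ff00ff, so bytes 0 and 2 are taken from a and bytes 1 and 3 from b:
 * sel_flags(0x5, 0x11223344, 0xaabbccdd) == 0xaa22cc44.
 */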
3546
/* VFP support. We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix. */

/* Convert host exception flags to vfp form. */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}

uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
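
/* For reference, the cumulative exception bits gathered above map onto
 * the architectural FPSCR flags as follows: bit 0 IOC (invalid), bit 1
 * DZC (divide by zero), bit 2 OFC (overflow), bit 3 UFC (underflow),
 * bit 4 IXC (inexact) and bit 7 IDC (input denormal), matching
 * vfp_exceptbits_from_host() above.
 */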
3589
/* Convert vfp exception flags to target form. */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}

void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_up;
            break;
        case 2:
            i = float_round_down;
            break;
        case 3:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
3655
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
3704
/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
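
/* The flag nibbles written above encode the FPSCR NZCV bits: an equal
 * compare yields 0x6 (Z and C set), less-than 0x8 (N set), greater-than
 * 0x2 (C set) and unordered 0x3 (C and V set).
 */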
3734
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign) \
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
3768
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}
3787
/* VFP3 fixed point conversion. */
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
} \
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
                                      void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
}

VFP_CONV_FIX(sh, d, 64, int16, )
VFP_CONV_FIX(sl, d, 64, int32, )
VFP_CONV_FIX(uh, d, 64, uint16, u)
VFP_CONV_FIX(ul, d, 64, uint32, u)
VFP_CONV_FIX(sh, s, 32, int16, )
VFP_CONV_FIX(sl, s, 32, int32, )
VFP_CONV_FIX(uh, s, 32, uint16, u)
VFP_CONV_FIX(ul, s, 32, uint32, u)
#undef VFP_CONV_FIX
3820
/* Half precision conversions. */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}

float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}
3861
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}

/* NEON helpers. */
3895
/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time. */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
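
/* Worked example (illustrative): for a == 0.5 the code computes
 * q_int == 256, r == 512/256.5 (about 1.9961), s == 511, and returns
 * 511/256 == 1.99609375, the 8-bit-precision reciprocal estimate that
 * the ARM ARM specifies.
 */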
3929
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val32 = float32_val(a);

    int result_exp;
    int a_exp = (val32 & 0x7f800000) >> 23;
    int sign = val32 & 0x80000000;

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_set_sign(float32_zero, float32_is_neg(a));
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (a_exp >= 253) {
        float_raise(float_flag_underflow, s);
        return float32_set_sign(float32_zero, float32_is_neg(a));
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(val32 & 0x7fffff) << 29));

    result_exp = 253 - a_exp;

    f64 = recip_estimate(f64, env);

    val32 = sign
        | ((result_exp & 0xff) << 23)
        | ((float64_val(f64) >> 29) & 0x7fffff);
    return make_float32(val32);
}
3970
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0); */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q1 + 0.5) / 256.0); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0; */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
4025
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    int result_exp;
    float64 f64;
    uint64_t val64;
    uint32_t val;

    val = float32_val(a);

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (float32_is_neg(a)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_zero;
    }

    /* Normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */
    if ((val & 0x800000) == 0) {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3feULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    } else {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3fdULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    }

    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;

    f64 = recip_sqrt_estimate(f64, env);

    val64 = float64_val(f64);

    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
4076
uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    f64 = recip_estimate (f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
4113
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
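
/* Note on semantics: float32_muladd(a, b, c, 0, fpst) computes
 * (a * b) + c with a single rounding. Worked example (illustrative):
 * with a = 1 + 2^-23, b = 1 - 2^-23 and c = -1, the exact product is
 * 1 - 2^-46; a separate multiply rounds it to 1.0 so mul-then-add
 * gives 0, while the fused form returns -2^-46.
 */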