 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
28
#include "translate.h"
29
#include "qemu/host-utils.h"
31
#include "exec/gen-icount.h"
37
static TCGv_i64 cpu_X[32];
38
static TCGv_i64 cpu_pc;
39
static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
41
/* Load/store exclusive handling */
42
static TCGv_i64 cpu_exclusive_addr;
43
static TCGv_i64 cpu_exclusive_val;
44
static TCGv_i64 cpu_exclusive_high;
45
#ifdef CONFIG_USER_ONLY
46
static TCGv_i64 cpu_exclusive_test;
47
static TCGv_i32 cpu_exclusive_info;
50
static const char *regnames[] = {
51
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
52
"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
53
"x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
54
"x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
58
A64_SHIFT_TYPE_LSL = 0,
59
A64_SHIFT_TYPE_LSR = 1,
60
A64_SHIFT_TYPE_ASR = 2,
61
A64_SHIFT_TYPE_ROR = 3
64
/* initialize TCG globals. */
65
void a64_translate_init(void)
69
cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
70
offsetof(CPUARMState, pc),
72
for (i = 0; i < 32; i++) {
73
cpu_X[i] = tcg_global_mem_new_i64(TCG_AREG0,
74
offsetof(CPUARMState, xregs[i]),
78
cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
79
cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
80
cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
81
cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
83
cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
84
offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
85
cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
86
offsetof(CPUARMState, exclusive_val), "exclusive_val");
87
cpu_exclusive_high = tcg_global_mem_new_i64(TCG_AREG0,
88
offsetof(CPUARMState, exclusive_high), "exclusive_high");
89
#ifdef CONFIG_USER_ONLY
90
cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
91
offsetof(CPUARMState, exclusive_test), "exclusive_test");
92
cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
93
offsetof(CPUARMState, exclusive_info), "exclusive_info");
97
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
98
fprintf_function cpu_fprintf, int flags)
100
ARMCPU *cpu = ARM_CPU(cs);
101
CPUARMState *env = &cpu->env;
102
uint32_t psr = pstate_read(env);
105
cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
106
env->pc, env->xregs[31]);
107
for (i = 0; i < 31; i++) {
108
cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
110
cpu_fprintf(f, "\n");
115
cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n",
117
psr & PSTATE_N ? 'N' : '-',
118
psr & PSTATE_Z ? 'Z' : '-',
119
psr & PSTATE_C ? 'C' : '-',
120
psr & PSTATE_V ? 'V' : '-');
121
cpu_fprintf(f, "\n");
123
if (flags & CPU_DUMP_FPU) {
125
for (i = 0; i < numvfpregs; i += 2) {
126
uint64_t vlo = float64_val(env->vfp.regs[i * 2]);
127
uint64_t vhi = float64_val(env->vfp.regs[(i * 2) + 1]);
128
cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 " ",
130
vlo = float64_val(env->vfp.regs[(i + 1) * 2]);
131
vhi = float64_val(env->vfp.regs[((i + 1) * 2) + 1]);
132
cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "\n",
135
cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n",
136
vfp_get_fpcr(env), vfp_get_fpsr(env));
140
static int get_mem_index(DisasContext *s)
142
#ifdef CONFIG_USER_ONLY
149
void gen_a64_set_pc_im(uint64_t val)
151
tcg_gen_movi_i64(cpu_pc, val);
154
static void gen_exception(int excp)
156
TCGv_i32 tmp = tcg_temp_new_i32();
157
tcg_gen_movi_i32(tmp, excp);
158
gen_helper_exception(cpu_env, tmp);
159
tcg_temp_free_i32(tmp);
162
static void gen_exception_insn(DisasContext *s, int offset, int excp)
164
gen_a64_set_pc_im(s->pc - offset);
166
s->is_jmp = DISAS_EXC;
169
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
171
/* No direct tb linking with singlestep or deterministic io */
172
if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) {
176
/* Only link tbs from inside the same guest page */
177
if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
184
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
186
TranslationBlock *tb;
189
if (use_goto_tb(s, n, dest)) {
191
gen_a64_set_pc_im(dest);
192
tcg_gen_exit_tb((tcg_target_long)tb + n);
193
s->is_jmp = DISAS_TB_JUMP;
195
gen_a64_set_pc_im(dest);
196
if (s->singlestep_enabled) {
197
gen_exception(EXCP_DEBUG);
200
s->is_jmp = DISAS_JUMP;
204
static void unallocated_encoding(DisasContext *s)
206
gen_exception_insn(s, 4, EXCP_UDEF);
209
#define unsupported_encoding(s, insn) \
211
qemu_log_mask(LOG_UNIMP, \
212
"%s:%d: unsupported instruction encoding 0x%08x " \
213
"at pc=%016" PRIx64 "\n", \
214
__FILE__, __LINE__, insn, s->pc - 4); \
215
unallocated_encoding(s); \
218
static void init_tmp_a64_array(DisasContext *s)
220
#ifdef CONFIG_DEBUG_TCG
222
for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
223
TCGV_UNUSED_I64(s->tmp_a64[i]);
226
s->tmp_a64_count = 0;
229
static void free_tmp_a64(DisasContext *s)
232
for (i = 0; i < s->tmp_a64_count; i++) {
233
tcg_temp_free_i64(s->tmp_a64[i]);
235
init_tmp_a64_array(s);
238
static TCGv_i64 new_tmp_a64(DisasContext *s)
240
assert(s->tmp_a64_count < TMP_A64_MAX);
241
return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
244
static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
246
TCGv_i64 t = new_tmp_a64(s);
247
tcg_gen_movi_i64(t, 0);
252
* Register access functions
254
* These functions are used for directly accessing a register in where
255
* changes to the final register value are likely to be made. If you
256
* need to use a register for temporary calculation (e.g. index type
257
* operations) use the read_* form.
259
* B1.2.1 Register mappings
261
* In instruction register encoding 31 can refer to ZR (zero register) or
262
* the SP (stack pointer) depending on context. In QEMU's case we map SP
263
* to cpu_X[31] and ZR accesses to a temporary which can be discarded.
264
* This is the point of the _sp forms.
266
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
269
return new_tmp_a64_zero(s);
275
/* register access for when 31 == SP */
276
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
281
/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
282
* representing the register contents. This TCGv is an auto-freed
283
* temporary so it need not be explicitly freed, and may be modified.
285
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
287
TCGv_i64 v = new_tmp_a64(s);
290
tcg_gen_mov_i64(v, cpu_X[reg]);
292
tcg_gen_ext32u_i64(v, cpu_X[reg]);
295
tcg_gen_movi_i64(v, 0);
300
static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
302
TCGv_i64 v = new_tmp_a64(s);
304
tcg_gen_mov_i64(v, cpu_X[reg]);
306
tcg_gen_ext32u_i64(v, cpu_X[reg]);
311
/* Return the offset into CPUARMState of a slice (from
312
* the least significant end) of FP register Qn (ie
314
* (Note that this is not the same mapping as for A32; see cpu.h)
316
static inline int fp_reg_offset(int regno, TCGMemOp size)
318
int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
319
#ifdef HOST_WORDS_BIGENDIAN
320
offs += (8 - (1 << size));
325
/* Offset of the high half of the 128 bit vector Qn */
326
static inline int fp_reg_hi_offset(int regno)
328
return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]);
331
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
332
* than the 32 bit equivalent.
334
static inline void gen_set_NZ64(TCGv_i64 result)
336
TCGv_i64 flag = tcg_temp_new_i64();
338
tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
339
tcg_gen_trunc_i64_i32(cpu_ZF, flag);
340
tcg_gen_shri_i64(flag, result, 32);
341
tcg_gen_trunc_i64_i32(cpu_NF, flag);
342
tcg_temp_free_i64(flag);
345
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
346
static inline void gen_logic_CC(int sf, TCGv_i64 result)
349
gen_set_NZ64(result);
351
tcg_gen_trunc_i64_i32(cpu_ZF, result);
352
tcg_gen_trunc_i64_i32(cpu_NF, result);
354
tcg_gen_movi_i32(cpu_CF, 0);
355
tcg_gen_movi_i32(cpu_VF, 0);
358
/* dest = T0 + T1; compute C, N, V and Z flags */
359
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
362
TCGv_i64 result, flag, tmp;
363
result = tcg_temp_new_i64();
364
flag = tcg_temp_new_i64();
365
tmp = tcg_temp_new_i64();
367
tcg_gen_movi_i64(tmp, 0);
368
tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
370
tcg_gen_trunc_i64_i32(cpu_CF, flag);
372
gen_set_NZ64(result);
374
tcg_gen_xor_i64(flag, result, t0);
375
tcg_gen_xor_i64(tmp, t0, t1);
376
tcg_gen_andc_i64(flag, flag, tmp);
377
tcg_temp_free_i64(tmp);
378
tcg_gen_shri_i64(flag, flag, 32);
379
tcg_gen_trunc_i64_i32(cpu_VF, flag);
381
tcg_gen_mov_i64(dest, result);
382
tcg_temp_free_i64(result);
383
tcg_temp_free_i64(flag);
385
/* 32 bit arithmetic */
386
TCGv_i32 t0_32 = tcg_temp_new_i32();
387
TCGv_i32 t1_32 = tcg_temp_new_i32();
388
TCGv_i32 tmp = tcg_temp_new_i32();
390
tcg_gen_movi_i32(tmp, 0);
391
tcg_gen_trunc_i64_i32(t0_32, t0);
392
tcg_gen_trunc_i64_i32(t1_32, t1);
393
tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
394
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
395
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
396
tcg_gen_xor_i32(tmp, t0_32, t1_32);
397
tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
398
tcg_gen_extu_i32_i64(dest, cpu_NF);
400
tcg_temp_free_i32(tmp);
401
tcg_temp_free_i32(t0_32);
402
tcg_temp_free_i32(t1_32);
406
/* dest = T0 - T1; compute C, N, V and Z flags */
407
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
410
/* 64 bit arithmetic */
411
TCGv_i64 result, flag, tmp;
413
result = tcg_temp_new_i64();
414
flag = tcg_temp_new_i64();
415
tcg_gen_sub_i64(result, t0, t1);
417
gen_set_NZ64(result);
419
tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
420
tcg_gen_trunc_i64_i32(cpu_CF, flag);
422
tcg_gen_xor_i64(flag, result, t0);
423
tmp = tcg_temp_new_i64();
424
tcg_gen_xor_i64(tmp, t0, t1);
425
tcg_gen_and_i64(flag, flag, tmp);
426
tcg_temp_free_i64(tmp);
427
tcg_gen_shri_i64(flag, flag, 32);
428
tcg_gen_trunc_i64_i32(cpu_VF, flag);
429
tcg_gen_mov_i64(dest, result);
430
tcg_temp_free_i64(flag);
431
tcg_temp_free_i64(result);
433
/* 32 bit arithmetic */
434
TCGv_i32 t0_32 = tcg_temp_new_i32();
435
TCGv_i32 t1_32 = tcg_temp_new_i32();
438
tcg_gen_trunc_i64_i32(t0_32, t0);
439
tcg_gen_trunc_i64_i32(t1_32, t1);
440
tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
441
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
442
tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
443
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
444
tmp = tcg_temp_new_i32();
445
tcg_gen_xor_i32(tmp, t0_32, t1_32);
446
tcg_temp_free_i32(t0_32);
447
tcg_temp_free_i32(t1_32);
448
tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
449
tcg_temp_free_i32(tmp);
450
tcg_gen_extu_i32_i64(dest, cpu_NF);
454
/* dest = T0 + T1 + CF; do not compute flags. */
455
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
457
TCGv_i64 flag = tcg_temp_new_i64();
458
tcg_gen_extu_i32_i64(flag, cpu_CF);
459
tcg_gen_add_i64(dest, t0, t1);
460
tcg_gen_add_i64(dest, dest, flag);
461
tcg_temp_free_i64(flag);
464
tcg_gen_ext32u_i64(dest, dest);
468
/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
469
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
472
TCGv_i64 result, cf_64, vf_64, tmp;
473
result = tcg_temp_new_i64();
474
cf_64 = tcg_temp_new_i64();
475
vf_64 = tcg_temp_new_i64();
476
tmp = tcg_const_i64(0);
478
tcg_gen_extu_i32_i64(cf_64, cpu_CF);
479
tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
480
tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
481
tcg_gen_trunc_i64_i32(cpu_CF, cf_64);
482
gen_set_NZ64(result);
484
tcg_gen_xor_i64(vf_64, result, t0);
485
tcg_gen_xor_i64(tmp, t0, t1);
486
tcg_gen_andc_i64(vf_64, vf_64, tmp);
487
tcg_gen_shri_i64(vf_64, vf_64, 32);
488
tcg_gen_trunc_i64_i32(cpu_VF, vf_64);
490
tcg_gen_mov_i64(dest, result);
492
tcg_temp_free_i64(tmp);
493
tcg_temp_free_i64(vf_64);
494
tcg_temp_free_i64(cf_64);
495
tcg_temp_free_i64(result);
497
TCGv_i32 t0_32, t1_32, tmp;
498
t0_32 = tcg_temp_new_i32();
499
t1_32 = tcg_temp_new_i32();
500
tmp = tcg_const_i32(0);
502
tcg_gen_trunc_i64_i32(t0_32, t0);
503
tcg_gen_trunc_i64_i32(t1_32, t1);
504
tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
505
tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);
507
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
508
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
509
tcg_gen_xor_i32(tmp, t0_32, t1_32);
510
tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
511
tcg_gen_extu_i32_i64(dest, cpu_NF);
513
tcg_temp_free_i32(tmp);
514
tcg_temp_free_i32(t1_32);
515
tcg_temp_free_i32(t0_32);
520
* Load/Store generators
524
* Store from GPR register to memory
526
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
527
TCGv_i64 tcg_addr, int size)
530
tcg_gen_qemu_st_i64(source, tcg_addr, get_mem_index(s), MO_TE + size);
534
* Load from memory to GPR register
536
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
537
int size, bool is_signed, bool extend)
539
TCGMemOp memop = MO_TE + size;
547
tcg_gen_qemu_ld_i64(dest, tcg_addr, get_mem_index(s), memop);
549
if (extend && is_signed) {
551
tcg_gen_ext32u_i64(dest, dest);
556
* Store from FP register to memory
558
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
560
/* This writes the bottom N bits of a 128 bit wide vector to memory */
561
TCGv_i64 tmp = tcg_temp_new_i64();
562
tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(srcidx, MO_64));
564
tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TE + size);
566
TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
567
tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
568
tcg_gen_qemu_st64(tmp, tcg_addr, get_mem_index(s));
569
tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(srcidx));
570
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
571
tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
572
tcg_temp_free_i64(tcg_hiaddr);
575
tcg_temp_free_i64(tmp);
579
* Load from memory to FP register
581
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
583
/* This always zero-extends and writes to a full 128 bit wide vector */
584
TCGv_i64 tmplo = tcg_temp_new_i64();
588
TCGMemOp memop = MO_TE + size;
589
tmphi = tcg_const_i64(0);
590
tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
593
tmphi = tcg_temp_new_i64();
594
tcg_hiaddr = tcg_temp_new_i64();
596
tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), MO_TEQ);
597
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
598
tcg_gen_qemu_ld_i64(tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ);
599
tcg_temp_free_i64(tcg_hiaddr);
602
tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(destidx, MO_64));
603
tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(destidx));
605
tcg_temp_free_i64(tmplo);
606
tcg_temp_free_i64(tmphi);
610
* This utility function is for doing register extension with an
611
* optional shift. You will likely want to pass a temporary for the
612
* destination register. See DecodeRegExtend() in the ARM ARM.
614
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
615
int option, unsigned int shift)
617
int extsize = extract32(option, 0, 2);
618
bool is_signed = extract32(option, 2, 1);
623
tcg_gen_ext8s_i64(tcg_out, tcg_in);
626
tcg_gen_ext16s_i64(tcg_out, tcg_in);
629
tcg_gen_ext32s_i64(tcg_out, tcg_in);
632
tcg_gen_mov_i64(tcg_out, tcg_in);
638
tcg_gen_ext8u_i64(tcg_out, tcg_in);
641
tcg_gen_ext16u_i64(tcg_out, tcg_in);
644
tcg_gen_ext32u_i64(tcg_out, tcg_in);
647
tcg_gen_mov_i64(tcg_out, tcg_in);
653
tcg_gen_shli_i64(tcg_out, tcg_out, shift);
657
static inline void gen_check_sp_alignment(DisasContext *s)
659
/* The AArch64 architecture mandates that (if enabled via PSTATE
660
* or SCTLR bits) there is a check that SP is 16-aligned on every
661
* SP-relative load or store (with an exception generated if it is not).
662
* In line with general QEMU practice regarding misaligned accesses,
663
* we omit these checks for the sake of guest program performance.
664
* This function is provided as a hook so we can more easily add these
665
* checks in future (possibly as a "favour catching guest program bugs
666
* over speed" user selectable option).
/*
 * the instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter 3 (C3)
 * of the ARM Architecture Reference Manual (DDI0487A_a)
 */
/* C3.2.7 Unconditional branch (immediate)
678
* +----+-----------+-------------------------------------+
679
* | op | 0 0 1 0 1 | imm26 |
680
* +----+-----------+-------------------------------------+
682
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
684
uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
686
if (insn & (1 << 31)) {
687
/* C5.6.26 BL Branch with link */
688
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
691
/* C5.6.20 B Branch / C5.6.26 BL Branch with link */
692
gen_goto_tb(s, 0, addr);
695
/* C3.2.1 Compare & branch (immediate)
696
* 31 30 25 24 23 5 4 0
697
* +----+-------------+----+---------------------+--------+
698
* | sf | 0 1 1 0 1 0 | op | imm19 | Rt |
699
* +----+-------------+----+---------------------+--------+
701
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
703
unsigned int sf, op, rt;
708
sf = extract32(insn, 31, 1);
709
op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
710
rt = extract32(insn, 0, 5);
711
addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
713
tcg_cmp = read_cpu_reg(s, rt, sf);
714
label_match = gen_new_label();
716
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
717
tcg_cmp, 0, label_match);
719
gen_goto_tb(s, 0, s->pc);
720
gen_set_label(label_match);
721
gen_goto_tb(s, 1, addr);
724
/* C3.2.5 Test & branch (immediate)
725
* 31 30 25 24 23 19 18 5 4 0
726
* +----+-------------+----+-------+-------------+------+
727
* | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt |
728
* +----+-------------+----+-------+-------------+------+
730
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
732
unsigned int bit_pos, op, rt;
737
bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
738
op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
739
addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
740
rt = extract32(insn, 0, 5);
742
tcg_cmp = tcg_temp_new_i64();
743
tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
744
label_match = gen_new_label();
745
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
746
tcg_cmp, 0, label_match);
747
tcg_temp_free_i64(tcg_cmp);
748
gen_goto_tb(s, 0, s->pc);
749
gen_set_label(label_match);
750
gen_goto_tb(s, 1, addr);
753
/* C3.2.2 / C5.6.19 Conditional branch (immediate)
754
* 31 25 24 23 5 4 3 0
755
* +---------------+----+---------------------+----+------+
756
* | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond |
757
* +---------------+----+---------------------+----+------+
759
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
764
if ((insn & (1 << 4)) || (insn & (1 << 24))) {
765
unallocated_encoding(s);
768
addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
769
cond = extract32(insn, 0, 4);
772
/* genuinely conditional branches */
773
int label_match = gen_new_label();
774
arm_gen_test_cc(cond, label_match);
775
gen_goto_tb(s, 0, s->pc);
776
gen_set_label(label_match);
777
gen_goto_tb(s, 1, addr);
779
/* 0xe and 0xf are both "always" conditions */
780
gen_goto_tb(s, 0, addr);
785
static void handle_hint(DisasContext *s, uint32_t insn,
786
unsigned int op1, unsigned int op2, unsigned int crm)
788
unsigned int selector = crm << 3 | op2;
791
unallocated_encoding(s);
803
/* we treat all as NOP at least for now */
806
/* default specified as NOP equivalent */
811
static void gen_clrex(DisasContext *s, uint32_t insn)
813
tcg_gen_movi_i64(cpu_exclusive_addr, -1);
816
/* CLREX, DSB, DMB, ISB */
817
static void handle_sync(DisasContext *s, uint32_t insn,
818
unsigned int op1, unsigned int op2, unsigned int crm)
821
unallocated_encoding(s);
832
/* We don't emulate caches so barriers are no-ops */
835
unallocated_encoding(s);
840
/* C5.6.130 MSR (immediate) - move immediate to processor state field */
841
static void handle_msr_i(DisasContext *s, uint32_t insn,
842
unsigned int op1, unsigned int op2, unsigned int crm)
844
unsupported_encoding(s, insn);
847
static void gen_get_nzcv(TCGv_i64 tcg_rt)
849
TCGv_i32 tmp = tcg_temp_new_i32();
850
TCGv_i32 nzcv = tcg_temp_new_i32();
852
/* build bit 31, N */
853
tcg_gen_andi_i32(nzcv, cpu_NF, (1 << 31));
854
/* build bit 30, Z */
855
tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
856
tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
857
/* build bit 29, C */
858
tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
859
/* build bit 28, V */
860
tcg_gen_shri_i32(tmp, cpu_VF, 31);
861
tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
862
/* generate result */
863
tcg_gen_extu_i32_i64(tcg_rt, nzcv);
865
tcg_temp_free_i32(nzcv);
866
tcg_temp_free_i32(tmp);
869
static void gen_set_nzcv(TCGv_i64 tcg_rt)
872
TCGv_i32 nzcv = tcg_temp_new_i32();
874
/* take NZCV from R[t] */
875
tcg_gen_trunc_i64_i32(nzcv, tcg_rt);
878
tcg_gen_andi_i32(cpu_NF, nzcv, (1 << 31));
880
tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
881
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
883
tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
884
tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
886
tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
887
tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
888
tcg_temp_free_i32(nzcv);
891
/* C5.6.129 MRS - move from system register
892
* C5.6.131 MSR (register) - move to system register
895
* These are all essentially the same insn in 'read' and 'write'
896
* versions, with varying op0 fields.
898
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
899
unsigned int op0, unsigned int op1, unsigned int op2,
900
unsigned int crn, unsigned int crm, unsigned int rt)
902
const ARMCPRegInfo *ri;
905
ri = get_arm_cp_reginfo(s->cp_regs,
906
ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
907
crn, crm, op0, op1, op2));
910
/* Unknown register */
911
unallocated_encoding(s);
915
/* Check access permissions */
916
if (!cp_access_ok(s->current_pl, ri, isread)) {
917
unallocated_encoding(s);
921
/* Handle special cases first */
922
switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
926
tcg_rt = cpu_reg(s, rt);
928
gen_get_nzcv(tcg_rt);
930
gen_set_nzcv(tcg_rt);
937
if (use_icount && (ri->type & ARM_CP_IO)) {
941
tcg_rt = cpu_reg(s, rt);
944
if (ri->type & ARM_CP_CONST) {
945
tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
946
} else if (ri->readfn) {
948
gen_a64_set_pc_im(s->pc - 4);
949
tmpptr = tcg_const_ptr(ri);
950
gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
951
tcg_temp_free_ptr(tmpptr);
953
tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
956
if (ri->type & ARM_CP_CONST) {
957
/* If not forbidden by access permissions, treat as WI */
959
} else if (ri->writefn) {
961
gen_a64_set_pc_im(s->pc - 4);
962
tmpptr = tcg_const_ptr(ri);
963
gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
964
tcg_temp_free_ptr(tmpptr);
966
tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
970
if (use_icount && (ri->type & ARM_CP_IO)) {
971
/* I/O operations must end the TB here (whether read or write) */
973
s->is_jmp = DISAS_UPDATE;
974
} else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
975
/* We default to ending the TB on a coprocessor register write,
976
* but allow this to be suppressed by the register definition
977
* (usually only necessary to work around guest bugs).
979
s->is_jmp = DISAS_UPDATE;
984
* 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0
985
* +---------------------+---+-----+-----+-------+-------+-----+------+
986
* | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt |
987
* +---------------------+---+-----+-----+-------+-------+-----+------+
989
static void disas_system(DisasContext *s, uint32_t insn)
991
unsigned int l, op0, op1, crn, crm, op2, rt;
992
l = extract32(insn, 21, 1);
993
op0 = extract32(insn, 19, 2);
994
op1 = extract32(insn, 16, 3);
995
crn = extract32(insn, 12, 4);
996
crm = extract32(insn, 8, 4);
997
op2 = extract32(insn, 5, 3);
998
rt = extract32(insn, 0, 5);
1001
if (l || rt != 31) {
1002
unallocated_encoding(s);
1006
case 2: /* C5.6.68 HINT */
1007
handle_hint(s, insn, op1, op2, crm);
1009
case 3: /* CLREX, DSB, DMB, ISB */
1010
handle_sync(s, insn, op1, op2, crm);
1012
case 4: /* C5.6.130 MSR (immediate) */
1013
handle_msr_i(s, insn, op1, op2, crm);
1016
unallocated_encoding(s);
1021
handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
1024
/* C3.2.3 Exception generation
1026
* 31 24 23 21 20 5 4 2 1 0
1027
* +-----------------+-----+------------------------+-----+----+
1028
* | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL |
1029
* +-----------------------+------------------------+----------+
1031
static void disas_exc(DisasContext *s, uint32_t insn)
1033
int opc = extract32(insn, 21, 3);
1034
int op2_ll = extract32(insn, 0, 5);
1038
/* SVC, HVC, SMC; since we don't support the Virtualization
1039
* or TrustZone extensions these all UNDEF except SVC.
1042
unallocated_encoding(s);
1045
gen_exception_insn(s, 0, EXCP_SWI);
1049
unallocated_encoding(s);
1053
gen_exception_insn(s, 0, EXCP_BKPT);
1057
unallocated_encoding(s);
1061
unsupported_encoding(s, insn);
1064
if (op2_ll < 1 || op2_ll > 3) {
1065
unallocated_encoding(s);
1068
/* DCPS1, DCPS2, DCPS3 */
1069
unsupported_encoding(s, insn);
1072
unallocated_encoding(s);
1077
/* C3.2.7 Unconditional branch (register)
1078
* 31 25 24 21 20 16 15 10 9 5 4 0
1079
* +---------------+-------+-------+-------+------+-------+
1080
* | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 |
1081
* +---------------+-------+-------+-------+------+-------+
1083
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
1085
unsigned int opc, op2, op3, rn, op4;
1087
opc = extract32(insn, 21, 4);
1088
op2 = extract32(insn, 16, 5);
1089
op3 = extract32(insn, 10, 6);
1090
rn = extract32(insn, 5, 5);
1091
op4 = extract32(insn, 0, 5);
1093
if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
1094
unallocated_encoding(s);
1103
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
1108
unallocated_encoding(s);
1110
unsupported_encoding(s, insn);
1114
unallocated_encoding(s);
1118
tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
1119
s->is_jmp = DISAS_JUMP;
1122
/* C3.2 Branches, exception generating and system instructions */
1123
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
1125
switch (extract32(insn, 25, 7)) {
1126
case 0x0a: case 0x0b:
1127
case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
1128
disas_uncond_b_imm(s, insn);
1130
case 0x1a: case 0x5a: /* Compare & branch (immediate) */
1131
disas_comp_b_imm(s, insn);
1133
case 0x1b: case 0x5b: /* Test & branch (immediate) */
1134
disas_test_b_imm(s, insn);
1136
case 0x2a: /* Conditional branch (immediate) */
1137
disas_cond_b_imm(s, insn);
1139
case 0x6a: /* Exception generation / System */
1140
if (insn & (1 << 24)) {
1141
disas_system(s, insn);
1146
case 0x6b: /* Unconditional branch (register) */
1147
disas_uncond_b_reg(s, insn);
1150
unallocated_encoding(s);
1156
* Load/Store exclusive instructions are implemented by remembering
1157
* the value/address loaded, and seeing if these are the same
1158
* when the store is performed. This is not actually the architecturally
1159
* mandated semantics, but it works for typical guest code sequences
1160
* and avoids having to monitor regular stores.
1162
* In system emulation mode only one CPU will be running at once, so
1163
* this sequence is effectively atomic. In user emulation mode we
1164
* throw an exception and handle the atomic operation elsewhere.
1166
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
1167
TCGv_i64 addr, int size, bool is_pair)
1169
TCGv_i64 tmp = tcg_temp_new_i64();
1170
TCGMemOp memop = MO_TE + size;
1172
g_assert(size <= 3);
1173
tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), memop);
1176
TCGv_i64 addr2 = tcg_temp_new_i64();
1177
TCGv_i64 hitmp = tcg_temp_new_i64();
1179
g_assert(size >= 2);
1180
tcg_gen_addi_i64(addr2, addr, 1 << size);
1181
tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s), memop);
1182
tcg_temp_free_i64(addr2);
1183
tcg_gen_mov_i64(cpu_exclusive_high, hitmp);
1184
tcg_gen_mov_i64(cpu_reg(s, rt2), hitmp);
1185
tcg_temp_free_i64(hitmp);
1188
tcg_gen_mov_i64(cpu_exclusive_val, tmp);
1189
tcg_gen_mov_i64(cpu_reg(s, rt), tmp);
1191
tcg_temp_free_i64(tmp);
1192
tcg_gen_mov_i64(cpu_exclusive_addr, addr);
1195
#ifdef CONFIG_USER_ONLY
1196
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
1197
TCGv_i64 addr, int size, int is_pair)
1199
tcg_gen_mov_i64(cpu_exclusive_test, addr);
1200
tcg_gen_movi_i32(cpu_exclusive_info,
1201
size | is_pair << 2 | (rd << 4) | (rt << 9) | (rt2 << 14));
1202
gen_exception_insn(s, 4, EXCP_STREX);
1205
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
1206
TCGv_i64 addr, int size, int is_pair)
1208
qemu_log_mask(LOG_UNIMP,
1209
"%s:%d: system mode store_exclusive unsupported "
1210
"at pc=%016" PRIx64 "\n",
1211
__FILE__, __LINE__, s->pc - 4);
1215
/* C3.3.6 Load/store exclusive
1217
* 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
1218
* +-----+-------------+----+---+----+------+----+-------+------+------+
1219
* | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt |
1220
* +-----+-------------+----+---+----+------+----+-------+------+------+
1222
* sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
1223
* L: 0 -> store, 1 -> load
1224
* o2: 0 -> exclusive, 1 -> not
1225
* o1: 0 -> single register, 1 -> register pair
1226
* o0: 1 -> load-acquire/store-release, 0 -> not
1228
* o0 == 0 AND o2 == 1 is un-allocated
1229
* o1 == 1 is un-allocated except for 32 and 64 bit sizes
1231
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
1233
int rt = extract32(insn, 0, 5);
1234
int rn = extract32(insn, 5, 5);
1235
int rt2 = extract32(insn, 10, 5);
1236
int is_lasr = extract32(insn, 15, 1);
1237
int rs = extract32(insn, 16, 5);
1238
int is_pair = extract32(insn, 21, 1);
1239
int is_store = !extract32(insn, 22, 1);
1240
int is_excl = !extract32(insn, 23, 1);
1241
int size = extract32(insn, 30, 2);
1244
if ((!is_excl && !is_lasr) ||
1245
(is_pair && size < 2)) {
1246
unallocated_encoding(s);
1251
gen_check_sp_alignment(s);
1253
tcg_addr = read_cpu_reg_sp(s, rn, 1);
1255
/* Note that since TCG is single threaded load-acquire/store-release
1256
* semantics require no extra if (is_lasr) { ... } handling.
1261
gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
1263
gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
1266
TCGv_i64 tcg_rt = cpu_reg(s, rt);
1268
do_gpr_st(s, tcg_rt, tcg_addr, size);
1270
do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false);
1273
TCGv_i64 tcg_rt2 = cpu_reg(s, rt);
1274
tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
1276
do_gpr_st(s, tcg_rt2, tcg_addr, size);
1278
do_gpr_ld(s, tcg_rt2, tcg_addr, size, false, false);
1285
* C3.3.5 Load register (literal)
1287
* 31 30 29 27 26 25 24 23 5 4 0
1288
* +-----+-------+---+-----+-------------------+-------+
1289
* | opc | 0 1 1 | V | 0 0 | imm19 | Rt |
1290
* +-----+-------+---+-----+-------------------+-------+
1292
* V: 1 -> vector (simd/fp)
1293
* opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
1294
* 10-> 32 bit signed, 11 -> prefetch
1295
* opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
1297
static void disas_ld_lit(DisasContext *s, uint32_t insn)
1299
int rt = extract32(insn, 0, 5);
1300
int64_t imm = sextract32(insn, 5, 19) << 2;
1301
bool is_vector = extract32(insn, 26, 1);
1302
int opc = extract32(insn, 30, 2);
1303
bool is_signed = false;
1305
TCGv_i64 tcg_rt, tcg_addr;
1309
unallocated_encoding(s);
1315
/* PRFM (literal) : prefetch */
1318
size = 2 + extract32(opc, 0, 1);
1319
is_signed = extract32(opc, 1, 1);
1322
tcg_rt = cpu_reg(s, rt);
1324
tcg_addr = tcg_const_i64((s->pc - 4) + imm);
1326
do_fp_ld(s, rt, tcg_addr, size);
1328
do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
1330
tcg_temp_free_i64(tcg_addr);
1334
* C5.6.80 LDNP (Load Pair - non-temporal hint)
1335
* C5.6.81 LDP (Load Pair - non vector)
1336
* C5.6.82 LDPSW (Load Pair Signed Word - non vector)
1337
* C5.6.176 STNP (Store Pair - non-temporal hint)
1338
* C5.6.177 STP (Store Pair - non vector)
1339
* C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
1340
* C6.3.165 LDP (Load Pair of SIMD&FP)
1341
* C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
1342
* C6.3.284 STP (Store Pair of SIMD&FP)
1344
* 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
1345
* +-----+-------+---+---+-------+---+-----------------------------+
1346
* | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
1347
* +-----+-------+---+---+-------+---+-------+-------+------+------+
1349
* opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
1351
* LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
1352
* V: 0 -> GPR, 1 -> Vector
1353
* idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
1354
* 10 -> signed offset, 11 -> pre-index
1355
* L: 0 -> Store 1 -> Load
1357
* Rt, Rt2 = GPR or SIMD registers to be stored
1358
* Rn = general purpose register containing address
1359
* imm7 = signed offset (multiple of 4 or 8 depending on size)
1361
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
1363
int rt = extract32(insn, 0, 5);
1364
int rn = extract32(insn, 5, 5);
1365
int rt2 = extract32(insn, 10, 5);
1366
int64_t offset = sextract32(insn, 15, 7);
1367
int index = extract32(insn, 23, 2);
1368
bool is_vector = extract32(insn, 26, 1);
1369
bool is_load = extract32(insn, 22, 1);
1370
int opc = extract32(insn, 30, 2);
1372
bool is_signed = false;
1373
bool postindex = false;
1376
TCGv_i64 tcg_addr; /* calculated address */
1380
unallocated_encoding(s);
1387
size = 2 + extract32(opc, 1, 1);
1388
is_signed = extract32(opc, 0, 1);
1389
if (!is_load && is_signed) {
1390
unallocated_encoding(s);
1396
case 1: /* post-index */
1401
/* signed offset with "non-temporal" hint. Since we don't emulate
1402
* caches we don't care about hints to the cache system about
1403
* data access patterns, and handle this identically to plain
1407
/* There is no non-temporal-hint version of LDPSW */
1408
unallocated_encoding(s);
1413
case 2: /* signed offset, rn not updated */
1416
case 3: /* pre-index */
1425
gen_check_sp_alignment(s);
1428
tcg_addr = read_cpu_reg_sp(s, rn, 1);
1431
tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
1436
do_fp_ld(s, rt, tcg_addr, size);
1438
do_fp_st(s, rt, tcg_addr, size);
1441
TCGv_i64 tcg_rt = cpu_reg(s, rt);
1443
do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
1445
do_gpr_st(s, tcg_rt, tcg_addr, size);
1448
tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
1451
do_fp_ld(s, rt2, tcg_addr, size);
1453
do_fp_st(s, rt2, tcg_addr, size);
1456
TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
1458
do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false);
1460
do_gpr_st(s, tcg_rt2, tcg_addr, size);
1466
tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
1468
tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
1470
tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
1475
* C3.3.8 Load/store (immediate post-indexed)
1476
* C3.3.9 Load/store (immediate pre-indexed)
1477
* C3.3.12 Load/store (unscaled immediate)
1479
* 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
1480
* +----+-------+---+-----+-----+---+--------+-----+------+------+
1481
* |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
1482
* +----+-------+---+-----+-----+---+--------+-----+------+------+
1484
* idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
1485
* V = 0 -> non-vector
1486
* size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
1487
* opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
1489
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
1491
int rt = extract32(insn, 0, 5);
1492
int rn = extract32(insn, 5, 5);
1493
int imm9 = sextract32(insn, 12, 9);
1494
int opc = extract32(insn, 22, 2);
1495
int size = extract32(insn, 30, 2);
1496
int idx = extract32(insn, 10, 2);
1497
bool is_signed = false;
1498
bool is_store = false;
1499
bool is_extended = false;
1500
bool is_vector = extract32(insn, 26, 1);
1507
size |= (opc & 2) << 1;
1509
unallocated_encoding(s);
1512
is_store = ((opc & 1) == 0);
1514
if (size == 3 && opc == 2) {
1515
/* PRFM - prefetch */
1518
if (opc == 3 && size > 1) {
1519
unallocated_encoding(s);
1522
is_store = (opc == 0);
1523
is_signed = opc & (1<<1);
1524
is_extended = (size < 3) && (opc & 1);
1546
gen_check_sp_alignment(s);
1548
tcg_addr = read_cpu_reg_sp(s, rn, 1);
1551
tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
1556
do_fp_st(s, rt, tcg_addr, size);
1558
do_fp_ld(s, rt, tcg_addr, size);
1561
TCGv_i64 tcg_rt = cpu_reg(s, rt);
1563
do_gpr_st(s, tcg_rt, tcg_addr, size);
1565
do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
1570
TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
1572
tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
1574
tcg_gen_mov_i64(tcg_rn, tcg_addr);
1579
* C3.3.10 Load/store (register offset)
1581
* 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
1582
* +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
1583
* |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
1584
* +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
1587
* size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
1588
* opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
1590
* size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
1591
* opc<0>: 0 -> store, 1 -> load
1592
* V: 1 -> vector/simd
1593
* opt: extend encoding (see DecodeRegExtend)
1594
* S: if S=1 then scale (essentially index by sizeof(size))
1595
* Rt: register to transfer into/out of
1596
* Rn: address register or SP for base
1597
* Rm: offset register or ZR for offset
1599
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
1601
int rt = extract32(insn, 0, 5);
1602
int rn = extract32(insn, 5, 5);
1603
int shift = extract32(insn, 12, 1);
1604
int rm = extract32(insn, 16, 5);
1605
int opc = extract32(insn, 22, 2);
1606
int opt = extract32(insn, 13, 3);
1607
int size = extract32(insn, 30, 2);
1608
bool is_signed = false;
1609
bool is_store = false;
1610
bool is_extended = false;
1611
bool is_vector = extract32(insn, 26, 1);
1616
if (extract32(opt, 1, 1) == 0) {
1617
unallocated_encoding(s);
1622
size |= (opc & 2) << 1;
1624
unallocated_encoding(s);
1627
is_store = !extract32(opc, 0, 1);
1629
if (size == 3 && opc == 2) {
1630
/* PRFM - prefetch */
1633
if (opc == 3 && size > 1) {
1634
unallocated_encoding(s);
1637
is_store = (opc == 0);
1638
is_signed = extract32(opc, 1, 1);
1639
is_extended = (size < 3) && extract32(opc, 0, 1);
1643
gen_check_sp_alignment(s);
1645
tcg_addr = read_cpu_reg_sp(s, rn, 1);
1647
tcg_rm = read_cpu_reg(s, rm, 1);
1648
ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
1650
tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
1654
do_fp_st(s, rt, tcg_addr, size);
1656
do_fp_ld(s, rt, tcg_addr, size);
1659
TCGv_i64 tcg_rt = cpu_reg(s, rt);
1661
do_gpr_st(s, tcg_rt, tcg_addr, size);
1663
do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
1669
* C3.3.13 Load/store (unsigned immediate)
1671
* 31 30 29 27 26 25 24 23 22 21 10 9 5
1672
* +----+-------+---+-----+-----+------------+-------+------+
1673
* |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
1674
* +----+-------+---+-----+-----+------------+-------+------+
1677
* size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
1678
* opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
1680
* size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
1681
* opc<0>: 0 -> store, 1 -> load
1682
* Rn: base address register (inc SP)
1683
* Rt: target register
1685
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
1687
int rt = extract32(insn, 0, 5);
1688
int rn = extract32(insn, 5, 5);
1689
unsigned int imm12 = extract32(insn, 10, 12);
1690
bool is_vector = extract32(insn, 26, 1);
1691
int size = extract32(insn, 30, 2);
1692
int opc = extract32(insn, 22, 2);
1693
unsigned int offset;
1698
bool is_signed = false;
1699
bool is_extended = false;
1702
size |= (opc & 2) << 1;
1704
unallocated_encoding(s);
1707
is_store = !extract32(opc, 0, 1);
1709
if (size == 3 && opc == 2) {
1710
/* PRFM - prefetch */
1713
if (opc == 3 && size > 1) {
1714
unallocated_encoding(s);
1717
is_store = (opc == 0);
1718
is_signed = extract32(opc, 1, 1);
1719
is_extended = (size < 3) && extract32(opc, 0, 1);
1723
gen_check_sp_alignment(s);
1725
tcg_addr = read_cpu_reg_sp(s, rn, 1);
1726
offset = imm12 << size;
1727
tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
1731
do_fp_st(s, rt, tcg_addr, size);
1733
do_fp_ld(s, rt, tcg_addr, size);
1736
TCGv_i64 tcg_rt = cpu_reg(s, rt);
1738
do_gpr_st(s, tcg_rt, tcg_addr, size);
1740
do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
1745
/* Load/store register (immediate forms) */
1746
static void disas_ldst_reg_imm(DisasContext *s, uint32_t insn)
1748
switch (extract32(insn, 10, 2)) {
1749
case 0: case 1: case 3:
1750
/* Load/store register (unscaled immediate) */
1751
/* Load/store immediate pre/post-indexed */
1752
disas_ldst_reg_imm9(s, insn);
1755
/* Load/store register unprivileged */
1756
unsupported_encoding(s, insn);
1759
unallocated_encoding(s);
1764
/* Load/store register (all forms) */
1765
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
1767
switch (extract32(insn, 24, 2)) {
1769
if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
1770
disas_ldst_reg_roffset(s, insn);
1772
disas_ldst_reg_imm(s, insn);
1776
disas_ldst_reg_unsigned_imm(s, insn);
1779
unallocated_encoding(s);
1784
/* AdvSIMD load/store multiple structures */
1785
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
1787
unsupported_encoding(s, insn);
1790
/* AdvSIMD load/store single structure */
1791
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
1793
unsupported_encoding(s, insn);
1796
/* C3.3 Loads and stores */
1797
static void disas_ldst(DisasContext *s, uint32_t insn)
1799
switch (extract32(insn, 24, 6)) {
1800
case 0x08: /* Load/store exclusive */
1801
disas_ldst_excl(s, insn);
1803
case 0x18: case 0x1c: /* Load register (literal) */
1804
disas_ld_lit(s, insn);
1806
case 0x28: case 0x29:
1807
case 0x2c: case 0x2d: /* Load/store pair (all forms) */
1808
disas_ldst_pair(s, insn);
1810
case 0x38: case 0x39:
1811
case 0x3c: case 0x3d: /* Load/store register (all forms) */
1812
disas_ldst_reg(s, insn);
1814
case 0x0c: /* AdvSIMD load/store multiple structures */
1815
disas_ldst_multiple_struct(s, insn);
1817
case 0x0d: /* AdvSIMD load/store single structure */
1818
disas_ldst_single_struct(s, insn);
1821
unallocated_encoding(s);
1826
/* C3.4.6 PC-rel. addressing
1827
* 31 30 29 28 24 23 5 4 0
1828
* +----+-------+-----------+-------------------+------+
1829
* | op | immlo | 1 0 0 0 0 | immhi | Rd |
1830
* +----+-------+-----------+-------------------+------+
1832
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
1834
unsigned int page, rd;
1838
page = extract32(insn, 31, 1);
1839
/* SignExtend(immhi:immlo) -> offset */
1840
offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
1841
rd = extract32(insn, 0, 5);
1845
/* ADRP (page based) */
1850
tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
1854
* C3.4.1 Add/subtract (immediate)
1856
* 31 30 29 28 24 23 22 21 10 9 5 4 0
1857
* +--+--+--+-----------+-----+-------------+-----+-----+
1858
* |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd |
1859
* +--+--+--+-----------+-----+-------------+-----+-----+
1861
* sf: 0 -> 32bit, 1 -> 64bit
1862
* op: 0 -> add , 1 -> sub
1864
* shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
1866
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
1868
int rd = extract32(insn, 0, 5);
1869
int rn = extract32(insn, 5, 5);
1870
uint64_t imm = extract32(insn, 10, 12);
1871
int shift = extract32(insn, 22, 2);
1872
bool setflags = extract32(insn, 29, 1);
1873
bool sub_op = extract32(insn, 30, 1);
1874
bool is_64bit = extract32(insn, 31, 1);
1876
TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
1877
TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
1878
TCGv_i64 tcg_result;
1887
unallocated_encoding(s);
1891
tcg_result = tcg_temp_new_i64();
1894
tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
1896
tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
1899
TCGv_i64 tcg_imm = tcg_const_i64(imm);
1901
gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
1903
gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
1905
tcg_temp_free_i64(tcg_imm);
1909
tcg_gen_mov_i64(tcg_rd, tcg_result);
1911
tcg_gen_ext32u_i64(tcg_rd, tcg_result);
1914
tcg_temp_free_i64(tcg_result);
/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    /* double the element width each iteration, ORing in a copy */
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}
/* Return a value with the bottom len bits set (where 0 < len <= 64) */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    /* shift-right form avoids UB for length == 64 (no shift by 64) */
    return ~0ULL >> (64 - length);
}
1938
/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
1939
* only require the wmask. Returns false if the imms/immr/immn are a reserved
1940
* value (ie should cause a guest UNDEF exception), and true if they are
1941
* valid, in which case the decoded bit pattern is written to result.
1943
static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
1944
unsigned int imms, unsigned int immr)
1947
unsigned e, levels, s, r;
1950
assert(immn < 2 && imms < 64 && immr < 64);
1952
/* The bit patterns we create here are 64 bit patterns which
1953
* are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
1954
* 64 bits each. Each element contains the same value: a run
1955
* of between 1 and e-1 non-zero bits, rotated within the
1956
* element by between 0 and e-1 bits.
1958
* The element size and run length are encoded into immn (1 bit)
1959
* and imms (6 bits) as follows:
1960
* 64 bit elements: immn = 1, imms = <length of run - 1>
1961
* 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
1962
* 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
1963
* 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
1964
* 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
1965
* 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
1966
* Notice that immn = 0, imms = 11111x is the only combination
1967
* not covered by one of the above options; this is reserved.
1968
* Further, <length of run - 1> all-ones is a reserved pattern.
1970
* In all cases the rotation is by immr % e (and immr is 6 bits).
1973
/* First determine the element size */
1974
len = 31 - clz32((immn << 6) | (~imms & 0x3f));
1976
/* This is the immn == 0, imms == 0x11111x case */
1986
/* <length of run - 1> mustn't be all-ones. */
1990
/* Create the value of one element: s+1 set bits rotated
1991
* by r within the element (which is e bits wide)...
1993
mask = bitmask64(s + 1);
1994
mask = (mask >> r) | (mask << (e - r));
1995
/* ...then replicate the element over the whole 64 bit value */
1996
mask = bitfield_replicate(mask, e);
2001
/* C3.4.4 Logical (immediate)
2002
* 31 30 29 28 23 22 21 16 15 10 9 5 4 0
2003
* +----+-----+-------------+---+------+------+------+------+
2004
* | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
2005
* +----+-----+-------------+---+------+------+------+------+
2007
static void disas_logic_imm(DisasContext *s, uint32_t insn)
2009
unsigned int sf, opc, is_n, immr, imms, rn, rd;
2010
TCGv_i64 tcg_rd, tcg_rn;
2012
bool is_and = false;
2014
sf = extract32(insn, 31, 1);
2015
opc = extract32(insn, 29, 2);
2016
is_n = extract32(insn, 22, 1);
2017
immr = extract32(insn, 16, 6);
2018
imms = extract32(insn, 10, 6);
2019
rn = extract32(insn, 5, 5);
2020
rd = extract32(insn, 0, 5);
2023
unallocated_encoding(s);
2027
if (opc == 0x3) { /* ANDS */
2028
tcg_rd = cpu_reg(s, rd);
2030
tcg_rd = cpu_reg_sp(s, rd);
2032
tcg_rn = cpu_reg(s, rn);
2034
if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
2035
/* some immediate field values are reserved */
2036
unallocated_encoding(s);
2041
wmask &= 0xffffffff;
2045
case 0x3: /* ANDS */
2047
tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
2051
tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
2054
tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
2057
assert(FALSE); /* must handle all above */
2061
if (!sf && !is_and) {
2062
/* zero extend final result; we know we can skip this for AND
2063
* since the immediate had the high 32 bits clear.
2065
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2068
if (opc == 3) { /* ANDS */
2069
gen_logic_CC(sf, tcg_rd);
2074
* C3.4.5 Move wide (immediate)
2076
* 31 30 29 28 23 22 21 20 5 4 0
2077
* +--+-----+-------------+-----+----------------+------+
2078
* |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
2079
* +--+-----+-------------+-----+----------------+------+
2081
* sf: 0 -> 32 bit, 1 -> 64 bit
2082
* opc: 00 -> N, 10 -> Z, 11 -> K
2083
* hw: shift/16 (0,16, and sf only 32, 48)
2085
static void disas_movw_imm(DisasContext *s, uint32_t insn)
2087
int rd = extract32(insn, 0, 5);
2088
uint64_t imm = extract32(insn, 5, 16);
2089
int sf = extract32(insn, 31, 1);
2090
int opc = extract32(insn, 29, 2);
2091
int pos = extract32(insn, 21, 2) << 4;
2092
TCGv_i64 tcg_rd = cpu_reg(s, rd);
2095
if (!sf && (pos >= 32)) {
2096
unallocated_encoding(s);
2110
tcg_gen_movi_i64(tcg_rd, imm);
2113
tcg_imm = tcg_const_i64(imm);
2114
tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
2115
tcg_temp_free_i64(tcg_imm);
2117
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2121
unallocated_encoding(s);
2127
* 31 30 29 28 23 22 21 16 15 10 9 5 4 0
2128
* +----+-----+-------------+---+------+------+------+------+
2129
* | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
2130
* +----+-----+-------------+---+------+------+------+------+
2132
static void disas_bitfield(DisasContext *s, uint32_t insn)
2134
unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
2135
TCGv_i64 tcg_rd, tcg_tmp;
2137
sf = extract32(insn, 31, 1);
2138
opc = extract32(insn, 29, 2);
2139
n = extract32(insn, 22, 1);
2140
ri = extract32(insn, 16, 6);
2141
si = extract32(insn, 10, 6);
2142
rn = extract32(insn, 5, 5);
2143
rd = extract32(insn, 0, 5);
2144
bitsize = sf ? 64 : 32;
2146
if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
2147
unallocated_encoding(s);
2151
tcg_rd = cpu_reg(s, rd);
2152
tcg_tmp = read_cpu_reg(s, rn, sf);
2154
/* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */
2156
if (opc != 1) { /* SBFM or UBFM */
2157
tcg_gen_movi_i64(tcg_rd, 0);
2160
/* do the bit move operation */
2162
/* Wd<s-r:0> = Wn<s:r> */
2163
tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
2165
len = (si - ri) + 1;
2167
/* Wd<32+s-r,32-r> = Wn<s:0> */
2172
tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
2174
if (opc == 0) { /* SBFM - sign extend the destination field */
2175
tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
2176
tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
2179
if (!sf) { /* zero extend final result */
2180
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2185
* 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
2186
* +----+------+-------------+---+----+------+--------+------+------+
2187
* | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
2188
* +----+------+-------------+---+----+------+--------+------+------+
2190
static void disas_extract(DisasContext *s, uint32_t insn)
2192
unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
2194
sf = extract32(insn, 31, 1);
2195
n = extract32(insn, 22, 1);
2196
rm = extract32(insn, 16, 5);
2197
imm = extract32(insn, 10, 6);
2198
rn = extract32(insn, 5, 5);
2199
rd = extract32(insn, 0, 5);
2200
op21 = extract32(insn, 29, 2);
2201
op0 = extract32(insn, 21, 1);
2202
bitsize = sf ? 64 : 32;
2204
if (sf != n || op21 || op0 || imm >= bitsize) {
2205
unallocated_encoding(s);
2207
TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
2209
tcg_rd = cpu_reg(s, rd);
2212
/* OPTME: we can special case rm==rn as a rotate */
2213
tcg_rm = read_cpu_reg(s, rm, sf);
2214
tcg_rn = read_cpu_reg(s, rn, sf);
2215
tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
2216
tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
2217
tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
2219
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2222
/* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
2223
* so an extract from bit 0 is a special case.
2226
tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
2228
tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
2235
/* C3.4 Data processing - immediate */
2236
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
2238
switch (extract32(insn, 23, 6)) {
2239
case 0x20: case 0x21: /* PC-rel. addressing */
2240
disas_pc_rel_adr(s, insn);
2242
case 0x22: case 0x23: /* Add/subtract (immediate) */
2243
disas_add_sub_imm(s, insn);
2245
case 0x24: /* Logical (immediate) */
2246
disas_logic_imm(s, insn);
2248
case 0x25: /* Move wide (immediate) */
2249
disas_movw_imm(s, insn);
2251
case 0x26: /* Bitfield */
2252
disas_bitfield(s, insn);
2254
case 0x27: /* Extract */
2255
disas_extract(s, insn);
2258
unallocated_encoding(s);
2263
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
2264
* Note that it is the caller's responsibility to ensure that the
2265
* shift amount is in range (ie 0..31 or 0..63) and provide the ARM
2266
* mandated semantics for out of range shifts.
2268
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
2269
enum a64_shift_type shift_type, TCGv_i64 shift_amount)
2271
switch (shift_type) {
2272
case A64_SHIFT_TYPE_LSL:
2273
tcg_gen_shl_i64(dst, src, shift_amount);
2275
case A64_SHIFT_TYPE_LSR:
2276
tcg_gen_shr_i64(dst, src, shift_amount);
2278
case A64_SHIFT_TYPE_ASR:
2280
tcg_gen_ext32s_i64(dst, src);
2282
tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
2284
case A64_SHIFT_TYPE_ROR:
2286
tcg_gen_rotr_i64(dst, src, shift_amount);
2289
t0 = tcg_temp_new_i32();
2290
t1 = tcg_temp_new_i32();
2291
tcg_gen_trunc_i64_i32(t0, src);
2292
tcg_gen_trunc_i64_i32(t1, shift_amount);
2293
tcg_gen_rotr_i32(t0, t0, t1);
2294
tcg_gen_extu_i32_i64(dst, t0);
2295
tcg_temp_free_i32(t0);
2296
tcg_temp_free_i32(t1);
2300
assert(FALSE); /* all shift types should be handled */
2304
if (!sf) { /* zero extend final result */
2305
tcg_gen_ext32u_i64(dst, dst);
2309
/* Shift a TCGv src by immediate, put result in dst.
2310
* The shift amount must be in range (this should always be true as the
2311
* relevant instructions will UNDEF on bad shift immediates).
2313
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
2314
enum a64_shift_type shift_type, unsigned int shift_i)
2316
assert(shift_i < (sf ? 64 : 32));
2319
tcg_gen_mov_i64(dst, src);
2321
TCGv_i64 shift_const;
2323
shift_const = tcg_const_i64(shift_i);
2324
shift_reg(dst, src, sf, shift_type, shift_const);
2325
tcg_temp_free_i64(shift_const);
2329
/* C3.5.10 Logical (shifted register)
2330
* 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
2331
* +----+-----+-----------+-------+---+------+--------+------+------+
2332
* | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
2333
* +----+-----+-----------+-------+---+------+--------+------+------+
2335
static void disas_logic_reg(DisasContext *s, uint32_t insn)
2337
TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
2338
unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
2340
sf = extract32(insn, 31, 1);
2341
opc = extract32(insn, 29, 2);
2342
shift_type = extract32(insn, 22, 2);
2343
invert = extract32(insn, 21, 1);
2344
rm = extract32(insn, 16, 5);
2345
shift_amount = extract32(insn, 10, 6);
2346
rn = extract32(insn, 5, 5);
2347
rd = extract32(insn, 0, 5);
2349
if (!sf && (shift_amount & (1 << 5))) {
2350
unallocated_encoding(s);
2354
tcg_rd = cpu_reg(s, rd);
2356
if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
2357
/* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
2358
* register-register MOV and MVN, so it is worth special casing.
2360
tcg_rm = cpu_reg(s, rm);
2362
tcg_gen_not_i64(tcg_rd, tcg_rm);
2364
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2368
tcg_gen_mov_i64(tcg_rd, tcg_rm);
2370
tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
2376
tcg_rm = read_cpu_reg(s, rm, sf);
2379
shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
2382
tcg_rn = cpu_reg(s, rn);
2384
switch (opc | (invert << 2)) {
2387
tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
2390
tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
2393
tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
2397
tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
2400
tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
2403
tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
2411
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2415
gen_logic_CC(sf, tcg_rd);
2420
* C3.5.1 Add/subtract (extended register)
2422
* 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
2423
* +--+--+--+-----------+-----+--+-------+------+------+----+----+
2424
* |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
2425
* +--+--+--+-----------+-----+--+-------+------+------+----+----+
2427
* sf: 0 -> 32bit, 1 -> 64bit
2428
* op: 0 -> add , 1 -> sub
2431
* option: extension type (see DecodeRegExtend)
2432
* imm3: optional shift to Rm
2434
* Rd = Rn + LSL(extend(Rm), amount)
2436
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
2438
int rd = extract32(insn, 0, 5);
2439
int rn = extract32(insn, 5, 5);
2440
int imm3 = extract32(insn, 10, 3);
2441
int option = extract32(insn, 13, 3);
2442
int rm = extract32(insn, 16, 5);
2443
bool setflags = extract32(insn, 29, 1);
2444
bool sub_op = extract32(insn, 30, 1);
2445
bool sf = extract32(insn, 31, 1);
2447
TCGv_i64 tcg_rm, tcg_rn; /* temps */
2449
TCGv_i64 tcg_result;
2452
unallocated_encoding(s);
2456
/* non-flag setting ops may use SP */
2458
tcg_rn = read_cpu_reg_sp(s, rn, sf);
2459
tcg_rd = cpu_reg_sp(s, rd);
2461
tcg_rn = read_cpu_reg(s, rn, sf);
2462
tcg_rd = cpu_reg(s, rd);
2465
tcg_rm = read_cpu_reg(s, rm, sf);
2466
ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
2468
tcg_result = tcg_temp_new_i64();
2472
tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
2474
tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
2478
gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
2480
gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
2485
tcg_gen_mov_i64(tcg_rd, tcg_result);
2487
tcg_gen_ext32u_i64(tcg_rd, tcg_result);
2490
tcg_temp_free_i64(tcg_result);
2494
* C3.5.2 Add/subtract (shifted register)
2496
* 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
2497
* +--+--+--+-----------+-----+--+-------+---------+------+------+
2498
* |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
2499
* +--+--+--+-----------+-----+--+-------+---------+------+------+
2501
* sf: 0 -> 32bit, 1 -> 64bit
2502
* op: 0 -> add , 1 -> sub
2504
* shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
2505
* imm6: Shift amount to apply to Rm before the add/sub
2507
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
2509
int rd = extract32(insn, 0, 5);
2510
int rn = extract32(insn, 5, 5);
2511
int imm6 = extract32(insn, 10, 6);
2512
int rm = extract32(insn, 16, 5);
2513
int shift_type = extract32(insn, 22, 2);
2514
bool setflags = extract32(insn, 29, 1);
2515
bool sub_op = extract32(insn, 30, 1);
2516
bool sf = extract32(insn, 31, 1);
2518
TCGv_i64 tcg_rd = cpu_reg(s, rd);
2519
TCGv_i64 tcg_rn, tcg_rm;
2520
TCGv_i64 tcg_result;
2522
if ((shift_type == 3) || (!sf && (imm6 > 31))) {
2523
unallocated_encoding(s);
2527
tcg_rn = read_cpu_reg(s, rn, sf);
2528
tcg_rm = read_cpu_reg(s, rm, sf);
2530
shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
2532
tcg_result = tcg_temp_new_i64();
2536
tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
2538
tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
2542
gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
2544
gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
2549
tcg_gen_mov_i64(tcg_rd, tcg_result);
2551
tcg_gen_ext32u_i64(tcg_rd, tcg_result);
2554
tcg_temp_free_i64(tcg_result);
2557
/* C3.5.9 Data-processing (3 source)
2559
31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
2560
+--+------+-----------+------+------+----+------+------+------+
2561
|sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
2562
+--+------+-----------+------+------+----+------+------+------+
2565
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
2567
int rd = extract32(insn, 0, 5);
2568
int rn = extract32(insn, 5, 5);
2569
int ra = extract32(insn, 10, 5);
2570
int rm = extract32(insn, 16, 5);
2571
int op_id = (extract32(insn, 29, 3) << 4) |
2572
(extract32(insn, 21, 3) << 1) |
2573
extract32(insn, 15, 1);
2574
bool sf = extract32(insn, 31, 1);
2575
bool is_sub = extract32(op_id, 0, 1);
2576
bool is_high = extract32(op_id, 2, 1);
2577
bool is_signed = false;
2582
/* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
2584
case 0x42: /* SMADDL */
2585
case 0x43: /* SMSUBL */
2586
case 0x44: /* SMULH */
2589
case 0x0: /* MADD (32bit) */
2590
case 0x1: /* MSUB (32bit) */
2591
case 0x40: /* MADD (64bit) */
2592
case 0x41: /* MSUB (64bit) */
2593
case 0x4a: /* UMADDL */
2594
case 0x4b: /* UMSUBL */
2595
case 0x4c: /* UMULH */
2598
unallocated_encoding(s);
2603
TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
2604
TCGv_i64 tcg_rd = cpu_reg(s, rd);
2605
TCGv_i64 tcg_rn = cpu_reg(s, rn);
2606
TCGv_i64 tcg_rm = cpu_reg(s, rm);
2609
tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
2611
tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
2614
tcg_temp_free_i64(low_bits);
2618
tcg_op1 = tcg_temp_new_i64();
2619
tcg_op2 = tcg_temp_new_i64();
2620
tcg_tmp = tcg_temp_new_i64();
2623
tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
2624
tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
2627
tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
2628
tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
2630
tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
2631
tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
2635
if (ra == 31 && !is_sub) {
2636
/* Special-case MADD with rA == XZR; it is the standard MUL alias */
2637
tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
2639
tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
2641
tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
2643
tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
2648
tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
2651
tcg_temp_free_i64(tcg_op1);
2652
tcg_temp_free_i64(tcg_op2);
2653
tcg_temp_free_i64(tcg_tmp);
2656
/* C3.5.3 - Add/subtract (with carry)
2657
* 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
2658
* +--+--+--+------------------------+------+---------+------+-----+
2659
* |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd |
2660
* +--+--+--+------------------------+------+---------+------+-----+
2664
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
2666
unsigned int sf, op, setflags, rm, rn, rd;
2667
TCGv_i64 tcg_y, tcg_rn, tcg_rd;
2669
if (extract32(insn, 10, 6) != 0) {
2670
unallocated_encoding(s);
2674
sf = extract32(insn, 31, 1);
2675
op = extract32(insn, 30, 1);
2676
setflags = extract32(insn, 29, 1);
2677
rm = extract32(insn, 16, 5);
2678
rn = extract32(insn, 5, 5);
2679
rd = extract32(insn, 0, 5);
2681
tcg_rd = cpu_reg(s, rd);
2682
tcg_rn = cpu_reg(s, rn);
2685
tcg_y = new_tmp_a64(s);
2686
tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
2688
tcg_y = cpu_reg(s, rm);
2692
gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
2694
gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
2698
/* C3.5.4 - C3.5.5 Conditional compare (immediate / register)
2699
* 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
2700
* +--+--+--+------------------------+--------+------+----+--+------+--+-----+
2701
* |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
2702
* +--+--+--+------------------------+--------+------+----+--+------+--+-----+
2705
static void disas_cc(DisasContext *s, uint32_t insn)
2707
unsigned int sf, op, y, cond, rn, nzcv, is_imm;
2708
int label_continue = -1;
2709
TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
2711
if (!extract32(insn, 29, 1)) {
2712
unallocated_encoding(s);
2715
if (insn & (1 << 10 | 1 << 4)) {
2716
unallocated_encoding(s);
2719
sf = extract32(insn, 31, 1);
2720
op = extract32(insn, 30, 1);
2721
is_imm = extract32(insn, 11, 1);
2722
y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
2723
cond = extract32(insn, 12, 4);
2724
rn = extract32(insn, 5, 5);
2725
nzcv = extract32(insn, 0, 4);
2727
if (cond < 0x0e) { /* not always */
2728
int label_match = gen_new_label();
2729
label_continue = gen_new_label();
2730
arm_gen_test_cc(cond, label_match);
2732
tcg_tmp = tcg_temp_new_i64();
2733
tcg_gen_movi_i64(tcg_tmp, nzcv << 28);
2734
gen_set_nzcv(tcg_tmp);
2735
tcg_temp_free_i64(tcg_tmp);
2736
tcg_gen_br(label_continue);
2737
gen_set_label(label_match);
2739
/* match, or condition is always */
2741
tcg_y = new_tmp_a64(s);
2742
tcg_gen_movi_i64(tcg_y, y);
2744
tcg_y = cpu_reg(s, y);
2746
tcg_rn = cpu_reg(s, rn);
2748
tcg_tmp = tcg_temp_new_i64();
2750
gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
2752
gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
2754
tcg_temp_free_i64(tcg_tmp);
2756
if (cond < 0x0e) { /* continue */
2757
gen_set_label(label_continue);
2761
/* C3.5.6 Conditional select
2762
* 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
2763
* +----+----+---+-----------------+------+------+-----+------+------+
2764
* | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
2765
* +----+----+---+-----------------+------+------+-----+------+------+
2767
static void disas_cond_select(DisasContext *s, uint32_t insn)
2769
unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
2770
TCGv_i64 tcg_rd, tcg_src;
2772
if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
2773
/* S == 1 or op2<1> == 1 */
2774
unallocated_encoding(s);
2777
sf = extract32(insn, 31, 1);
2778
else_inv = extract32(insn, 30, 1);
2779
rm = extract32(insn, 16, 5);
2780
cond = extract32(insn, 12, 4);
2781
else_inc = extract32(insn, 10, 1);
2782
rn = extract32(insn, 5, 5);
2783
rd = extract32(insn, 0, 5);
2786
/* silly no-op write; until we use movcond we must special-case
2787
* this to avoid a dead temporary across basic blocks.
2792
tcg_rd = cpu_reg(s, rd);
2794
if (cond >= 0x0e) { /* condition "always" */
2795
tcg_src = read_cpu_reg(s, rn, sf);
2796
tcg_gen_mov_i64(tcg_rd, tcg_src);
2798
/* OPTME: we could use movcond here, at the cost of duplicating
2799
* a lot of the arm_gen_test_cc() logic.
2801
int label_match = gen_new_label();
2802
int label_continue = gen_new_label();
2804
arm_gen_test_cc(cond, label_match);
2806
tcg_src = cpu_reg(s, rm);
2808
if (else_inv && else_inc) {
2809
tcg_gen_neg_i64(tcg_rd, tcg_src);
2810
} else if (else_inv) {
2811
tcg_gen_not_i64(tcg_rd, tcg_src);
2812
} else if (else_inc) {
2813
tcg_gen_addi_i64(tcg_rd, tcg_src, 1);
2815
tcg_gen_mov_i64(tcg_rd, tcg_src);
2818
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2820
tcg_gen_br(label_continue);
2822
gen_set_label(label_match);
2823
tcg_src = read_cpu_reg(s, rn, sf);
2824
tcg_gen_mov_i64(tcg_rd, tcg_src);
2826
gen_set_label(label_continue);
2830
/* C5.6.40 CLZ: count leading zero bits of Rn into Rd.
 * sf selects 64-bit (helper on the full register) vs 32-bit
 * (truncate, 32-bit helper, zero-extend the result).
 */
static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_clz64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_clz(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
/* C5.6.39 CLS: count leading sign bits of Rn into Rd.
 * Same sf-based 64/32-bit split as handle_clz.
 */
static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_cls64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_cls32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
/* C5.6.148 RBIT: reverse the bit order of Rn into Rd.
 * Same sf-based 64/32-bit split as handle_clz.
 */
static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
2885
static void handle_rev64(DisasContext *s, unsigned int sf,
2886
unsigned int rn, unsigned int rd)
2889
unallocated_encoding(s);
2892
tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
2895
/* C5.6.149 REV with sf==0, opcode==2
2896
* C5.6.151 REV32 (sf==1, opcode==2)
2898
static void handle_rev32(DisasContext *s, unsigned int sf,
2899
unsigned int rn, unsigned int rd)
2901
TCGv_i64 tcg_rd = cpu_reg(s, rd);
2904
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
2905
TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
2907
/* bswap32_i64 requires zero high word */
2908
tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
2909
tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
2910
tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
2911
tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
2912
tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
2914
tcg_temp_free_i64(tcg_tmp);
2916
tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
2917
tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
2921
/* C5.6.150 REV16 (opcode==1) */
2922
static void handle_rev16(DisasContext *s, unsigned int sf,
2923
unsigned int rn, unsigned int rd)
2925
TCGv_i64 tcg_rd = cpu_reg(s, rd);
2926
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
2927
TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
2929
tcg_gen_andi_i64(tcg_tmp, tcg_rn, 0xffff);
2930
tcg_gen_bswap16_i64(tcg_rd, tcg_tmp);
2932
tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
2933
tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
2934
tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
2935
tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 16, 16);
2938
tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
2939
tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
2940
tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
2941
tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 32, 16);
2943
tcg_gen_shri_i64(tcg_tmp, tcg_rn, 48);
2944
tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
2945
tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 48, 16);
2948
tcg_temp_free_i64(tcg_tmp);
2951
/* C3.5.7 Data-processing (1 source)
2952
* 31 30 29 28 21 20 16 15 10 9 5 4 0
2953
* +----+---+---+-----------------+---------+--------+------+------+
2954
* | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
2955
* +----+---+---+-----------------+---------+--------+------+------+
2957
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
2959
unsigned int sf, opcode, rn, rd;
2961
if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
2962
unallocated_encoding(s);
2966
sf = extract32(insn, 31, 1);
2967
opcode = extract32(insn, 10, 6);
2968
rn = extract32(insn, 5, 5);
2969
rd = extract32(insn, 0, 5);
2973
handle_rbit(s, sf, rn, rd);
2976
handle_rev16(s, sf, rn, rd);
2979
handle_rev32(s, sf, rn, rd);
2982
handle_rev64(s, sf, rn, rd);
2985
handle_clz(s, sf, rn, rd);
2988
handle_cls(s, sf, rn, rd);
2993
/* C5.6.196 UDIV, C5.6.159 SDIV: Rd = Rn / Rm.
 * The 32-bit signed case needs explicit sign-extension of the inputs
 * so the 64-bit division helper produces the right quotient.
 */
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
3021
static void handle_shift_reg(DisasContext *s,
3022
enum a64_shift_type shift_type, unsigned int sf,
3023
unsigned int rm, unsigned int rn, unsigned int rd)
3025
TCGv_i64 tcg_shift = tcg_temp_new_i64();
3026
TCGv_i64 tcg_rd = cpu_reg(s, rd);
3027
TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
3029
tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
3030
shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
3031
tcg_temp_free_i64(tcg_shift);
3034
/* C3.5.8 Data-processing (2 source)
3035
* 31 30 29 28 21 20 16 15 10 9 5 4 0
3036
* +----+---+---+-----------------+------+--------+------+------+
3037
* | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
3038
* +----+---+---+-----------------+------+--------+------+------+
3040
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
3042
unsigned int sf, rm, opcode, rn, rd;
3043
sf = extract32(insn, 31, 1);
3044
rm = extract32(insn, 16, 5);
3045
opcode = extract32(insn, 10, 6);
3046
rn = extract32(insn, 5, 5);
3047
rd = extract32(insn, 0, 5);
3049
if (extract32(insn, 29, 1)) {
3050
unallocated_encoding(s);
3056
handle_div(s, false, sf, rm, rn, rd);
3059
handle_div(s, true, sf, rm, rn, rd);
3062
handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
3065
handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
3068
handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
3071
handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
3080
case 23: /* CRC32 */
3081
unsupported_encoding(s, insn);
3084
unallocated_encoding(s);
3089
/* C3.5 Data processing - register */
3090
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
3092
switch (extract32(insn, 24, 5)) {
3093
case 0x0a: /* Logical (shifted register) */
3094
disas_logic_reg(s, insn);
3096
case 0x0b: /* Add/subtract */
3097
if (insn & (1 << 21)) { /* (extended register) */
3098
disas_add_sub_ext_reg(s, insn);
3100
disas_add_sub_reg(s, insn);
3103
case 0x1b: /* Data-processing (3 source) */
3104
disas_data_proc_3src(s, insn);
3107
switch (extract32(insn, 21, 3)) {
3108
case 0x0: /* Add/subtract (with carry) */
3109
disas_adc_sbc(s, insn);
3111
case 0x2: /* Conditional compare */
3112
disas_cc(s, insn); /* both imm and reg forms */
3114
case 0x4: /* Conditional select */
3115
disas_cond_select(s, insn);
3117
case 0x6: /* Data-processing */
3118
if (insn & (1 << 30)) { /* (1 source) */
3119
disas_data_proc_1src(s, insn);
3120
} else { /* (2 source) */
3121
disas_data_proc_2src(s, insn);
3125
unallocated_encoding(s);
3130
unallocated_encoding(s);
3135
/* C3.6.22 Floating point compare
3136
* 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
3137
* +---+---+---+-----------+------+---+------+-----+---------+------+-------+
3138
* | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
3139
* +---+---+---+-----------+------+---+------+-----+---------+------+-------+
3141
static void disas_fp_compare(DisasContext *s, uint32_t insn)
3143
unsupported_encoding(s, insn);
3146
/* C3.6.23 Floating point conditional compare
3147
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
3148
* +---+---+---+-----------+------+---+------+------+-----+------+----+------+
3149
* | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
3150
* +---+---+---+-----------+------+---+------+------+-----+------+----+------+
3152
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
3154
unsupported_encoding(s, insn);
3157
/* C3.6.24 Floating point conditional select
3158
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
3159
* +---+---+---+-----------+------+---+------+------+-----+------+------+
3160
* | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
3161
* +---+---+---+-----------+------+---+------+------+-----+------+------+
3163
static void disas_fp_csel(DisasContext *s, uint32_t insn)
3165
unsupported_encoding(s, insn);
3168
/* C3.6.25 Floating point data-processing (1 source)
3169
* 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
3170
* +---+---+---+-----------+------+---+--------+-----------+------+------+
3171
* | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
3172
* +---+---+---+-----------+------+---+--------+-----------+------+------+
3174
static void disas_fp_1src(DisasContext *s, uint32_t insn)
3176
unsupported_encoding(s, insn);
3179
/* C3.6.26 Floating point data-processing (2 source)
3180
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
3181
* +---+---+---+-----------+------+---+------+--------+-----+------+------+
3182
* | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
3183
* +---+---+---+-----------+------+---+------+--------+-----+------+------+
3185
static void disas_fp_2src(DisasContext *s, uint32_t insn)
3187
unsupported_encoding(s, insn);
3190
/* C3.6.27 Floating point data-processing (3 source)
3191
* 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
3192
* +---+---+---+-----------+------+----+------+----+------+------+------+
3193
* | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
3194
* +---+---+---+-----------+------+----+------+----+------+------+------+
3196
static void disas_fp_3src(DisasContext *s, uint32_t insn)
3198
unsupported_encoding(s, insn);
3201
/* C3.6.28 Floating point immediate
3202
* 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
3203
* +---+---+---+-----------+------+---+------------+-------+------+------+
3204
* | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
3205
* +---+---+---+-----------+------+---+------------+-------+------+------+
3207
static void disas_fp_imm(DisasContext *s, uint32_t insn)
3209
unsupported_encoding(s, insn);
3212
/* C3.6.29 Floating point <-> fixed point conversions
3213
* 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
3214
* +----+---+---+-----------+------+---+-------+--------+-------+------+------+
3215
* | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
3216
* +----+---+---+-----------+------+---+-------+--------+-------+------+------+
3218
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
3220
unsupported_encoding(s, insn);
3223
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     * type: 0 = single, 1 = double, 2 = top half of quad.
     * itof: true = integer reg to fp reg, false = fp reg to integer reg.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);

        switch (type) {
        case 0:
        {
            /* 32 bit */
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(rd, MO_64));
            /* clear the high 64 bits of the fp register */
            tcg_gen_movi_i64(tmp, 0);
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(rd));
            tcg_temp_free_i64(tmp);
            break;
        }
        case 1:
        {
            /* 64 bit */
            TCGv_i64 tmp = tcg_const_i64(0);
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(rd, MO_64));
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(rd));
            tcg_temp_free_i64(tmp);
            break;
        }
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(rd));
            break;
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(rn));
            break;
        }
    }
}
/* C3.6.30 Floating point <-> integer conversions
3279
* 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
3280
* +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
3281
* | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
3282
* +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
3284
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
3286
int rd = extract32(insn, 0, 5);
3287
int rn = extract32(insn, 5, 5);
3288
int opcode = extract32(insn, 16, 3);
3289
int rmode = extract32(insn, 19, 2);
3290
int type = extract32(insn, 22, 2);
3291
bool sbit = extract32(insn, 29, 1);
3292
bool sf = extract32(insn, 31, 1);
3294
if (!sbit && (rmode < 2) && (opcode > 5)) {
3296
bool itof = opcode & 1;
3298
switch (sf << 3 | type << 1 | rmode) {
3299
case 0x0: /* 32 bit */
3300
case 0xa: /* 64 bit */
3301
case 0xd: /* 64 bit to top half of quad */
3304
/* all other sf/type/rmode combinations are invalid */
3305
unallocated_encoding(s);
3309
handle_fmov(s, rd, rn, type, itof);
3311
/* actual FP conversions */
3312
unsupported_encoding(s, insn);
3316
/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
3317
* 31 30 29 28 25 24 0
3318
* +---+---+---+---------+-----------------------------+
3319
* | | 0 | | 1 1 1 1 | |
3320
* +---+---+---+---------+-----------------------------+
3322
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
3324
if (extract32(insn, 24, 1)) {
3325
/* Floating point data-processing (3 source) */
3326
disas_fp_3src(s, insn);
3327
} else if (extract32(insn, 21, 1) == 0) {
3328
/* Floating point to fixed point conversions */
3329
disas_fp_fixed_conv(s, insn);
3331
switch (extract32(insn, 10, 2)) {
3333
/* Floating point conditional compare */
3334
disas_fp_ccomp(s, insn);
3337
/* Floating point data-processing (2 source) */
3338
disas_fp_2src(s, insn);
3341
/* Floating point conditional select */
3342
disas_fp_csel(s, insn);
3345
switch (ctz32(extract32(insn, 12, 4))) {
3346
case 0: /* [15:12] == xxx1 */
3347
/* Floating point immediate */
3348
disas_fp_imm(s, insn);
3350
case 1: /* [15:12] == xx10 */
3351
/* Floating point compare */
3352
disas_fp_compare(s, insn);
3354
case 2: /* [15:12] == x100 */
3355
/* Floating point data-processing (1 source) */
3356
disas_fp_1src(s, insn);
3358
case 3: /* [15:12] == 1000 */
3359
unallocated_encoding(s);
3361
default: /* [15:12] == 0000 */
3362
/* Floating point <-> integer conversions */
3363
disas_fp_int_conv(s, insn);
3371
static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    unsupported_encoding(s, insn);
}
/* C3.6 Data processing - SIMD and floating point */
3381
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
3383
if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
3384
disas_data_proc_fp(s, insn);
3386
/* SIMD, including crypto */
3387
disas_data_proc_simd(s, insn);
3391
/* C3.1 A64 instruction index by encoding */
3392
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
3396
insn = arm_ldl_code(env, s->pc, s->bswap_code);
3400
switch (extract32(insn, 25, 4)) {
3401
case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
3402
unallocated_encoding(s);
3404
case 0x8: case 0x9: /* Data processing - immediate */
3405
disas_data_proc_imm(s, insn);
3407
case 0xa: case 0xb: /* Branch, exception generation and system insns */
3408
disas_b_exc_sys(s, insn);
3413
case 0xe: /* Loads and stores */
3414
disas_ldst(s, insn);
3417
case 0xd: /* Data processing - register */
3418
disas_data_proc_reg(s, insn);
3421
case 0xf: /* Data processing - SIMD and floating point */
3422
disas_data_proc_simd_fp(s, insn);
3425
assert(FALSE); /* all 15 cases should be handled above */
3429
/* if we allocated any temporaries, free them here */
3433
void gen_intermediate_code_internal_a64(ARMCPU *cpu,
3434
TranslationBlock *tb,
3437
CPUState *cs = CPU(cpu);
3438
CPUARMState *env = &cpu->env;
3439
DisasContext dc1, *dc = &dc1;
3441
uint16_t *gen_opc_end;
3443
target_ulong pc_start;
3444
target_ulong next_page_start;
3452
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
3454
dc->is_jmp = DISAS_NEXT;
3456
dc->singlestep_enabled = cs->singlestep_enabled;
3462
dc->condexec_mask = 0;
3463
dc->condexec_cond = 0;
3464
#if !defined(CONFIG_USER_ONLY)
3467
dc->vfp_enabled = 0;
3470
dc->cp_regs = cpu->cp_regs;
3471
dc->current_pl = arm_current_pl(env);
3473
init_tmp_a64_array(dc);
3475
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3478
max_insns = tb->cflags & CF_COUNT_MASK;
3479
if (max_insns == 0) {
3480
max_insns = CF_COUNT_MASK;
3485
tcg_clear_temp_count();
3488
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3489
QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3490
if (bp->pc == dc->pc) {
3491
gen_exception_insn(dc, 0, EXCP_DEBUG);
3492
/* Advance PC so that clearing the breakpoint will
3493
invalidate this TB. */
3495
goto done_generating;
3501
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3505
tcg_ctx.gen_opc_instr_start[lj++] = 0;
3508
tcg_ctx.gen_opc_pc[lj] = dc->pc;
3509
tcg_ctx.gen_opc_instr_start[lj] = 1;
3510
tcg_ctx.gen_opc_icount[lj] = num_insns;
3513
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
3517
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
3518
tcg_gen_debug_insn_start(dc->pc);
3521
disas_a64_insn(env, dc);
3523
if (tcg_check_temp_count()) {
3524
fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
3528
/* Translation stops when a conditional branch is encountered.
3529
* Otherwise the subsequent code could get translated several times.
3530
* Also stop translation when a page boundary is reached. This
3531
* ensures prefetch aborts occur at the right place.
3534
} while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
3535
!cs->singlestep_enabled &&
3537
dc->pc < next_page_start &&
3538
num_insns < max_insns);
3540
if (tb->cflags & CF_LAST_IO) {
3544
if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) {
3545
/* Note that this means single stepping WFI doesn't halt the CPU.
3546
* For conditional branch insns this is harmless unreachable code as
3547
* gen_goto_tb() has already handled emitting the debug exception
3548
* (and thus a tb-jump is not possible when singlestepping).
3550
assert(dc->is_jmp != DISAS_TB_JUMP);
3551
if (dc->is_jmp != DISAS_JUMP) {
3552
gen_a64_set_pc_im(dc->pc);
3554
gen_exception(EXCP_DEBUG);
3556
switch (dc->is_jmp) {
3558
gen_goto_tb(dc, 1, dc->pc);
3562
gen_a64_set_pc_im(dc->pc);
3565
/* indicate that the hash table must be used to find the next TB */
3573
/* This is a special case because we don't want to just halt the CPU
3574
* if trying to debug across a WFI.
3576
gen_helper_wfi(cpu_env);
3582
gen_tb_end(tb, num_insns);
3583
*tcg_ctx.gen_opc_ptr = INDEX_op_end;
3586
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3587
qemu_log("----------------\n");
3588
qemu_log("IN: %s\n", lookup_symbol(pc_start));
3589
log_target_disas(env, pc_start, dc->pc - pc_start,
3590
dc->thumb | (dc->bswap_code << 1));
3595
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3598
tcg_ctx.gen_opc_instr_start[lj++] = 0;
3601
tb->size = dc->pc - pc_start;
3602
tb->icount = num_insns;