/*
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "translate.h"
#include "qemu/host-utils.h"

#include "exec/gen-icount.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;

    cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
                env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-');
}
static int get_mem_index(DisasContext *s)
#ifdef CONFIG_USER_ONLY

void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_EXC;
}
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep or deterministic io */
    if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) {
        return false;
    }

    /* Only link tbs from inside the same guest page */
    if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }

    return true;
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->singlestep_enabled) {
            gen_exception(EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
        s->is_jmp = DISAS_JUMP;
    }
}
static void unallocated_encoding(DisasContext *s)
{
    gen_exception_insn(s, 4, EXCP_UDEF);
}

#define unsupported_encoding(s, insn)                                   \
    do {                                                                \
        qemu_log_mask(LOG_UNIMP,                                        \
                      "%s:%d: unsupported instruction encoding 0x%08x " \
                      "at pc=%016" PRIx64 "\n",                         \
                      __FILE__, __LINE__, insn, s->pc - 4);             \
        unallocated_encoding(s);                                        \
    } while (0)
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    int i;
    for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
        TCGV_UNUSED_I64(s->tmp_a64[i]);
    }
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register in cases
 * where changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
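/* Illustrative note (added commentary, not from the original source):
 * for "ADD x0, sp, #16" the Rn field value of 31 means SP, so the decoder
 * reads the base via cpu_reg_sp(); for "ADD x0, x1, xzr" the same field
 * value of 31 means the zero register, so cpu_reg() hands back a scratch
 * temporary that reads as zero and whose writes are simply discarded.
 */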
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    TCGv_i64 flag = tcg_temp_new_i64();

    tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
    tcg_gen_trunc_i64_i32(cpu_ZF, flag);
    tcg_gen_shri_i64(flag, result, 32);
    tcg_gen_trunc_i64_i32(cpu_NF, flag);
    tcg_temp_free_i64(flag);
}
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_trunc_i64_i32(cpu_ZF, result);
        tcg_gen_trunc_i64_i32(cpu_NF, result);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_trunc_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_shri_i64(flag, flag, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
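/* Worked example (added commentary, not from the original source): in the
 * 32-bit path with t0 = 0x7fffffff and t1 = 0x00000001, the add2 leaves
 * cpu_NF = 0x80000000 and cpu_CF = 0; the two XORs plus the ANDC then give
 * cpu_VF = (0x80000000 ^ 0x7fffffff) & ~(0x7fffffff ^ 0x00000001)
 *        = 0x80000001, whose sign bit is set, so the signed overflow of
 * 0x7fffffff + 1 is reported in V while C stays clear.
 */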
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_trunc_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_shri_i64(flag, flag, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
/* Load/Store generators */

/* Store from GPR register to memory */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size)
{
    tcg_gen_qemu_st_i64(source, tcg_addr, get_mem_index(s), MO_TE + size);
}

/* Load from memory to GPR register */
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend)
{
    TCGMemOp memop = MO_TE + size;

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, get_mem_index(s), memop);

    if (extend && is_signed) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}
/* Store from FP register to memory */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    int freg_offs = offsetof(CPUARMState, vfp.regs[srcidx * 2]);
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_ld8u_i64(tmp, cpu_env, freg_offs);
    tcg_gen_ld16u_i64(tmp, cpu_env, freg_offs);
    tcg_gen_ld32u_i64(tmp, cpu_env, freg_offs);
    tcg_gen_ld_i64(tmp, cpu_env, freg_offs);
    tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TE + size);
    TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, freg_offs);
    tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
    tcg_gen_qemu_st64(tmp, tcg_addr, get_mem_index(s));
    tcg_gen_ld_i64(tmp, cpu_env, freg_offs + sizeof(float64));
    tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
    tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
    tcg_temp_free_i64(tcg_hiaddr);

    tcg_temp_free_i64(tmp);
/* Load from memory to FP register */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    int freg_offs = offsetof(CPUARMState, vfp.regs[destidx * 2]);
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = MO_TE + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        TCGv_i64 tcg_hiaddr;
        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), MO_TEQ);
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, freg_offs);
    tcg_gen_st_i64(tmphi, cpu_env, freg_offs + sizeof(float64));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);
}
/* This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
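/* Illustrative example (added commentary, not from the original source):
 * option = 2 (UXTW) with shift = 2, as a "LDR w0, [x1, w2, UXTW #2]" style
 * offset would use, makes this helper zero-extend the low 32 bits of
 * tcg_in and then shift the result left by 2, i.e.
 * tcg_out = (tcg_in & 0xffffffff) << 2.
 */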
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}
565
* the instruction disassembly implemented here matches
566
* the instruction encoding classifications in chapter 3 (C3)
567
* of the ARM Architecture Reference Manual (DDI0487A_a)
570
/* C3.2.7 Unconditional branch (immediate)
572
* +----+-----------+-------------------------------------+
573
* | op | 0 0 1 0 1 | imm26 |
574
* +----+-----------+-------------------------------------+
576
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
578
uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
580
if (insn & (1 << 31)) {
581
/* C5.6.26 BL Branch with link */
582
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
585
/* C5.6.20 B Branch / C5.6.26 BL Branch with link */
586
gen_goto_tb(s, 0, addr);
589
/* C3.2.1 Compare & branch (immediate)
590
* 31 30 25 24 23 5 4 0
591
* +----+-------------+----+---------------------+--------+
592
* | sf | 0 1 1 0 1 0 | op | imm19 | Rt |
593
* +----+-------------+----+---------------------+--------+
595
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
597
unsigned int sf, op, rt;
602
sf = extract32(insn, 31, 1);
603
op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
604
rt = extract32(insn, 0, 5);
605
addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
607
tcg_cmp = read_cpu_reg(s, rt, sf);
608
label_match = gen_new_label();
610
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
611
tcg_cmp, 0, label_match);
613
gen_goto_tb(s, 0, s->pc);
614
gen_set_label(label_match);
615
gen_goto_tb(s, 1, addr);
618
/* C3.2.5 Test & branch (immediate)
619
* 31 30 25 24 23 19 18 5 4 0
620
* +----+-------------+----+-------+-------------+------+
621
* | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt |
622
* +----+-------------+----+-------+-------------+------+
624
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
626
unsigned int bit_pos, op, rt;
631
bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
632
op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
633
addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
634
rt = extract32(insn, 0, 5);
636
tcg_cmp = tcg_temp_new_i64();
637
tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
638
label_match = gen_new_label();
639
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
640
tcg_cmp, 0, label_match);
641
tcg_temp_free_i64(tcg_cmp);
642
gen_goto_tb(s, 0, s->pc);
643
gen_set_label(label_match);
644
gen_goto_tb(s, 1, addr);
647
/* C3.2.2 / C5.6.19 Conditional branch (immediate)
648
* 31 25 24 23 5 4 3 0
649
* +---------------+----+---------------------+----+------+
650
* | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond |
651
* +---------------+----+---------------------+----+------+
653
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
658
if ((insn & (1 << 4)) || (insn & (1 << 24))) {
659
unallocated_encoding(s);
662
addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
663
cond = extract32(insn, 0, 4);
666
/* genuinely conditional branches */
667
int label_match = gen_new_label();
668
arm_gen_test_cc(cond, label_match);
669
gen_goto_tb(s, 0, s->pc);
670
gen_set_label(label_match);
671
gen_goto_tb(s, 1, addr);
673
/* 0xe and 0xf are both "always" conditions */
674
gen_goto_tb(s, 0, addr);
679
static void handle_hint(DisasContext *s, uint32_t insn,
680
unsigned int op1, unsigned int op2, unsigned int crm)
682
unsigned int selector = crm << 3 | op2;
685
unallocated_encoding(s);
697
/* we treat all as NOP at least for now */
700
/* default specified as NOP equivalent */
705
/* CLREX, DSB, DMB, ISB */
706
static void handle_sync(DisasContext *s, uint32_t insn,
707
unsigned int op1, unsigned int op2, unsigned int crm)
710
unallocated_encoding(s);
716
unsupported_encoding(s, insn);
721
/* We don't emulate caches so barriers are no-ops */
724
unallocated_encoding(s);
729
/* C5.6.130 MSR (immediate) - move immediate to processor state field */
730
static void handle_msr_i(DisasContext *s, uint32_t insn,
731
unsigned int op1, unsigned int op2, unsigned int crm)
733
unsupported_encoding(s, insn);
737
static void handle_sys(DisasContext *s, uint32_t insn, unsigned int l,
738
unsigned int op1, unsigned int op2,
739
unsigned int crn, unsigned int crm, unsigned int rt)
741
unsupported_encoding(s, insn);
744
/* C5.6.129 MRS - move from system register */
745
static void handle_mrs(DisasContext *s, uint32_t insn, unsigned int op0,
746
unsigned int op1, unsigned int op2,
747
unsigned int crn, unsigned int crm, unsigned int rt)
749
unsupported_encoding(s, insn);
752
/* C5.6.131 MSR (register) - move to system register */
753
static void handle_msr(DisasContext *s, uint32_t insn, unsigned int op0,
754
unsigned int op1, unsigned int op2,
755
unsigned int crn, unsigned int crm, unsigned int rt)
757
unsupported_encoding(s, insn);
761
* 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0
762
* +---------------------+---+-----+-----+-------+-------+-----+------+
763
* | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt |
764
* +---------------------+---+-----+-----+-------+-------+-----+------+
766
static void disas_system(DisasContext *s, uint32_t insn)
768
unsigned int l, op0, op1, crn, crm, op2, rt;
769
l = extract32(insn, 21, 1);
770
op0 = extract32(insn, 19, 2);
771
op1 = extract32(insn, 16, 3);
772
crn = extract32(insn, 12, 4);
773
crm = extract32(insn, 8, 4);
774
op2 = extract32(insn, 5, 3);
775
rt = extract32(insn, 0, 5);
779
unallocated_encoding(s);
783
case 2: /* C5.6.68 HINT */
784
handle_hint(s, insn, op1, op2, crm);
786
case 3: /* CLREX, DSB, DMB, ISB */
787
handle_sync(s, insn, op1, op2, crm);
789
case 4: /* C5.6.130 MSR (immediate) */
790
handle_msr_i(s, insn, op1, op2, crm);
793
unallocated_encoding(s);
801
handle_sys(s, insn, l, op1, op2, crn, crm, rt);
802
} else if (l) { /* op0 > 1 */
803
/* C5.6.129 MRS - move from system register */
804
handle_mrs(s, insn, op0, op1, op2, crn, crm, rt);
806
/* C5.6.131 MSR (register) - move to system register */
807
handle_msr(s, insn, op0, op1, op2, crn, crm, rt);
811
/* C3.2.3 Exception generation
813
* 31 24 23 21 20 5 4 2 1 0
814
* +-----------------+-----+------------------------+-----+----+
815
* | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL |
816
* +-----------------------+------------------------+----------+
818
static void disas_exc(DisasContext *s, uint32_t insn)
820
int opc = extract32(insn, 21, 3);
821
int op2_ll = extract32(insn, 0, 5);
825
/* SVC, HVC, SMC; since we don't support the Virtualization
826
* or TrustZone extensions these all UNDEF except SVC.
829
unallocated_encoding(s);
832
gen_exception_insn(s, 0, EXCP_SWI);
836
unallocated_encoding(s);
840
gen_exception_insn(s, 0, EXCP_BKPT);
844
unallocated_encoding(s);
848
unsupported_encoding(s, insn);
851
if (op2_ll < 1 || op2_ll > 3) {
852
unallocated_encoding(s);
855
/* DCPS1, DCPS2, DCPS3 */
856
unsupported_encoding(s, insn);
859
unallocated_encoding(s);
864
/* C3.2.7 Unconditional branch (register)
865
* 31 25 24 21 20 16 15 10 9 5 4 0
866
* +---------------+-------+-------+-------+------+-------+
867
* | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 |
868
* +---------------+-------+-------+-------+------+-------+
870
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
872
unsigned int opc, op2, op3, rn, op4;
874
opc = extract32(insn, 21, 4);
875
op2 = extract32(insn, 16, 5);
876
op3 = extract32(insn, 10, 6);
877
rn = extract32(insn, 5, 5);
878
op4 = extract32(insn, 0, 5);
880
if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
881
unallocated_encoding(s);
890
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
895
unallocated_encoding(s);
897
unsupported_encoding(s, insn);
901
unallocated_encoding(s);
905
tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
906
s->is_jmp = DISAS_JUMP;
909
/* C3.2 Branches, exception generating and system instructions */
910
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
912
switch (extract32(insn, 25, 7)) {
913
case 0x0a: case 0x0b:
914
case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
915
disas_uncond_b_imm(s, insn);
917
case 0x1a: case 0x5a: /* Compare & branch (immediate) */
918
disas_comp_b_imm(s, insn);
920
case 0x1b: case 0x5b: /* Test & branch (immediate) */
921
disas_test_b_imm(s, insn);
923
case 0x2a: /* Conditional branch (immediate) */
924
disas_cond_b_imm(s, insn);
926
case 0x6a: /* Exception generation / System */
927
if (insn & (1 << 24)) {
928
disas_system(s, insn);
933
case 0x6b: /* Unconditional branch (register) */
934
disas_uncond_b_reg(s, insn);
937
unallocated_encoding(s);
942
/* Load/store exclusive */
943
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
945
unsupported_encoding(s, insn);
948
/* Load register (literal) */
949
static void disas_ld_lit(DisasContext *s, uint32_t insn)
951
unsupported_encoding(s, insn);
955
* C5.6.80 LDNP (Load Pair - non-temporal hint)
956
* C5.6.81 LDP (Load Pair - non vector)
957
* C5.6.82 LDPSW (Load Pair Signed Word - non vector)
958
* C5.6.176 STNP (Store Pair - non-temporal hint)
959
* C5.6.177 STP (Store Pair - non vector)
960
* C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
961
* C6.3.165 LDP (Load Pair of SIMD&FP)
962
* C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
963
* C6.3.284 STP (Store Pair of SIMD&FP)
965
* 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
966
* +-----+-------+---+---+-------+---+-----------------------------+
967
* | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
968
* +-----+-------+---+---+-------+---+-------+-------+------+------+
970
* opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
972
* LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
973
* V: 0 -> GPR, 1 -> Vector
974
* idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
975
* 10 -> signed offset, 11 -> pre-index
976
* L: 0 -> Store 1 -> Load
978
* Rt, Rt2 = GPR or SIMD registers to be stored
979
* Rn = general purpose register containing address
980
* imm7 = signed offset (multiple of 4 or 8 depending on size)
982
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
984
int rt = extract32(insn, 0, 5);
985
int rn = extract32(insn, 5, 5);
986
int rt2 = extract32(insn, 10, 5);
987
int64_t offset = sextract32(insn, 15, 7);
988
int index = extract32(insn, 23, 2);
989
bool is_vector = extract32(insn, 26, 1);
990
bool is_load = extract32(insn, 22, 1);
991
int opc = extract32(insn, 30, 2);
993
bool is_signed = false;
994
bool postindex = false;
997
TCGv_i64 tcg_addr; /* calculated address */
1001
unallocated_encoding(s);
1008
size = 2 + extract32(opc, 1, 1);
1009
is_signed = extract32(opc, 0, 1);
1010
if (!is_load && is_signed) {
1011
unallocated_encoding(s);
1017
case 1: /* post-index */
1022
/* signed offset with "non-temporal" hint. Since we don't emulate
1023
* caches we don't care about hints to the cache system about
1024
* data access patterns, and handle this identically to plain
1028
/* There is no non-temporal-hint version of LDPSW */
1029
unallocated_encoding(s);
1034
case 2: /* signed offset, rn not updated */
1037
case 3: /* pre-index */
1046
gen_check_sp_alignment(s);
1049
tcg_addr = read_cpu_reg_sp(s, rn, 1);
1052
tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
1057
do_fp_ld(s, rt, tcg_addr, size);
1059
do_fp_st(s, rt, tcg_addr, size);
1062
TCGv_i64 tcg_rt = cpu_reg(s, rt);
1064
do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
1066
do_gpr_st(s, tcg_rt, tcg_addr, size);
1069
tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
1072
do_fp_ld(s, rt2, tcg_addr, size);
1074
do_fp_st(s, rt2, tcg_addr, size);
1077
TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
1079
do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false);
1081
do_gpr_st(s, tcg_rt2, tcg_addr, size);
1087
tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
1089
tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
1091
tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
1096
* C3.3.8 Load/store (immediate post-indexed)
1097
* C3.3.9 Load/store (immediate pre-indexed)
1098
* C3.3.12 Load/store (unscaled immediate)
1100
* 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
1101
* +----+-------+---+-----+-----+---+--------+-----+------+------+
1102
* |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
1103
* +----+-------+---+-----+-----+---+--------+-----+------+------+
1105
* idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
1106
* V = 0 -> non-vector
1107
* size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
1108
* opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
1110
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
1112
int rt = extract32(insn, 0, 5);
1113
int rn = extract32(insn, 5, 5);
1114
int imm9 = sextract32(insn, 12, 9);
1115
int opc = extract32(insn, 22, 2);
1116
int size = extract32(insn, 30, 2);
1117
int idx = extract32(insn, 10, 2);
1118
bool is_signed = false;
1119
bool is_store = false;
1120
bool is_extended = false;
1121
bool is_vector = extract32(insn, 26, 1);
1128
size |= (opc & 2) << 1;
1130
unallocated_encoding(s);
1133
is_store = ((opc & 1) == 0);
1135
if (size == 3 && opc == 2) {
1136
/* PRFM - prefetch */
1139
if (opc == 3 && size > 1) {
1140
unallocated_encoding(s);
1143
is_store = (opc == 0);
1144
is_signed = opc & (1<<1);
1145
is_extended = (size < 3) && (opc & 1);
1167
gen_check_sp_alignment(s);
1169
tcg_addr = read_cpu_reg_sp(s, rn, 1);
1172
tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
1177
do_fp_st(s, rt, tcg_addr, size);
1179
do_fp_ld(s, rt, tcg_addr, size);
1182
TCGv_i64 tcg_rt = cpu_reg(s, rt);
1184
do_gpr_st(s, tcg_rt, tcg_addr, size);
1186
do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
1191
TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
1193
tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
1195
tcg_gen_mov_i64(tcg_rn, tcg_addr);
1200
* C3.3.10 Load/store (register offset)
1202
* 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
1203
* +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
1204
* |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
1205
* +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
1208
* size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
1209
* opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
1211
* size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
1212
* opc<0>: 0 -> store, 1 -> load
1213
* V: 1 -> vector/simd
1214
* opt: extend encoding (see DecodeRegExtend)
1215
* S: if S=1 then scale (essentially index by sizeof(size))
1216
* Rt: register to transfer into/out of
1217
* Rn: address register or SP for base
1218
* Rm: offset register or ZR for offset
1220
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
1222
int rt = extract32(insn, 0, 5);
1223
int rn = extract32(insn, 5, 5);
1224
int shift = extract32(insn, 12, 1);
1225
int rm = extract32(insn, 16, 5);
1226
int opc = extract32(insn, 22, 2);
1227
int opt = extract32(insn, 13, 3);
1228
int size = extract32(insn, 30, 2);
1229
bool is_signed = false;
1230
bool is_store = false;
1231
bool is_extended = false;
1232
bool is_vector = extract32(insn, 26, 1);
1237
if (extract32(opt, 1, 1) == 0) {
1238
unallocated_encoding(s);
1243
size |= (opc & 2) << 1;
1245
unallocated_encoding(s);
1248
is_store = !extract32(opc, 0, 1);
1250
if (size == 3 && opc == 2) {
1251
/* PRFM - prefetch */
1254
if (opc == 3 && size > 1) {
1255
unallocated_encoding(s);
1258
is_store = (opc == 0);
1259
is_signed = extract32(opc, 1, 1);
1260
is_extended = (size < 3) && extract32(opc, 0, 1);
1264
gen_check_sp_alignment(s);
1266
tcg_addr = read_cpu_reg_sp(s, rn, 1);
1268
tcg_rm = read_cpu_reg(s, rm, 1);
1269
ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
1271
tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
1275
do_fp_st(s, rt, tcg_addr, size);
1277
do_fp_ld(s, rt, tcg_addr, size);
1280
TCGv_i64 tcg_rt = cpu_reg(s, rt);
1282
do_gpr_st(s, tcg_rt, tcg_addr, size);
1284
do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
1290
* C3.3.13 Load/store (unsigned immediate)
1292
* 31 30 29 27 26 25 24 23 22 21 10 9 5
1293
* +----+-------+---+-----+-----+------------+-------+------+
1294
* |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
1295
* +----+-------+---+-----+-----+------------+-------+------+
1298
* size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
1299
* opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
1301
* size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
1302
* opc<0>: 0 -> store, 1 -> load
1303
* Rn: base address register (inc SP)
1304
* Rt: target register
1306
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
1308
int rt = extract32(insn, 0, 5);
1309
int rn = extract32(insn, 5, 5);
1310
unsigned int imm12 = extract32(insn, 10, 12);
1311
bool is_vector = extract32(insn, 26, 1);
1312
int size = extract32(insn, 30, 2);
1313
int opc = extract32(insn, 22, 2);
1314
unsigned int offset;
1319
bool is_signed = false;
1320
bool is_extended = false;
1323
size |= (opc & 2) << 1;
1325
unallocated_encoding(s);
1328
is_store = !extract32(opc, 0, 1);
1330
if (size == 3 && opc == 2) {
1331
/* PRFM - prefetch */
1334
if (opc == 3 && size > 1) {
1335
unallocated_encoding(s);
1338
is_store = (opc == 0);
1339
is_signed = extract32(opc, 1, 1);
1340
is_extended = (size < 3) && extract32(opc, 0, 1);
1344
gen_check_sp_alignment(s);
1346
tcg_addr = read_cpu_reg_sp(s, rn, 1);
1347
offset = imm12 << size;
1348
tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
1352
do_fp_st(s, rt, tcg_addr, size);
1354
do_fp_ld(s, rt, tcg_addr, size);
1357
TCGv_i64 tcg_rt = cpu_reg(s, rt);
1359
do_gpr_st(s, tcg_rt, tcg_addr, size);
1361
do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
1366
/* Load/store register (immediate forms) */
1367
static void disas_ldst_reg_imm(DisasContext *s, uint32_t insn)
1369
switch (extract32(insn, 10, 2)) {
1370
case 0: case 1: case 3:
1371
/* Load/store register (unscaled immediate) */
1372
/* Load/store immediate pre/post-indexed */
1373
disas_ldst_reg_imm9(s, insn);
1376
/* Load/store register unprivileged */
1377
unsupported_encoding(s, insn);
1380
unallocated_encoding(s);
1385
/* Load/store register (all forms) */
1386
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
1388
switch (extract32(insn, 24, 2)) {
1390
if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
1391
disas_ldst_reg_roffset(s, insn);
1393
disas_ldst_reg_imm(s, insn);
1397
disas_ldst_reg_unsigned_imm(s, insn);
1400
unallocated_encoding(s);
1405
/* AdvSIMD load/store multiple structures */
1406
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
1408
unsupported_encoding(s, insn);
1411
/* AdvSIMD load/store single structure */
1412
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
1414
unsupported_encoding(s, insn);
1417
/* C3.3 Loads and stores */
1418
static void disas_ldst(DisasContext *s, uint32_t insn)
1420
switch (extract32(insn, 24, 6)) {
1421
case 0x08: /* Load/store exclusive */
1422
disas_ldst_excl(s, insn);
1424
case 0x18: case 0x1c: /* Load register (literal) */
1425
disas_ld_lit(s, insn);
1427
case 0x28: case 0x29:
1428
case 0x2c: case 0x2d: /* Load/store pair (all forms) */
1429
disas_ldst_pair(s, insn);
1431
case 0x38: case 0x39:
1432
case 0x3c: case 0x3d: /* Load/store register (all forms) */
1433
disas_ldst_reg(s, insn);
1435
case 0x0c: /* AdvSIMD load/store multiple structures */
1436
disas_ldst_multiple_struct(s, insn);
1438
case 0x0d: /* AdvSIMD load/store single structure */
1439
disas_ldst_single_struct(s, insn);
1442
unallocated_encoding(s);
1447
/* C3.4.6 PC-rel. addressing
1448
* 31 30 29 28 24 23 5 4 0
1449
* +----+-------+-----------+-------------------+------+
1450
* | op | immlo | 1 0 0 0 0 | immhi | Rd |
1451
* +----+-------+-----------+-------------------+------+
1453
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
1455
unsigned int page, rd;
1459
page = extract32(insn, 31, 1);
1460
/* SignExtend(immhi:immlo) -> offset */
1461
offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
1462
rd = extract32(insn, 0, 5);
1466
/* ADRP (page based) */
1471
tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
1475
* C3.4.1 Add/subtract (immediate)
1477
* 31 30 29 28 24 23 22 21 10 9 5 4 0
1478
* +--+--+--+-----------+-----+-------------+-----+-----+
1479
* |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd |
1480
* +--+--+--+-----------+-----+-------------+-----+-----+
1482
* sf: 0 -> 32bit, 1 -> 64bit
1483
* op: 0 -> add , 1 -> sub
1485
* shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
1487
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
1489
int rd = extract32(insn, 0, 5);
1490
int rn = extract32(insn, 5, 5);
1491
uint64_t imm = extract32(insn, 10, 12);
1492
int shift = extract32(insn, 22, 2);
1493
bool setflags = extract32(insn, 29, 1);
1494
bool sub_op = extract32(insn, 30, 1);
1495
bool is_64bit = extract32(insn, 31, 1);
1497
TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
1498
TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
1499
TCGv_i64 tcg_result;
1508
unallocated_encoding(s);
1512
tcg_result = tcg_temp_new_i64();
1515
tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
1517
tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
1520
TCGv_i64 tcg_imm = tcg_const_i64(imm);
1522
gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
1524
gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
1526
tcg_temp_free_i64(tcg_imm);
1530
tcg_gen_mov_i64(tcg_rd, tcg_result);
1532
tcg_gen_ext32u_i64(tcg_rd, tcg_result);
1535
tcg_temp_free_i64(tcg_result);
1538
/* The input should be a value in the bottom e bits (with higher
1539
* bits zero); returns that value replicated into every element
1540
* of size e in a 64 bit integer.
1542
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
1552
/* Return a value with the bottom len bits set (where 0 < len <= 64) */
1553
static inline uint64_t bitmask64(unsigned int length)
1555
assert(length > 0 && length <= 64);
1556
return ~0ULL >> (64 - length);
1559
/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
1560
* only require the wmask. Returns false if the imms/immr/immn are a reserved
1561
* value (ie should cause a guest UNDEF exception), and true if they are
1562
* valid, in which case the decoded bit pattern is written to result.
1564
static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
1565
unsigned int imms, unsigned int immr)
1568
unsigned e, levels, s, r;
1571
assert(immn < 2 && imms < 64 && immr < 64);
1573
/* The bit patterns we create here are 64 bit patterns which
1574
* are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
1575
* 64 bits each. Each element contains the same value: a run
1576
* of between 1 and e-1 non-zero bits, rotated within the
1577
* element by between 0 and e-1 bits.
1579
* The element size and run length are encoded into immn (1 bit)
1580
* and imms (6 bits) as follows:
1581
* 64 bit elements: immn = 1, imms = <length of run - 1>
1582
* 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
1583
* 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
1584
* 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
1585
* 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
1586
* 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
1587
* Notice that immn = 0, imms = 11111x is the only combination
1588
* not covered by one of the above options; this is reserved.
1589
* Further, <length of run - 1> all-ones is a reserved pattern.
1591
* In all cases the rotation is by immr % e (and immr is 6 bits).
1594
/* First determine the element size */
1595
len = 31 - clz32((immn << 6) | (~imms & 0x3f));
1597
/* This is the immn == 0, imms == 0x11111x case */
1607
/* <length of run - 1> mustn't be all-ones. */
1611
/* Create the value of one element: s+1 set bits rotated
1612
* by r within the element (which is e bits wide)...
1614
mask = bitmask64(s + 1);
1615
mask = (mask >> r) | (mask << (e - r));
1616
/* ...then replicate the element over the whole 64 bit value */
1617
mask = bitfield_replicate(mask, e);
1622
/* C3.4.4 Logical (immediate)
1623
* 31 30 29 28 23 22 21 16 15 10 9 5 4 0
1624
* +----+-----+-------------+---+------+------+------+------+
1625
* | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
1626
* +----+-----+-------------+---+------+------+------+------+
1628
static void disas_logic_imm(DisasContext *s, uint32_t insn)
1630
unsigned int sf, opc, is_n, immr, imms, rn, rd;
1631
TCGv_i64 tcg_rd, tcg_rn;
1633
bool is_and = false;
1635
sf = extract32(insn, 31, 1);
1636
opc = extract32(insn, 29, 2);
1637
is_n = extract32(insn, 22, 1);
1638
immr = extract32(insn, 16, 6);
1639
imms = extract32(insn, 10, 6);
1640
rn = extract32(insn, 5, 5);
1641
rd = extract32(insn, 0, 5);
1644
unallocated_encoding(s);
1648
if (opc == 0x3) { /* ANDS */
1649
tcg_rd = cpu_reg(s, rd);
1651
tcg_rd = cpu_reg_sp(s, rd);
1653
tcg_rn = cpu_reg(s, rn);
1655
if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
1656
/* some immediate field values are reserved */
1657
unallocated_encoding(s);
1662
wmask &= 0xffffffff;
1666
case 0x3: /* ANDS */
1668
tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
1672
tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
1675
tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
1678
assert(FALSE); /* must handle all above */
1682
if (!sf && !is_and) {
1683
/* zero extend final result; we know we can skip this for AND
1684
* since the immediate had the high 32 bits clear.
1686
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
1689
if (opc == 3) { /* ANDS */
1690
gen_logic_CC(sf, tcg_rd);
1695
* C3.4.5 Move wide (immediate)
1697
* 31 30 29 28 23 22 21 20 5 4 0
1698
* +--+-----+-------------+-----+----------------+------+
1699
* |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
1700
* +--+-----+-------------+-----+----------------+------+
1702
* sf: 0 -> 32 bit, 1 -> 64 bit
1703
* opc: 00 -> N, 10 -> Z, 11 -> K
1704
* hw: shift/16 (0,16, and sf only 32, 48)
1706
static void disas_movw_imm(DisasContext *s, uint32_t insn)
1708
int rd = extract32(insn, 0, 5);
1709
uint64_t imm = extract32(insn, 5, 16);
1710
int sf = extract32(insn, 31, 1);
1711
int opc = extract32(insn, 29, 2);
1712
int pos = extract32(insn, 21, 2) << 4;
1713
TCGv_i64 tcg_rd = cpu_reg(s, rd);
1716
if (!sf && (pos >= 32)) {
1717
unallocated_encoding(s);
1731
tcg_gen_movi_i64(tcg_rd, imm);
1734
tcg_imm = tcg_const_i64(imm);
1735
tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
1736
tcg_temp_free_i64(tcg_imm);
1738
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
1742
unallocated_encoding(s);
1748
* 31 30 29 28 23 22 21 16 15 10 9 5 4 0
1749
* +----+-----+-------------+---+------+------+------+------+
1750
* | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
1751
* +----+-----+-------------+---+------+------+------+------+
1753
static void disas_bitfield(DisasContext *s, uint32_t insn)
1755
unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
1756
TCGv_i64 tcg_rd, tcg_tmp;
1758
sf = extract32(insn, 31, 1);
1759
opc = extract32(insn, 29, 2);
1760
n = extract32(insn, 22, 1);
1761
ri = extract32(insn, 16, 6);
1762
si = extract32(insn, 10, 6);
1763
rn = extract32(insn, 5, 5);
1764
rd = extract32(insn, 0, 5);
1765
bitsize = sf ? 64 : 32;
1767
if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
1768
unallocated_encoding(s);
1772
tcg_rd = cpu_reg(s, rd);
1773
tcg_tmp = read_cpu_reg(s, rn, sf);
1775
/* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */
1777
if (opc != 1) { /* SBFM or UBFM */
1778
tcg_gen_movi_i64(tcg_rd, 0);
1781
/* do the bit move operation */
1783
/* Wd<s-r:0> = Wn<s:r> */
1784
tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
1786
len = (si - ri) + 1;
1788
/* Wd<32+s-r,32-r> = Wn<s:0> */
1793
tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
1795
if (opc == 0) { /* SBFM - sign extend the destination field */
1796
tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
1797
tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
1800
if (!sf) { /* zero extend final result */
1801
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
1806
* 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
1807
* +----+------+-------------+---+----+------+--------+------+------+
1808
* | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
1809
* +----+------+-------------+---+----+------+--------+------+------+
1811
static void disas_extract(DisasContext *s, uint32_t insn)
1813
unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
1815
sf = extract32(insn, 31, 1);
1816
n = extract32(insn, 22, 1);
1817
rm = extract32(insn, 16, 5);
1818
imm = extract32(insn, 10, 6);
1819
rn = extract32(insn, 5, 5);
1820
rd = extract32(insn, 0, 5);
1821
op21 = extract32(insn, 29, 2);
1822
op0 = extract32(insn, 21, 1);
1823
bitsize = sf ? 64 : 32;
1825
if (sf != n || op21 || op0 || imm >= bitsize) {
1826
unallocated_encoding(s);
1828
TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
1830
tcg_rd = cpu_reg(s, rd);
1833
/* OPTME: we can special case rm==rn as a rotate */
1834
tcg_rm = read_cpu_reg(s, rm, sf);
1835
tcg_rn = read_cpu_reg(s, rn, sf);
1836
tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
1837
tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
1838
tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
1840
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
1843
/* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
1844
* so an extract from bit 0 is a special case.
1847
tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
1849
tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
1856
/* C3.4 Data processing - immediate */
1857
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
1859
switch (extract32(insn, 23, 6)) {
1860
case 0x20: case 0x21: /* PC-rel. addressing */
1861
disas_pc_rel_adr(s, insn);
1863
case 0x22: case 0x23: /* Add/subtract (immediate) */
1864
disas_add_sub_imm(s, insn);
1866
case 0x24: /* Logical (immediate) */
1867
disas_logic_imm(s, insn);
1869
case 0x25: /* Move wide (immediate) */
1870
disas_movw_imm(s, insn);
1872
case 0x26: /* Bitfield */
1873
disas_bitfield(s, insn);
1875
case 0x27: /* Extract */
1876
disas_extract(s, insn);
1879
unallocated_encoding(s);
1884
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
1885
* Note that it is the caller's responsibility to ensure that the
1886
* shift amount is in range (ie 0..31 or 0..63) and provide the ARM
1887
* mandated semantics for out of range shifts.
1889
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
1890
enum a64_shift_type shift_type, TCGv_i64 shift_amount)
1892
switch (shift_type) {
1893
case A64_SHIFT_TYPE_LSL:
1894
tcg_gen_shl_i64(dst, src, shift_amount);
1896
case A64_SHIFT_TYPE_LSR:
1897
tcg_gen_shr_i64(dst, src, shift_amount);
1899
case A64_SHIFT_TYPE_ASR:
1901
tcg_gen_ext32s_i64(dst, src);
1903
tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
1905
case A64_SHIFT_TYPE_ROR:
1907
tcg_gen_rotr_i64(dst, src, shift_amount);
1910
t0 = tcg_temp_new_i32();
1911
t1 = tcg_temp_new_i32();
1912
tcg_gen_trunc_i64_i32(t0, src);
1913
tcg_gen_trunc_i64_i32(t1, shift_amount);
1914
tcg_gen_rotr_i32(t0, t0, t1);
1915
tcg_gen_extu_i32_i64(dst, t0);
1916
tcg_temp_free_i32(t0);
1917
tcg_temp_free_i32(t1);
1921
assert(FALSE); /* all shift types should be handled */
1925
if (!sf) { /* zero extend final result */
1926
tcg_gen_ext32u_i64(dst, dst);
1930
/* Shift a TCGv src by immediate, put result in dst.
1931
* The shift amount must be in range (this should always be true as the
1932
* relevant instructions will UNDEF on bad shift immediates).
1934
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
1935
enum a64_shift_type shift_type, unsigned int shift_i)
1937
assert(shift_i < (sf ? 64 : 32));
1940
tcg_gen_mov_i64(dst, src);
1942
TCGv_i64 shift_const;
1944
shift_const = tcg_const_i64(shift_i);
1945
shift_reg(dst, src, sf, shift_type, shift_const);
1946
tcg_temp_free_i64(shift_const);
1950
/* C3.5.10 Logical (shifted register)
1951
* 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
1952
* +----+-----+-----------+-------+---+------+--------+------+------+
1953
* | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
1954
* +----+-----+-----------+-------+---+------+--------+------+------+
1956
static void disas_logic_reg(DisasContext *s, uint32_t insn)
1958
TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
1959
unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
1961
sf = extract32(insn, 31, 1);
1962
opc = extract32(insn, 29, 2);
1963
shift_type = extract32(insn, 22, 2);
1964
invert = extract32(insn, 21, 1);
1965
rm = extract32(insn, 16, 5);
1966
shift_amount = extract32(insn, 10, 6);
1967
rn = extract32(insn, 5, 5);
1968
rd = extract32(insn, 0, 5);
1970
if (!sf && (shift_amount & (1 << 5))) {
1971
unallocated_encoding(s);
1975
tcg_rd = cpu_reg(s, rd);
1977
if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
1978
/* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
1979
* register-register MOV and MVN, so it is worth special casing.
1981
tcg_rm = cpu_reg(s, rm);
1983
tcg_gen_not_i64(tcg_rd, tcg_rm);
1985
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
1989
tcg_gen_mov_i64(tcg_rd, tcg_rm);
1991
tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
1997
tcg_rm = read_cpu_reg(s, rm, sf);
2000
shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
2003
tcg_rn = cpu_reg(s, rn);
2005
switch (opc | (invert << 2)) {
2008
tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
2011
tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
2014
tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
2018
tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
2021
tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
2024
tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
2032
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2036
gen_logic_CC(sf, tcg_rd);
2041
* C3.5.1 Add/subtract (extended register)
2043
* 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
2044
* +--+--+--+-----------+-----+--+-------+------+------+----+----+
2045
* |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
2046
* +--+--+--+-----------+-----+--+-------+------+------+----+----+
2048
* sf: 0 -> 32bit, 1 -> 64bit
2049
* op: 0 -> add , 1 -> sub
2052
* option: extension type (see DecodeRegExtend)
2053
* imm3: optional shift to Rm
2055
* Rd = Rn + LSL(extend(Rm), amount)
2057
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
2059
int rd = extract32(insn, 0, 5);
2060
int rn = extract32(insn, 5, 5);
2061
int imm3 = extract32(insn, 10, 3);
2062
int option = extract32(insn, 13, 3);
2063
int rm = extract32(insn, 16, 5);
2064
bool setflags = extract32(insn, 29, 1);
2065
bool sub_op = extract32(insn, 30, 1);
2066
bool sf = extract32(insn, 31, 1);
2068
TCGv_i64 tcg_rm, tcg_rn; /* temps */
2070
TCGv_i64 tcg_result;
2073
unallocated_encoding(s);
2077
/* non-flag setting ops may use SP */
2079
tcg_rn = read_cpu_reg_sp(s, rn, sf);
2080
tcg_rd = cpu_reg_sp(s, rd);
2082
tcg_rn = read_cpu_reg(s, rn, sf);
2083
tcg_rd = cpu_reg(s, rd);
2086
tcg_rm = read_cpu_reg(s, rm, sf);
2087
ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
2089
tcg_result = tcg_temp_new_i64();
2093
tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
2095
tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
2099
gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
2101
gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
2106
tcg_gen_mov_i64(tcg_rd, tcg_result);
2108
tcg_gen_ext32u_i64(tcg_rd, tcg_result);
2111
tcg_temp_free_i64(tcg_result);
2115
* C3.5.2 Add/subtract (shifted register)
2117
* 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
2118
* +--+--+--+-----------+-----+--+-------+---------+------+------+
2119
* |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
2120
* +--+--+--+-----------+-----+--+-------+---------+------+------+
2122
* sf: 0 -> 32bit, 1 -> 64bit
2123
* op: 0 -> add , 1 -> sub
2125
* shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
2126
* imm6: Shift amount to apply to Rm before the add/sub
2128
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
2130
int rd = extract32(insn, 0, 5);
2131
int rn = extract32(insn, 5, 5);
2132
int imm6 = extract32(insn, 10, 6);
2133
int rm = extract32(insn, 16, 5);
2134
int shift_type = extract32(insn, 22, 2);
2135
bool setflags = extract32(insn, 29, 1);
2136
bool sub_op = extract32(insn, 30, 1);
2137
bool sf = extract32(insn, 31, 1);
2139
TCGv_i64 tcg_rd = cpu_reg(s, rd);
2140
TCGv_i64 tcg_rn, tcg_rm;
2141
TCGv_i64 tcg_result;
2143
if ((shift_type == 3) || (!sf && (imm6 > 31))) {
2144
unallocated_encoding(s);
2148
tcg_rn = read_cpu_reg(s, rn, sf);
2149
tcg_rm = read_cpu_reg(s, rm, sf);
2151
shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
2153
tcg_result = tcg_temp_new_i64();
2157
tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
2159
tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
2163
gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
2165
gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
2170
tcg_gen_mov_i64(tcg_rd, tcg_result);
2172
tcg_gen_ext32u_i64(tcg_rd, tcg_result);
2175
tcg_temp_free_i64(tcg_result);
2178
/* C3.5.9 Data-processing (3 source)
2180
31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
2181
+--+------+-----------+------+------+----+------+------+------+
2182
|sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
2183
+--+------+-----------+------+------+----+------+------+------+
2186
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
2188
int rd = extract32(insn, 0, 5);
2189
int rn = extract32(insn, 5, 5);
2190
int ra = extract32(insn, 10, 5);
2191
int rm = extract32(insn, 16, 5);
2192
int op_id = (extract32(insn, 29, 3) << 4) |
2193
(extract32(insn, 21, 3) << 1) |
2194
extract32(insn, 15, 1);
2195
bool sf = extract32(insn, 31, 1);
2196
bool is_sub = extract32(op_id, 0, 1);
2197
bool is_high = extract32(op_id, 2, 1);
2198
bool is_signed = false;
2203
/* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
2205
case 0x42: /* SMADDL */
2206
case 0x43: /* SMSUBL */
2207
case 0x44: /* SMULH */
2210
case 0x0: /* MADD (32bit) */
2211
case 0x1: /* MSUB (32bit) */
2212
case 0x40: /* MADD (64bit) */
2213
case 0x41: /* MSUB (64bit) */
2214
case 0x4a: /* UMADDL */
2215
case 0x4b: /* UMSUBL */
2216
case 0x4c: /* UMULH */
2219
unallocated_encoding(s);
2224
TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
2225
TCGv_i64 tcg_rd = cpu_reg(s, rd);
2226
TCGv_i64 tcg_rn = cpu_reg(s, rn);
2227
TCGv_i64 tcg_rm = cpu_reg(s, rm);
2230
tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
2232
tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
2235
tcg_temp_free_i64(low_bits);
2239
tcg_op1 = tcg_temp_new_i64();
2240
tcg_op2 = tcg_temp_new_i64();
2241
tcg_tmp = tcg_temp_new_i64();
2244
tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
2245
tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
2248
tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
2249
tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
2251
tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
2252
tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
2256
if (ra == 31 && !is_sub) {
2257
/* Special-case MADD with rA == XZR; it is the standard MUL alias */
2258
tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
2260
tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
2262
tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
2264
tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
2269
tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
2272
tcg_temp_free_i64(tcg_op1);
2273
tcg_temp_free_i64(tcg_op2);
2274
tcg_temp_free_i64(tcg_tmp);
2277
/* Add/subtract (with carry) */
2278
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
2280
unsupported_encoding(s, insn);
2283
/* Conditional compare (immediate) */
2284
static void disas_cc_imm(DisasContext *s, uint32_t insn)
2286
unsupported_encoding(s, insn);
2289
/* Conditional compare (register) */
2290
static void disas_cc_reg(DisasContext *s, uint32_t insn)
2292
unsupported_encoding(s, insn);
2295
/* C3.5.6 Conditional select
2296
* 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
2297
* +----+----+---+-----------------+------+------+-----+------+------+
2298
* | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
2299
* +----+----+---+-----------------+------+------+-----+------+------+
2301
static void disas_cond_select(DisasContext *s, uint32_t insn)
2303
unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
2304
TCGv_i64 tcg_rd, tcg_src;
2306
if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
2307
/* S == 1 or op2<1> == 1 */
2308
unallocated_encoding(s);
2311
sf = extract32(insn, 31, 1);
2312
else_inv = extract32(insn, 30, 1);
2313
rm = extract32(insn, 16, 5);
2314
cond = extract32(insn, 12, 4);
2315
else_inc = extract32(insn, 10, 1);
2316
rn = extract32(insn, 5, 5);
2317
rd = extract32(insn, 0, 5);
2320
/* silly no-op write; until we use movcond we must special-case
2321
* this to avoid a dead temporary across basic blocks.
2326
tcg_rd = cpu_reg(s, rd);
2328
if (cond >= 0x0e) { /* condition "always" */
2329
tcg_src = read_cpu_reg(s, rn, sf);
2330
tcg_gen_mov_i64(tcg_rd, tcg_src);
2332
/* OPTME: we could use movcond here, at the cost of duplicating
2333
* a lot of the arm_gen_test_cc() logic.
2335
int label_match = gen_new_label();
2336
int label_continue = gen_new_label();
2338
arm_gen_test_cc(cond, label_match);
2340
tcg_src = cpu_reg(s, rm);
2342
if (else_inv && else_inc) {
2343
tcg_gen_neg_i64(tcg_rd, tcg_src);
2344
} else if (else_inv) {
2345
tcg_gen_not_i64(tcg_rd, tcg_src);
2346
} else if (else_inc) {
2347
tcg_gen_addi_i64(tcg_rd, tcg_src, 1);
2349
tcg_gen_mov_i64(tcg_rd, tcg_src);
2352
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2354
tcg_gen_br(label_continue);
2356
gen_set_label(label_match);
2357
tcg_src = read_cpu_reg(s, rn, sf);
2358
tcg_gen_mov_i64(tcg_rd, tcg_src);
2360
gen_set_label(label_continue);
2364
static void handle_clz(DisasContext *s, unsigned int sf,
2365
unsigned int rn, unsigned int rd)
2367
TCGv_i64 tcg_rd, tcg_rn;
2368
tcg_rd = cpu_reg(s, rd);
2369
tcg_rn = cpu_reg(s, rn);
2372
gen_helper_clz64(tcg_rd, tcg_rn);
2374
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
2375
tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
2376
gen_helper_clz(tcg_tmp32, tcg_tmp32);
2377
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
2378
tcg_temp_free_i32(tcg_tmp32);
2382
static void handle_cls(DisasContext *s, unsigned int sf,
2383
unsigned int rn, unsigned int rd)
2385
TCGv_i64 tcg_rd, tcg_rn;
2386
tcg_rd = cpu_reg(s, rd);
2387
tcg_rn = cpu_reg(s, rn);
2390
gen_helper_cls64(tcg_rd, tcg_rn);
2392
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
2393
tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
2394
gen_helper_cls32(tcg_tmp32, tcg_tmp32);
2395
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
2396
tcg_temp_free_i32(tcg_tmp32);
2400
static void handle_rbit(DisasContext *s, unsigned int sf,
2401
unsigned int rn, unsigned int rd)
2403
TCGv_i64 tcg_rd, tcg_rn;
2404
tcg_rd = cpu_reg(s, rd);
2405
tcg_rn = cpu_reg(s, rn);
2408
gen_helper_rbit64(tcg_rd, tcg_rn);
2410
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
2411
tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
2412
gen_helper_rbit(tcg_tmp32, tcg_tmp32);
2413
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
2414
tcg_temp_free_i32(tcg_tmp32);
2418
/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
2419
static void handle_rev64(DisasContext *s, unsigned int sf,
2420
unsigned int rn, unsigned int rd)
2423
unallocated_encoding(s);
2426
tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
2429
/* C5.6.149 REV with sf==0, opcode==2
2430
* C5.6.151 REV32 (sf==1, opcode==2)
2432
static void handle_rev32(DisasContext *s, unsigned int sf,
2433
unsigned int rn, unsigned int rd)
2435
TCGv_i64 tcg_rd = cpu_reg(s, rd);
2438
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
2439
TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
2441
/* bswap32_i64 requires zero high word */
2442
tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
2443
tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
2444
tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
2445
tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
2446
tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
2448
tcg_temp_free_i64(tcg_tmp);
2450
tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
2451
tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
2455
/* C5.6.150 REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    /* Swap the bytes within each 16-bit halfword, assembling the result
     * one halfword at a time with deposit.
     */
    tcg_gen_andi_i64(tcg_tmp, tcg_rn, 0xffff);
    tcg_gen_bswap16_i64(tcg_rd, tcg_tmp);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
    tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
    tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 16, 16);

    if (sf) {
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
        tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 32, 16);

        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 48);
        tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 48, 16);
    }

    tcg_temp_free_i64(tcg_tmp);
}
/* C3.5.7 Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, rn, rd;

    if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0: /* RBIT */
        handle_rbit(s, sf, rn, rd);
        break;
    case 1: /* REV16 */
        handle_rev16(s, sf, rn, rd);
        break;
    case 2: /* REV32 */
        handle_rev32(s, sf, rn, rd);
        break;
    case 3: /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case 4: /* CLZ */
        handle_clz(s, sf, rn, rd);
        break;
    case 5: /* CLS */
        handle_cls(s, sf, rn, rd);
        break;
    }
}
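/* Integer division (UDIV/SDIV). For the 32-bit signed form the operands
 * are sign-extended to 64 bits so that the 64-bit helper produces the
 * correct quotient; the result is zero-extended back to 32 bits below.
 */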
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
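/* For the variable shift insns the shift amount is taken from a register
 * and masked to the operand width (bits [5:0] for 64-bit, [4:0] for 32-bit
 * operations) before it is applied.
 */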
/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
    tcg_temp_free_i64(tcg_shift);
}
/* C3.5.8 Data-processing (2 source)
 *   31 30 29 28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd;
    sf = extract32(insn, 31, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.5 Data processing - register */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 5)) {
    case 0x0a: /* Logical (shifted register) */
        disas_logic_reg(s, insn);
        break;
    case 0x0b: /* Add/subtract */
        if (insn & (1 << 21)) { /* (extended register) */
            disas_add_sub_ext_reg(s, insn);
        } else {
            disas_add_sub_reg(s, insn);
        }
        break;
    case 0x1b: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;
    case 0x1a:
        switch (extract32(insn, 21, 3)) {
        case 0x0: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;
        case 0x2: /* Conditional compare */
            if (insn & (1 << 11)) { /* (immediate) */
                disas_cc_imm(s, insn);
            } else {            /* (register) */
                disas_cc_reg(s, insn);
            }
            break;
        case 0x4: /* Conditional select */
            disas_cond_select(s, insn);
            break;
        case 0x6: /* Data-processing */
            if (insn & (1 << 30)) { /* (1 source) */
                disas_data_proc_1src(s, insn);
            } else {            /* (2 source) */
                disas_data_proc_2src(s, insn);
            }
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    /* Not yet implemented: report all SIMD/FP insns as unsupported. */
    unsupported_encoding(s, insn);
}
/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->bswap_code);
    s->insn = insn;
    s->pc += 4;

    /* Dispatch on the major opcode field, insn bits [28:25] */
    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe: /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}
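/* Translate a block of A64 instructions starting at tb->pc into TCG ops,
 * stopping at a branch, a page boundary, or when the op buffer or the
 * instruction-count limit is reached. When search_pc is true the per-op
 * PC/icount tables are also filled in so that a host PC can be mapped
 * back to a guest instruction.
 */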
void gen_intermediate_code_internal_a64(ARMCPU *cpu,
                                        TranslationBlock *tb,
                                        bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    dc->thumb = 0;
    dc->bswap_code = 0;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
#if !defined(CONFIG_USER_ONLY)
    dc->user = 0;
#endif
    dc->vfp_enabled = 0;
    dc->vec_len = 0;
    dc->vec_stride = 0;

    init_tmp_a64_array(dc);

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();

    tcg_clear_temp_count();

    do {
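        /* Check whether a breakpoint has been set on the PC we are about
         * to translate; if so, emit a debug exception and stop.
         */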
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        disas_a64_insn(env, dc);

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place.
         */
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
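    /* The main translation loop is done; emit the code that ends the TB:
     * a debug exception if we are single-stepping, otherwise the exit
     * appropriate to how the last instruction left dc->is_jmp.
     */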
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        assert(dc->is_jmp != DISAS_TB_JUMP);
        if (dc->is_jmp != DISAS_JUMP) {
            gen_a64_set_pc_im(dc->pc);
        }
        gen_exception(EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
        case DISAS_EXC:
        case DISAS_SWI:
            break;
        case DISAS_WFI:
            /* This is a special case because we don't want to just halt the
             * CPU if trying to debug across a WFI.
             */
            gen_helper_wfi(cpu_env);
            break;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;