1
/* aarch64-dis.c -- AArch64 disassembler.
2
Copyright 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
3
Contributed by ARM Ltd.
5
This file is part of the GNU opcodes library.
7
This library is free software; you can redistribute it and/or modify
8
it under the terms of the GNU General Public License as published by
9
the Free Software Foundation; either version 3, or (at your option)
12
It is distributed in the hope that it will be useful, but WITHOUT
13
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15
License for more details.
17
You should have received a copy of the GNU General Public License
18
along with this program; see the file COPYING3. If not,
19
see <http://www.gnu.org/licenses/>. */
22
#include "bfd_stdint.h"
24
#include "libiberty.h"
26
#include "aarch64-dis.h"
28
#if !defined(EMBEDDED_ENV)
29
#define SYMTAB_AVAILABLE 1
31
#include "elf/aarch64.h"
41
/* Cached mapping symbol state. */
48
static enum map_type last_type;
49
static int last_mapping_sym = -1;
50
static bfd_vma last_mapping_addr = 0;
53
static int no_aliases = 0; /* If set disassemble as most general inst. */
57
set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
62
parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
64
/* Try to match options that are simple flags */
65
if (CONST_STRNEQ (option, "no-aliases"))
71
if (CONST_STRNEQ (option, "aliases"))
78
if (CONST_STRNEQ (option, "debug_dump"))
83
#endif /* DEBUG_AARCH64 */
86
fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
90
parse_aarch64_dis_options (const char *options)
92
const char *option_end;
97
while (*options != '\0')
99
/* Skip empty options. */
106
/* We know that *options is neither NUL or a comma. */
107
option_end = options + 1;
108
while (*option_end != ',' && *option_end != '\0')
111
parse_aarch64_dis_option (options, option_end - options);
113
/* Go on to the next one. If option_end points to a comma, it
114
will be skipped above. */
115
options = option_end;
119
/* Functions doing the instruction disassembling. */
121
/* The unnamed arguments consist of the number of fields and information about
122
these fields where the VALUE will be extracted from CODE and returned.
123
MASK can be zero or the base mask of the opcode.
125
N.B. the fields are required to be in such an order that the most significant
126
field for VALUE comes the first, e.g. the <index> in
127
SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
128
is encoded in H:L:M in some cases, then the fields H:L:M should be passed in
129
the order of H, L, M. */
131
static inline aarch64_insn
132
extract_fields (aarch64_insn code, aarch64_insn mask, ...)
135
const aarch64_field *field;
136
enum aarch64_field_kind kind;
140
num = va_arg (va, uint32_t);
142
aarch64_insn value = 0x0;
145
kind = va_arg (va, enum aarch64_field_kind);
146
field = &fields[kind];
147
value <<= field->width;
148
value |= extract_field (kind, code, mask);
153
/* Sign-extend bit I of VALUE. */
154
static inline int32_t
155
sign_extend (aarch64_insn value, unsigned i)
157
uint32_t ret = value;
160
if ((value >> i) & 0x1)
162
uint32_t val = (uint32_t)(-1) << i;
165
return (int32_t) ret;
168
/* N.B. the following inline helper functions create a dependency on the
169
order of operand qualifier enumerators. */
171
/* Given VALUE, return qualifier for a general purpose register. */
172
static inline enum aarch64_opnd_qualifier
173
get_greg_qualifier_from_value (aarch64_insn value)
175
enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
177
&& aarch64_get_qualifier_standard_value (qualifier) == value);
181
/* Given VALUE, return qualifier for a vector register. */
182
static inline enum aarch64_opnd_qualifier
183
get_vreg_qualifier_from_value (aarch64_insn value)
185
enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
188
&& aarch64_get_qualifier_standard_value (qualifier) == value);
192
/* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
193
static inline enum aarch64_opnd_qualifier
194
get_sreg_qualifier_from_value (aarch64_insn value)
196
enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
199
&& aarch64_get_qualifier_standard_value (qualifier) == value);
203
/* Given the instruction in *INST which is probably half way through the
204
decoding and our caller wants to know the expected qualifier for operand
205
I. Return such a qualifier if we can establish it; otherwise return
206
AARCH64_OPND_QLF_NIL. */
208
static aarch64_opnd_qualifier_t
209
get_expected_qualifier (const aarch64_inst *inst, int i)
211
aarch64_opnd_qualifier_seq_t qualifiers;
212
/* Should not be called if the qualifier is known. */
213
assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
214
if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
216
return qualifiers[i];
218
return AARCH64_OPND_QLF_NIL;
221
/* Operand extractors. */
224
aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
225
const aarch64_insn code,
226
const aarch64_inst *inst ATTRIBUTE_UNUSED)
228
info->reg.regno = extract_field (self->fields[0], code, 0);
232
/* e.g. IC <ic_op>{, <Xt>}. */
234
aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
235
const aarch64_insn code,
236
const aarch64_inst *inst ATTRIBUTE_UNUSED)
238
info->reg.regno = extract_field (self->fields[0], code, 0);
239
assert (info->idx == 1
240
&& (aarch64_get_operand_class (inst->operands[0].type)
241
== AARCH64_OPND_CLASS_SYSTEM));
242
/* This will make the constraint checking happy and more importantly will
243
help the disassembler determine whether this operand is optional or
245
info->present = inst->operands[0].sysins_op->has_xt;
250
/* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
252
aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
253
const aarch64_insn code,
254
const aarch64_inst *inst ATTRIBUTE_UNUSED)
257
info->reglane.regno = extract_field (self->fields[0], code,
260
/* Index and/or type. */
261
if (inst->opcode->iclass == asisdone
262
|| inst->opcode->iclass == asimdins)
264
if (info->type == AARCH64_OPND_En
265
&& inst->opcode->operands[0] == AARCH64_OPND_Ed)
268
/* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
269
assert (info->idx == 1); /* Vn */
270
aarch64_insn value = extract_field (FLD_imm4, code, 0);
271
/* Depend on AARCH64_OPND_Ed to determine the qualifier. */
272
info->qualifier = get_expected_qualifier (inst, info->idx);
273
shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
274
info->reglane.index = value >> shift;
278
/* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
286
aarch64_insn value = extract_field (FLD_imm5, code, 0);
287
while (++pos <= 3 && (value & 0x1) == 0)
291
info->qualifier = get_sreg_qualifier_from_value (pos);
292
info->reglane.index = (unsigned) (value >> 1);
297
/* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
298
or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
300
/* Need information in other operand(s) to help decoding. */
301
info->qualifier = get_expected_qualifier (inst, info->idx);
302
switch (info->qualifier)
304
case AARCH64_OPND_QLF_S_H:
306
info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
308
info->reglane.regno &= 0xf;
310
case AARCH64_OPND_QLF_S_S:
312
info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
314
case AARCH64_OPND_QLF_S_D:
316
info->reglane.index = extract_field (FLD_H, code, 0);
327
aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
328
const aarch64_insn code,
329
const aarch64_inst *inst ATTRIBUTE_UNUSED)
332
info->reglist.first_regno = extract_field (self->fields[0], code, 0);
334
info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
338
/* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
340
aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
341
aarch64_opnd_info *info, const aarch64_insn code,
342
const aarch64_inst *inst)
345
/* Number of elements in each structure to be loaded/stored. */
346
unsigned expected_num = get_opcode_dependent_value (inst->opcode);
350
unsigned is_reserved;
352
unsigned num_elements;
368
info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
370
value = extract_field (FLD_opcode, code, 0);
371
if (expected_num != data[value].num_elements || data[value].is_reserved)
373
info->reglist.num_regs = data[value].num_regs;
378
/* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
379
lanes instructions. */
381
aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
382
aarch64_opnd_info *info, const aarch64_insn code,
383
const aarch64_inst *inst)
388
info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
390
value = extract_field (FLD_S, code, 0);
392
/* Number of registers is equal to the number of elements in
393
each structure to be loaded/stored. */
394
info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
395
assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
397
/* Except when it is LD1R. */
398
if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
399
info->reglist.num_regs = 2;
404
/* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
405
load/store single element instructions. */
407
aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
408
aarch64_opnd_info *info, const aarch64_insn code,
409
const aarch64_inst *inst ATTRIBUTE_UNUSED)
411
aarch64_field field = {0, 0};
412
aarch64_insn QSsize; /* fields Q:S:size. */
413
aarch64_insn opcodeh2; /* opcode<2:1> */
416
info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
418
/* Decode the index, opcode<2:1> and size. */
419
gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
420
opcodeh2 = extract_field_2 (&field, code, 0);
421
QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
425
info->qualifier = AARCH64_OPND_QLF_S_B;
426
/* Index encoded in "Q:S:size". */
427
info->reglist.index = QSsize;
430
info->qualifier = AARCH64_OPND_QLF_S_H;
431
/* Index encoded in "Q:S:size<1>". */
432
info->reglist.index = QSsize >> 1;
435
if ((QSsize & 0x1) == 0)
437
info->qualifier = AARCH64_OPND_QLF_S_S;
438
/* Index encoded in "Q:S". */
439
info->reglist.index = QSsize >> 2;
443
info->qualifier = AARCH64_OPND_QLF_S_D;
444
/* Index encoded in "Q". */
445
info->reglist.index = QSsize >> 3;
446
if (extract_field (FLD_S, code, 0))
455
info->reglist.has_index = 1;
456
info->reglist.num_regs = 0;
457
/* Number of registers is equal to the number of elements in
458
each structure to be loaded/stored. */
459
info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
460
assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
465
/* Decode fields immh:immb and/or Q for e.g.
466
SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
467
or SSHR <V><d>, <V><n>, #<shift>. */
470
aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
471
aarch64_opnd_info *info, const aarch64_insn code,
472
const aarch64_inst *inst)
475
aarch64_insn Q, imm, immh;
476
enum aarch64_insn_class iclass = inst->opcode->iclass;
478
immh = extract_field (FLD_immh, code, 0);
481
imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
483
/* Get highest set bit in immh. */
484
while (--pos >= 0 && (immh & 0x8) == 0)
487
assert ((iclass == asimdshf || iclass == asisdshf)
488
&& (info->type == AARCH64_OPND_IMM_VLSR
489
|| info->type == AARCH64_OPND_IMM_VLSL));
491
if (iclass == asimdshf)
493
Q = extract_field (FLD_Q, code, 0);
495
0000 x SEE AdvSIMD modified immediate
505
get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
508
info->qualifier = get_sreg_qualifier_from_value (pos);
510
if (info->type == AARCH64_OPND_IMM_VLSR)
512
0000 SEE AdvSIMD modified immediate
513
0001 (16-UInt(immh:immb))
514
001x (32-UInt(immh:immb))
515
01xx (64-UInt(immh:immb))
516
1xxx (128-UInt(immh:immb)) */
517
info->imm.value = (16 << pos) - imm;
521
0000 SEE AdvSIMD modified immediate
522
0001 (UInt(immh:immb)-8)
523
001x (UInt(immh:immb)-16)
524
01xx (UInt(immh:immb)-32)
525
1xxx (UInt(immh:immb)-64) */
526
info->imm.value = imm - (8 << pos);
531
/* Decode shift immediate for e.g. sshr (imm). */
533
aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
534
aarch64_opnd_info *info, const aarch64_insn code,
535
const aarch64_inst *inst ATTRIBUTE_UNUSED)
539
val = extract_field (FLD_size, code, 0);
542
case 0: imm = 8; break;
543
case 1: imm = 16; break;
544
case 2: imm = 32; break;
547
info->imm.value = imm;
551
/* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
552
value in the field(s) will be extracted as unsigned immediate value. */
554
aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
555
const aarch64_insn code,
556
const aarch64_inst *inst ATTRIBUTE_UNUSED)
559
/* Maximum of two fields to extract. */
560
assert (self->fields[2] == FLD_NIL);
562
if (self->fields[1] == FLD_NIL)
563
imm = extract_field (self->fields[0], code, 0);
565
/* e.g. TBZ b5:b40. */
566
imm = extract_fields (code, 0, 2, self->fields[0], self->fields[1]);
568
if (info->type == AARCH64_OPND_FPIMM)
571
if (operand_need_sign_extension (self))
572
imm = sign_extend (imm, get_operand_fields_width (self) - 1);
574
if (operand_need_shift_by_two (self))
577
if (info->type == AARCH64_OPND_ADDR_ADRP)
580
info->imm.value = imm;
584
/* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
586
aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
587
const aarch64_insn code,
588
const aarch64_inst *inst ATTRIBUTE_UNUSED)
590
aarch64_ext_imm (self, info, code, inst);
591
info->shifter.kind = AARCH64_MOD_LSL;
592
info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
596
/* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
597
MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
599
aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
600
aarch64_opnd_info *info,
601
const aarch64_insn code,
602
const aarch64_inst *inst ATTRIBUTE_UNUSED)
605
enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
606
aarch64_field field = {0, 0};
608
assert (info->idx == 1);
610
if (info->type == AARCH64_OPND_SIMD_FPIMM)
613
/* a:b:c:d:e:f:g:h */
614
imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
615
if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
617
/* Either MOVI <Dd>, #<imm>
618
or MOVI <Vd>.2D, #<imm>.
619
<imm> is a 64-bit immediate
620
'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
621
encoded in "a:b:c:d:e:f:g:h". */
623
unsigned abcdefgh = imm;
624
for (imm = 0ull, i = 0; i < 8; i++)
625
if (((abcdefgh >> i) & 0x1) != 0)
626
imm |= 0xffull << (8 * i);
628
info->imm.value = imm;
631
info->qualifier = get_expected_qualifier (inst, info->idx);
632
switch (info->qualifier)
634
case AARCH64_OPND_QLF_NIL:
636
info->shifter.kind = AARCH64_MOD_NONE;
638
case AARCH64_OPND_QLF_LSL:
640
info->shifter.kind = AARCH64_MOD_LSL;
641
switch (aarch64_get_qualifier_esize (opnd0_qualifier))
643
case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
644
case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
645
case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
646
default: assert (0); return 0;
648
/* 00: 0; 01: 8; 10:16; 11:24. */
649
info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
651
case AARCH64_OPND_QLF_MSL:
653
info->shifter.kind = AARCH64_MOD_MSL;
654
gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
655
info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
665
/* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
667
aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
668
aarch64_opnd_info *info, const aarch64_insn code,
669
const aarch64_inst *inst ATTRIBUTE_UNUSED)
671
info->imm.value = 64- extract_field (FLD_scale, code, 0);
675
/* Decode arithmetic immediate for e.g.
676
SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
678
aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
679
aarch64_opnd_info *info, const aarch64_insn code,
680
const aarch64_inst *inst ATTRIBUTE_UNUSED)
684
info->shifter.kind = AARCH64_MOD_LSL;
686
value = extract_field (FLD_shift, code, 0);
689
info->shifter.amount = value ? 12 : 0;
690
/* imm12 (unsigned) */
691
info->imm.value = extract_field (FLD_imm12, code, 0);
696
/* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
699
aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
700
aarch64_opnd_info *info, const aarch64_insn code,
701
const aarch64_inst *inst ATTRIBUTE_UNUSED)
709
value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
710
assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
711
|| inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
712
sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
714
/* value is N:immr:imms. */
716
R = (value >> 6) & 0x3f;
717
N = (value >> 12) & 0x1;
719
if (sf == 0 && N == 1)
722
/* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
723
(in other words, right rotated by R), then replicated. */
727
mask = 0xffffffffffffffffull;
733
case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
734
case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
735
case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
736
case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
737
case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
740
mask = (1ull << simd_size) - 1;
741
/* Top bits are IGNORED. */
744
/* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
745
if (S == simd_size - 1)
747
/* S+1 consecutive bits to 1. */
748
/* NOTE: S can't be 63 due to detection above. */
749
imm = (1ull << (S + 1)) - 1;
750
/* Rotate to the left by simd_size - R. */
752
imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
753
/* Replicate the value according to SIMD size. */
756
case 2: imm = (imm << 2) | imm;
757
case 4: imm = (imm << 4) | imm;
758
case 8: imm = (imm << 8) | imm;
759
case 16: imm = (imm << 16) | imm;
760
case 32: imm = (imm << 32) | imm;
762
default: assert (0); return 0;
765
info->imm.value = sf ? imm : imm & 0xffffffff;
770
/* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
771
or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
773
aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
774
aarch64_opnd_info *info,
775
const aarch64_insn code, const aarch64_inst *inst)
780
info->reg.regno = extract_field (FLD_Rt, code, 0);
783
value = extract_field (FLD_ldst_size, code, 0);
784
if (inst->opcode->iclass == ldstpair_indexed
785
|| inst->opcode->iclass == ldstnapair_offs
786
|| inst->opcode->iclass == ldstpair_off
787
|| inst->opcode->iclass == loadlit)
789
enum aarch64_opnd_qualifier qualifier;
792
case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
793
case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
794
case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
797
info->qualifier = qualifier;
802
value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
805
info->qualifier = get_sreg_qualifier_from_value (value);
811
/* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
813
aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
814
aarch64_opnd_info *info,
816
const aarch64_inst *inst ATTRIBUTE_UNUSED)
819
info->addr.base_regno = extract_field (FLD_Rn, code, 0);
823
/* Decode the address operand for e.g.
824
STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
826
aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
827
aarch64_opnd_info *info,
828
aarch64_insn code, const aarch64_inst *inst)
830
aarch64_insn S, value;
833
info->addr.base_regno = extract_field (FLD_Rn, code, 0);
835
info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
837
value = extract_field (FLD_option, code, 0);
839
aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
840
/* Fix-up the shifter kind; although the table-driven approach is
841
efficient, it is slightly inflexible, thus needing this fix-up. */
842
if (info->shifter.kind == AARCH64_MOD_UXTX)
843
info->shifter.kind = AARCH64_MOD_LSL;
845
S = extract_field (FLD_S, code, 0);
848
info->shifter.amount = 0;
849
info->shifter.amount_present = 0;
854
/* Need information in other operand(s) to help achieve the decoding
856
info->qualifier = get_expected_qualifier (inst, info->idx);
857
/* Get the size of the data element that is accessed, which may be
858
different from that of the source register size, e.g. in strb/ldrb. */
859
size = aarch64_get_qualifier_esize (info->qualifier);
860
info->shifter.amount = get_logsz (size);
861
info->shifter.amount_present = 1;
867
/* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
869
aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
870
aarch64_insn code, const aarch64_inst *inst)
873
info->qualifier = get_expected_qualifier (inst, info->idx);
876
info->addr.base_regno = extract_field (FLD_Rn, code, 0);
877
/* simm (imm9 or imm7) */
878
imm = extract_field (self->fields[0], code, 0);
879
info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
880
if (self->fields[0] == FLD_imm7)
881
/* scaled immediate in ld/st pair instructions. */
882
info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
884
if (inst->opcode->iclass == ldst_unscaled
885
|| inst->opcode->iclass == ldstnapair_offs
886
|| inst->opcode->iclass == ldstpair_off
887
|| inst->opcode->iclass == ldst_unpriv)
888
info->addr.writeback = 0;
891
/* pre/post- index */
892
info->addr.writeback = 1;
893
if (extract_field (self->fields[1], code, 0) == 1)
894
info->addr.preind = 1;
896
info->addr.postind = 1;
902
/* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
904
aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
906
const aarch64_inst *inst ATTRIBUTE_UNUSED)
909
info->qualifier = get_expected_qualifier (inst, info->idx);
910
shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
912
info->addr.base_regno = extract_field (self->fields[0], code, 0);
914
info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
918
/* Decode the address operand for e.g.
919
LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
921
aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
922
aarch64_opnd_info *info,
923
aarch64_insn code, const aarch64_inst *inst)
925
/* The opcode dependent area stores the number of elements in
926
each structure to be loaded/stored. */
927
int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
930
info->addr.base_regno = extract_field (FLD_Rn, code, 0);
932
info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
933
if (info->addr.offset.regno == 31)
935
if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
936
/* Special handling of loading single structure to all lane. */
937
info->addr.offset.imm = (is_ld1r ? 1
938
: inst->operands[0].reglist.num_regs)
939
* aarch64_get_qualifier_esize (inst->operands[0].qualifier);
941
info->addr.offset.imm = inst->operands[0].reglist.num_regs
942
* aarch64_get_qualifier_esize (inst->operands[0].qualifier)
943
* aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
946
info->addr.offset.is_reg = 1;
947
info->addr.writeback = 1;
952
/* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
954
aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
955
aarch64_opnd_info *info,
956
aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
960
value = extract_field (FLD_cond, code, 0);
961
info->cond = get_cond_from_value (value);
965
/* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
967
aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
968
aarch64_opnd_info *info,
970
const aarch64_inst *inst ATTRIBUTE_UNUSED)
972
/* op0:op1:CRn:CRm:op2 */
973
info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
978
/* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
980
aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
981
aarch64_opnd_info *info, aarch64_insn code,
982
const aarch64_inst *inst ATTRIBUTE_UNUSED)
986
info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
987
for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
988
if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
990
/* Reserved value in <pstatefield>. */
994
/* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
996
aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
997
aarch64_opnd_info *info,
999
const aarch64_inst *inst ATTRIBUTE_UNUSED)
1003
const aarch64_sys_ins_reg *sysins_ops;
1004
/* op0:op1:CRn:CRm:op2 */
1005
value = extract_fields (code, 0, 5,
1006
FLD_op0, FLD_op1, FLD_CRn,
1011
case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1012
case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1013
case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1014
case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1015
default: assert (0); return 0;
1018
for (i = 0; sysins_ops[i].template != NULL; ++i)
1019
if (sysins_ops[i].value == value)
1021
info->sysins_op = sysins_ops + i;
1022
DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1023
info->sysins_op->template,
1024
(unsigned)info->sysins_op->value,
1025
info->sysins_op->has_xt, i);
1032
/* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1035
aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1036
aarch64_opnd_info *info,
1038
const aarch64_inst *inst ATTRIBUTE_UNUSED)
1041
info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1045
/* Decode the prefetch operation option operand for e.g.
1046
PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1049
aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1050
aarch64_opnd_info *info,
1051
aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1054
info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1058
/* Decode the extended register operand for e.g.
1059
STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1061
aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1062
aarch64_opnd_info *info,
1064
const aarch64_inst *inst ATTRIBUTE_UNUSED)
1069
info->reg.regno = extract_field (FLD_Rm, code, 0);
1071
value = extract_field (FLD_option, code, 0);
1072
info->shifter.kind =
1073
aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1075
info->shifter.amount = extract_field (FLD_imm3, code, 0);
1077
/* This makes the constraint checking happy. */
1078
info->shifter.operator_present = 1;
1080
/* Assume inst->operands[0].qualifier has been resolved. */
1081
assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1082
info->qualifier = AARCH64_OPND_QLF_W;
1083
if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1084
&& (info->shifter.kind == AARCH64_MOD_UXTX
1085
|| info->shifter.kind == AARCH64_MOD_SXTX))
1086
info->qualifier = AARCH64_OPND_QLF_X;
1091
/* Decode the shifted register operand for e.g.
1092
SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1094
aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1095
aarch64_opnd_info *info,
1097
const aarch64_inst *inst ATTRIBUTE_UNUSED)
1102
info->reg.regno = extract_field (FLD_Rm, code, 0);
1104
value = extract_field (FLD_shift, code, 0);
1105
info->shifter.kind =
1106
aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1107
if (info->shifter.kind == AARCH64_MOD_ROR
1108
&& inst->opcode->iclass != log_shift)
1109
/* ROR is not available for the shifted register operand in arithmetic
1113
info->shifter.amount = extract_field (FLD_imm6, code, 0);
1115
/* This makes the constraint checking happy. */
1116
info->shifter.operator_present = 1;
1121
/* Bitfields that are commonly used to encode certain operands' information
1122
may be partially used as part of the base opcode in some instructions.
1123
For example, the bit 1 of the field 'size' in
1124
FCVTXN <Vb><d>, <Va><n>
1125
is actually part of the base opcode, while only size<0> is available
1126
for encoding the register type. Another example is the AdvSIMD
1127
instruction ORR (register), in which the field 'size' is also used for
1128
the base opcode, leaving only the field 'Q' available to encode the
1129
vector register arrangement specifier '8B' or '16B'.
1131
This function tries to deduce the qualifier from the value of partially
1132
constrained field(s). Given the VALUE of such a field or fields, the
1133
qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1134
operand encoding), the function returns the matching qualifier or
1135
AARCH64_OPND_QLF_NIL if nothing matches.
1137
N.B. CANDIDATES is a group of possible qualifiers that are valid for
1138
one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1139
may end with AARCH64_OPND_QLF_NIL. */
1141
static enum aarch64_opnd_qualifier
1142
get_qualifier_from_partial_encoding (aarch64_insn value,
1143
const enum aarch64_opnd_qualifier* \
1148
DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1149
for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1151
aarch64_insn standard_value;
1152
if (candidates[i] == AARCH64_OPND_QLF_NIL)
1154
standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1155
if ((standard_value & mask) == (value & mask))
1156
return candidates[i];
1158
return AARCH64_OPND_QLF_NIL;
1161
/* Given a list of qualifier sequences, return all possible valid qualifiers
1162
for operand IDX in QUALIFIERS.
1163
Assume QUALIFIERS is an array whose length is large enough. */
1166
get_operand_possible_qualifiers (int idx,
1167
const aarch64_opnd_qualifier_seq_t *list,
1168
enum aarch64_opnd_qualifier *qualifiers)
1171
for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1172
if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1176
/* Decode the size Q field for e.g. SHADD.
1177
We tag one operand with the qualifier according to the code;
1178
whether the qualifier is valid for this opcode or not, it is the
1179
duty of the semantic checking. */
1182
decode_sizeq (aarch64_inst *inst)
1185
enum aarch64_opnd_qualifier qualifier;
1187
aarch64_insn value, mask;
1188
enum aarch64_field_kind fld_sz;
1189
enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1191
if (inst->opcode->iclass == asisdlse
1192
|| inst->opcode->iclass == asisdlsep
1193
|| inst->opcode->iclass == asisdlso
1194
|| inst->opcode->iclass == asisdlsop)
1195
fld_sz = FLD_vldst_size;
1200
value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1201
/* Obtain the info that which bits of fields Q and size are actually
1202
available for operand encoding. Opcodes like FMAXNM and FMLA have
1203
size[1] unavailable. */
1204
mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1206
/* The index of the operand we are going to tag a qualifier and the qualifier
1207
itself are reasoned from the value of the size and Q fields and the
1208
possible valid qualifier lists. */
1209
idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1210
DEBUG_TRACE ("key idx: %d", idx);
1212
/* For most related instructions, size:Q are fully available for operand
1216
inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1220
get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1222
#ifdef DEBUG_AARCH64
1226
for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1227
&& i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1228
DEBUG_TRACE ("qualifier %d: %s", i,
1229
aarch64_get_qualifier_name(candidates[i]));
1230
DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1232
#endif /* DEBUG_AARCH64 */
1234
qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1236
if (qualifier == AARCH64_OPND_QLF_NIL)
1239
inst->operands[idx].qualifier = qualifier;
1243
/* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1244
e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1247
decode_asimd_fcvt (aarch64_inst *inst)
1249
aarch64_field field = {0, 0};
1251
enum aarch64_opnd_qualifier qualifier;
1253
gen_sub_field (FLD_size, 0, 1, &field);
1254
value = extract_field_2 (&field, inst->value, 0);
1255
qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1256
: AARCH64_OPND_QLF_V_2D;
1257
switch (inst->opcode->op)
1261
/* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1262
inst->operands[1].qualifier = qualifier;
1266
/* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1267
inst->operands[0].qualifier = qualifier;
1277
/* Decode size[0], i.e. bit 22, for
1278
e.g. FCVTXN <Vb><d>, <Va><n>. */
1281
decode_asisd_fcvtxn (aarch64_inst *inst)
1283
aarch64_field field = {0, 0};
1284
gen_sub_field (FLD_size, 0, 1, &field);
1285
if (!extract_field_2 (&field, inst->value, 0))
1287
inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1291
/* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1293
decode_fcvt (aarch64_inst *inst)
1295
enum aarch64_opnd_qualifier qualifier;
1297
const aarch64_field field = {15, 2};
1300
value = extract_field_2 (&field, inst->value, 0);
1303
case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1304
case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1305
case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1308
inst->operands[0].qualifier = qualifier;
1313
/* Do miscellaneous decodings that are not common enough to be driven by
1317
do_misc_decoding (aarch64_inst *inst)
1319
switch (inst->opcode->op)
1322
return decode_fcvt (inst);
1327
return decode_asimd_fcvt (inst);
1329
return decode_asisd_fcvtxn (inst);
1335
/* Opcodes that have fields shared by multiple operands are usually flagged
1336
with flags. In this function, we detect such flags, decode the related
1337
field(s) and store the information in one of the related operands. The
1338
'one' operand is not any operand but one of the operands that can
1339
accommadate all the information that has been decoded. */
1342
do_special_decoding (aarch64_inst *inst)
1346
/* Condition for truly conditional executed instructions, e.g. b.cond. */
1347
if (inst->opcode->flags & F_COND)
1349
value = extract_field (FLD_cond2, inst->value, 0);
1350
inst->cond = get_cond_from_value (value);
1353
if (inst->opcode->flags & F_SF)
1355
idx = select_operand_for_sf_field_coding (inst->opcode);
1356
value = extract_field (FLD_sf, inst->value, 0);
1357
inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1358
if ((inst->opcode->flags & F_N)
1359
&& extract_field (FLD_N, inst->value, 0) != value)
1362
/* size:Q fields. */
1363
if (inst->opcode->flags & F_SIZEQ)
1364
return decode_sizeq (inst);
1366
if (inst->opcode->flags & F_FPTYPE)
1368
idx = select_operand_for_fptype_field_coding (inst->opcode);
1369
value = extract_field (FLD_type, inst->value, 0);
1372
case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1373
case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1374
case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1379
if (inst->opcode->flags & F_SSIZE)
1381
/* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1382
of the base opcode. */
1384
enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1385
idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1386
value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1387
mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1388
/* For most related instruciton, the 'size' field is fully available for
1389
operand encoding. */
1391
inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1394
get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1396
inst->operands[idx].qualifier
1397
= get_qualifier_from_partial_encoding (value, candidates, mask);
1401
if (inst->opcode->flags & F_T)
1403
/* Num of consecutive '0's on the right side of imm5<3:0>. */
1406
assert (aarch64_get_operand_class (inst->opcode->operands[0])
1407
== AARCH64_OPND_CLASS_SIMD_REG);
1418
val = extract_field (FLD_imm5, inst->value, 0);
1419
while ((val & 0x1) == 0 && ++num <= 3)
1423
Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1424
inst->operands[0].qualifier =
1425
get_vreg_qualifier_from_value ((num << 1) | Q);
1428
if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1430
/* Use Rt to encode in the case of e.g.
1431
STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1432
idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1435
/* Otherwise use the result operand, which has to be a integer
1437
assert (aarch64_get_operand_class (inst->opcode->operands[0])
1438
== AARCH64_OPND_CLASS_INT_REG);
1441
assert (idx == 0 || idx == 1);
1442
value = extract_field (FLD_Q, inst->value, 0);
1443
inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1446
if (inst->opcode->flags & F_LDS_SIZE)
1448
aarch64_field field = {0, 0};
1449
assert (aarch64_get_operand_class (inst->opcode->operands[0])
1450
== AARCH64_OPND_CLASS_INT_REG);
1451
gen_sub_field (FLD_opc, 0, 1, &field);
1452
value = extract_field_2 (&field, inst->value, 0);
1453
inst->operands[0].qualifier
1454
= value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1457
/* Miscellaneous decoding; done as the last step. */
1458
if (inst->opcode->flags & F_MISC)
1459
return do_misc_decoding (inst);
1464
/* Converters converting a real opcode instruction to its alias form. */
1466
/* ROR <Wd>, <Ws>, #<shift>
1468
EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1470
convert_extr_to_ror (aarch64_inst *inst)
1472
if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1474
copy_operand_info (inst, 2, 3);
1475
inst->operands[3].type = AARCH64_OPND_NIL;
1481
/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1483
USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1485
convert_shll_to_xtl (aarch64_inst *inst)
1487
if (inst->operands[2].imm.value == 0)
1489
inst->operands[2].type = AARCH64_OPND_NIL;
1496
UBFM <Xd>, <Xn>, #<shift>, #63.
1498
LSR <Xd>, <Xn>, #<shift>. */
1500
convert_bfm_to_sr (aarch64_inst *inst)
1504
imms = inst->operands[3].imm.value;
1505
val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1508
inst->operands[3].type = AARCH64_OPND_NIL;
1515
/* Convert MOV to ORR. */
1517
convert_orr_to_mov (aarch64_inst *inst)
1519
/* MOV <Vd>.<T>, <Vn>.<T>
1521
ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1522
if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1524
inst->operands[2].type = AARCH64_OPND_NIL;
1530
/* When <imms> >= <immr>, the instruction written:
1531
SBFX <Xd>, <Xn>, #<lsb>, #<width>
1533
SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1536
convert_bfm_to_bfx (aarch64_inst *inst)
1540
immr = inst->operands[2].imm.value;
1541
imms = inst->operands[3].imm.value;
1545
inst->operands[2].imm.value = lsb;
1546
inst->operands[3].imm.value = imms + 1 - lsb;
1547
/* The two opcodes have different qualifiers for
1548
the immediate operands; reset to help the checking. */
1549
reset_operand_qualifier (inst, 2);
1550
reset_operand_qualifier (inst, 3);
1557
/* When <imms> < <immr>, the instruction written:
1558
SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1560
SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1563
convert_bfm_to_bfi (aarch64_inst *inst)
1565
int64_t immr, imms, val;
1567
immr = inst->operands[2].imm.value;
1568
imms = inst->operands[3].imm.value;
1569
val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1572
inst->operands[2].imm.value = (val - immr) & (val - 1);
1573
inst->operands[3].imm.value = imms + 1;
1574
/* The two opcodes have different qualifiers for
1575
the immediate operands; reset to help the checking. */
1576
reset_operand_qualifier (inst, 2);
1577
reset_operand_qualifier (inst, 3);
1584
/* The instruction written:
1585
LSL <Xd>, <Xn>, #<shift>
1587
UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1590
convert_ubfm_to_lsl (aarch64_inst *inst)
1592
int64_t immr = inst->operands[2].imm.value;
1593
int64_t imms = inst->operands[3].imm.value;
1595
= inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1597
if ((immr == 0 && imms == val) || immr == imms + 1)
1599
inst->operands[3].type = AARCH64_OPND_NIL;
1600
inst->operands[2].imm.value = val - imms;
1607
/* CINC <Wd>, <Wn>, <cond>
1609
CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1612
convert_from_csel (aarch64_inst *inst)
1614
if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1616
copy_operand_info (inst, 2, 3);
1617
inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
1618
inst->operands[3].type = AARCH64_OPND_NIL;
1624
/* CSET <Wd>, <cond>
1626
CSINC <Wd>, WZR, WZR, invert(<cond>). */
1629
convert_csinc_to_cset (aarch64_inst *inst)
1631
if (inst->operands[1].reg.regno == 0x1f
1632
&& inst->operands[2].reg.regno == 0x1f)
1634
copy_operand_info (inst, 1, 3);
1635
inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
1636
inst->operands[3].type = AARCH64_OPND_NIL;
1637
inst->operands[2].type = AARCH64_OPND_NIL;
1645
MOVZ <Wd>, #<imm16>, LSL #<shift>.
1647
A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1648
ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1649
or where a MOVN has an immediate that could be encoded by MOVZ, or where
1650
MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1651
machine-instruction mnemonic must be used. */
1654
convert_movewide_to_mov (aarch64_inst *inst)
1656
uint64_t value = inst->operands[1].imm.value;
1657
/* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
1658
if (value == 0 && inst->operands[1].shifter.amount != 0)
1660
inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1661
inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
1662
value <<= inst->operands[1].shifter.amount;
1663
/* As an alias convertor, it has to be clear that the INST->OPCODE
1664
is the opcode of the real instruction. */
1665
if (inst->opcode->op == OP_MOVN)
1667
int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1669
/* A MOVN has an immediate that could be encoded by MOVZ. */
1670
if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
1673
inst->operands[1].imm.value = value;
1674
inst->operands[1].shifter.amount = 0;
1680
ORR <Wd>, WZR, #<imm>.
1682
A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1683
ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1684
or where a MOVN has an immediate that could be encoded by MOVZ, or where
1685
MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1686
machine-instruction mnemonic must be used. */
1689
convert_movebitmask_to_mov (aarch64_inst *inst)
1694
/* Should have been assured by the base opcode value. */
1695
assert (inst->operands[1].reg.regno == 0x1f);
1696
copy_operand_info (inst, 1, 2);
1697
is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1698
inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1699
value = inst->operands[1].imm.value;
1700
/* ORR has an immediate that could be generated by a MOVZ or MOVN
1702
if (inst->operands[0].reg.regno != 0x1f
1703
&& (aarch64_wide_constant_p (value, is32, NULL) == TRUE
1704
|| aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
1707
inst->operands[2].type = AARCH64_OPND_NIL;
1711
/* Some alias opcodes are disassembled by being converted from their real-form.
1712
N.B. INST->OPCODE is the real opcode rather than the alias. */
1715
convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
1721
return convert_bfm_to_sr (inst);
1723
return convert_ubfm_to_lsl (inst);
1727
return convert_from_csel (inst);
1730
return convert_csinc_to_cset (inst);
1734
return convert_bfm_to_bfx (inst);
1738
return convert_bfm_to_bfi (inst);
1740
return convert_orr_to_mov (inst);
1741
case OP_MOV_IMM_WIDE:
1742
case OP_MOV_IMM_WIDEN:
1743
return convert_movewide_to_mov (inst);
1744
case OP_MOV_IMM_LOG:
1745
return convert_movebitmask_to_mov (inst);
1747
return convert_extr_to_ror (inst);
1752
return convert_shll_to_xtl (inst);
1758
static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
1759
aarch64_inst *, int);
1761
/* Given the instruction information in *INST, check if the instruction has
1762
any alias form that can be used to represent *INST. If the answer is yes,
1763
update *INST to be in the form of the determined alias. */
1765
/* In the opcode description table, the following flags are used in opcode
1766
entries to help establish the relations between the real and alias opcodes:
1768
F_ALIAS: opcode is an alias
1769
F_HAS_ALIAS: opcode has alias(es)
1772
F_P3: Disassembly preference priority 1-3 (the larger the
1773
higher). If nothing is specified, it is the priority
1774
0 by default, i.e. the lowest priority.
1776
Although the relation between the machine and the alias instructions are not
1777
explicitly described, it can be easily determined from the base opcode
1778
values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1779
description entries:
1781
The mask of an alias opcode must be equal to or a super-set (i.e. more
1782
constrained) of that of the aliased opcode; so is the base opcode value.
1784
if (opcode_has_alias (real) && alias_opcode_p (opcode)
1785
&& (opcode->mask & real->mask) == real->mask
1786
&& (real->mask & opcode->opcode) == (real->mask & real->opcode))
1787
then OPCODE is an alias of, and only of, the REAL instruction
1789
The alias relationship is forced flat-structured to keep related algorithm
1790
simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
1792
During the disassembling, the decoding decision tree (in
1793
opcodes/aarch64-dis-2.c) always returns an machine instruction opcode entry;
1794
if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1795
not specified), the disassembler will check whether there is any alias
1796
instruction exists for this real instruction. If there is, the disassembler
1797
will try to disassemble the 32-bit binary again using the alias's rule, or
1798
try to convert the IR to the form of the alias. In the case of the multiple
1799
aliases, the aliases are tried one by one from the highest priority
1800
(currently the flag F_P3) to the lowest priority (no priority flag), and the
1801
first succeeds first adopted.
1803
You may ask why there is a need for the conversion of IR from one form to
1804
another in handling certain aliases. This is because on one hand it avoids
1805
adding more operand code to handle unusual encoding/decoding; on other
1806
hand, during the disassembling, the conversion is an effective approach to
1807
check the condition of an alias (as an alias may be adopted only if certain
1808
conditions are met).
1810
In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1811
aarch64_opcode_table and generated aarch64_find_alias_opcode and
1812
aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
1815
determine_disassembling_preference (struct aarch64_inst *inst)
1817
const aarch64_opcode *opcode;
1818
const aarch64_opcode *alias;
1820
opcode = inst->opcode;
1822
/* This opcode does not have an alias, so use itself. */
1823
if (opcode_has_alias (opcode) == FALSE)
1826
alias = aarch64_find_alias_opcode (opcode);
1829
#ifdef DEBUG_AARCH64
1832
const aarch64_opcode *tmp = alias;
1833
printf ("#### LIST orderd: ");
1836
printf ("%s, ", tmp->name);
1837
tmp = aarch64_find_next_alias_opcode (tmp);
1841
#endif /* DEBUG_AARCH64 */
1843
for (; alias; alias = aarch64_find_next_alias_opcode (alias))
1845
DEBUG_TRACE ("try %s", alias->name);
1846
assert (alias_opcode_p (alias));
1848
/* An alias can be a pseudo opcode which will never be used in the
1849
disassembly, e.g. BIC logical immediate is such a pseudo opcode
1851
if (pseudo_opcode_p (alias))
1853
DEBUG_TRACE ("skip pseudo %s", alias->name);
1857
if ((inst->value & alias->mask) != alias->opcode)
1859
DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
1862
/* No need to do any complicated transformation on operands, if the alias
1863
opcode does not have any operand. */
1864
if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
1866
DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
1867
aarch64_replace_opcode (inst, alias);
1870
if (alias->flags & F_CONV)
1873
memcpy (©, inst, sizeof (aarch64_inst));
1874
/* ALIAS is the preference as long as the instruction can be
1875
successfully converted to the form of ALIAS. */
1876
if (convert_to_alias (©, alias) == 1)
1878
aarch64_replace_opcode (©, alias);
1879
assert (aarch64_match_operands_constraint (©, NULL));
1880
DEBUG_TRACE ("succeed with %s via conversion", alias->name);
1881
memcpy (inst, ©, sizeof (aarch64_inst));
1887
/* Directly decode the alias opcode. */
1889
memset (&temp, '\0', sizeof (aarch64_inst));
1890
if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
1892
DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
1893
memcpy (inst, &temp, sizeof (aarch64_inst));
1900
/* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
1901
fails, which meanes that CODE is not an instruction of OPCODE; otherwise
1904
If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
1905
determined and used to disassemble CODE; this is done just before the
1909
aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
1910
aarch64_inst *inst, int noaliases_p)
1914
DEBUG_TRACE ("enter with %s", opcode->name);
1916
assert (opcode && inst);
1918
/* Check the base opcode. */
1919
if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
1921
DEBUG_TRACE ("base opcode match FAIL");
1926
memset (inst, '\0', sizeof (aarch64_inst));
1928
inst->opcode = opcode;
1931
/* Assign operand codes and indexes. */
1932
for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1934
if (opcode->operands[i] == AARCH64_OPND_NIL)
1936
inst->operands[i].type = opcode->operands[i];
1937
inst->operands[i].idx = i;
1940
/* Call the opcode decoder indicated by flags. */
1941
if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
1943
DEBUG_TRACE ("opcode flag-based decoder FAIL");
1947
/* Call operand decoders. */
1948
for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1950
const aarch64_operand *opnd;
1951
enum aarch64_opnd type;
1952
type = opcode->operands[i];
1953
if (type == AARCH64_OPND_NIL)
1955
opnd = &aarch64_operands[type];
1956
if (operand_has_extractor (opnd)
1957
&& (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
1959
DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
1964
/* Match the qualifiers. */
1965
if (aarch64_match_operands_constraint (inst, NULL) == 1)
1967
/* Arriving here, the CODE has been determined as a valid instruction
1968
of OPCODE and *INST has been filled with information of this OPCODE
1969
instruction. Before the return, check if the instruction has any
1970
alias and should be disassembled in the form of its alias instead.
1971
If the answer is yes, *INST will be updated. */
1973
determine_disassembling_preference (inst);
1974
DEBUG_TRACE ("SUCCESS");
1979
DEBUG_TRACE ("constraint matching FAIL");
1986
/* This does some user-friendly fix-up to *INST. It is currently focus on
1987
the adjustment of qualifiers to help the printed instruction
1988
recognized/understood more easily. */
1991
user_friendly_fixup (aarch64_inst *inst)
1993
switch (inst->opcode->iclass)
1996
/* TBNZ Xn|Wn, #uimm6, label
1997
Test and Branch Not Zero: conditionally jumps to label if bit number
1998
uimm6 in register Xn is not zero. The bit number implies the width of
1999
the register, which may be written and should be disassembled as Wn if
2000
uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2002
if (inst->operands[1].imm.value < 32)
2003
inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2009
/* Decode INSN and fill in *INST the instruction information. */
2012
disas_aarch64_insn (uint64_t pc ATTRIBUTE_UNUSED, uint32_t insn,
2015
const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2017
#ifdef DEBUG_AARCH64
2020
const aarch64_opcode *tmp = opcode;
2022
DEBUG_TRACE ("opcode lookup:");
2025
aarch64_verbose (" %s", tmp->name);
2026
tmp = aarch64_find_next_opcode (tmp);
2029
#endif /* DEBUG_AARCH64 */
2031
/* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2032
distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2033
opcode field and value, apart from the difference that one of them has an
2034
extra field as part of the opcode, but such a field is used for operand
2035
encoding in other opcode(s) ('immh' in the case of the example). */
2036
while (opcode != NULL)
2038
/* But only one opcode can be decoded successfully for, as the
2039
decoding routine will check the constraint carefully. */
2040
if (aarch64_opcode_decode (opcode, insn, inst, no_aliases) == 1)
2042
opcode = aarch64_find_next_opcode (opcode);
2048
/* Print operands. */
2051
print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2052
const aarch64_opnd_info *opnds, struct disassemble_info *info)
2054
int i, pcrel_p, num_printed;
2055
for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2057
const size_t size = 128;
2059
/* We regard the opcode operand info more, however we also look into
2060
the inst->operands to support the disassembling of the optional
2062
The two operand code should be the same in all cases, apart from
2063
when the operand can be optional. */
2064
if (opcode->operands[i] == AARCH64_OPND_NIL
2065
|| opnds[i].type == AARCH64_OPND_NIL)
2068
/* Generate the operand string in STR. */
2069
aarch64_print_operand (str, size, pc, opcode, opnds, i, &pcrel_p,
2072
/* Print the delimiter (taking account of omitted operand(s)). */
2074
(*info->fprintf_func) (info->stream, "%s",
2075
num_printed++ == 0 ? "\t" : ", ");
2077
/* Print the operand. */
2079
(*info->print_address_func) (info->target, info);
2081
(*info->fprintf_func) (info->stream, "%s", str);
2085
/* Print the instruction mnemonic name. */
2088
print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2090
if (inst->opcode->flags & F_COND)
2092
/* For instructions that are truly conditionally executed, e.g. b.cond,
2093
prepare the full mnemonic name with the corresponding condition
2098
ptr = strchr (inst->opcode->name, '.');
2099
assert (ptr && inst->cond);
2100
len = ptr - inst->opcode->name;
2102
strncpy (name, inst->opcode->name, len);
2104
(*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2107
(*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2110
/* Print the instruction according to *INST. */
2113
print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2114
struct disassemble_info *info)
2116
print_mnemonic_name (inst, info);
2117
print_operands (pc, inst->opcode, inst->operands, info);
2120
/* Entry-point of the instruction disassembler and printer. */
2123
print_insn_aarch64_word (bfd_vma pc,
2125
struct disassemble_info *info)
2127
static const char *err_msg[6] =
2130
[-ERR_UND] = "undefined",
2131
[-ERR_UNP] = "unpredictable",
2138
info->insn_info_valid = 1;
2139
info->branch_delay_insns = 0;
2140
info->data_size = 0;
2144
if (info->flags & INSN_HAS_RELOC)
2145
/* If the instruction has a reloc associated with it, then
2146
the offset field in the instruction will actually be the
2147
addend for the reloc. (If we are using REL type relocs).
2148
In such cases, we can ignore the pc when computing
2149
addresses, since the addend is not currently pc-relative. */
2152
ret = disas_aarch64_insn (pc, word, &inst);
2154
if (((word >> 21) & 0x3ff) == 1)
2156
/* RESERVED for ALES. */
2157
assert (ret != ERR_OK);
2166
/* Handle undefined instructions. */
2167
info->insn_type = dis_noninsn;
2168
(*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2169
word, err_msg[-ret]);
2172
user_friendly_fixup (&inst);
2173
print_aarch64_insn (pc, &inst, info);
2180
/* Disallow mapping symbols ($x, $d etc) from
2181
being displayed in symbol relative addresses. */
2184
aarch64_symbol_is_valid (asymbol * sym,
2185
struct disassemble_info * info ATTRIBUTE_UNUSED)
2192
name = bfd_asymbol_name (sym);
2196
|| (name[1] != 'x' && name[1] != 'd')
2197
|| (name[2] != '\0' && name[2] != '.'));
2200
/* Print data bytes on INFO->STREAM. */
2203
print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2205
struct disassemble_info *info)
2207
switch (info->bytes_per_chunk)
2210
info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2213
info->fprintf_func (info->stream, ".short\t0x%04x", word);
2216
info->fprintf_func (info->stream, ".word\t0x%08x", word);
2223
/* Try to infer the code or data type from a symbol.
2224
Returns nonzero if *MAP_TYPE was set. */
2227
get_sym_code_type (struct disassemble_info *info, int n,
2228
enum map_type *map_type)
2230
elf_symbol_type *es;
2234
es = *(elf_symbol_type **)(info->symtab + n);
2235
type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2237
/* If the symbol has function type then use that. */
2238
if (type == STT_FUNC)
2240
*map_type = MAP_INSN;
2244
/* Check for mapping symbols. */
2245
name = bfd_asymbol_name(info->symtab[n]);
2247
&& (name[1] == 'x' || name[1] == 'd')
2248
&& (name[2] == '\0' || name[2] == '.'))
2250
*map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2257
/* Entry-point of the AArch64 disassembler. */
2260
print_insn_aarch64 (bfd_vma pc,
2261
struct disassemble_info *info)
2263
bfd_byte buffer[INSNLEN];
2265
void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2266
bfd_boolean found = FALSE;
2267
unsigned int size = 4;
2270
if (info->disassembler_options)
2272
set_default_aarch64_dis_options (info);
2274
parse_aarch64_dis_options (info->disassembler_options);
2276
/* To avoid repeated parsing of these options, we remove them here. */
2277
info->disassembler_options = NULL;
2280
/* Aarch64 instructions are always little-endian */
2281
info->endian_code = BFD_ENDIAN_LITTLE;
2283
/* First check the full symtab for a mapping symbol, even if there
2284
are no usable non-mapping symbols for this address. */
2285
if (info->symtab_size != 0
2286
&& bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2288
enum map_type type = MAP_INSN;
2293
if (pc <= last_mapping_addr)
2294
last_mapping_sym = -1;
2296
/* Start scanning at the start of the function, or wherever
2297
we finished last time. */
2298
n = info->symtab_pos + 1;
2299
if (n < last_mapping_sym)
2300
n = last_mapping_sym;
2302
/* Scan up to the location being disassembled. */
2303
for (; n < info->symtab_size; n++)
2305
addr = bfd_asymbol_value (info->symtab[n]);
2308
if ((info->section == NULL
2309
|| info->section == info->symtab[n]->section)
2310
&& get_sym_code_type (info, n, &type))
2319
n = info->symtab_pos;
2320
if (n < last_mapping_sym)
2321
n = last_mapping_sym;
2323
/* No mapping symbol found at this address. Look backwards
2324
for a preceeding one. */
2327
if (get_sym_code_type (info, n, &type))
2336
last_mapping_sym = last_sym;
2339
/* Look a little bit ahead to see if we should print out
2340
less than four bytes of data. If there's a symbol,
2341
mapping or otherwise, after two bytes then don't
2343
if (last_type == MAP_DATA)
2345
size = 4 - (pc & 3);
2346
for (n = last_sym + 1; n < info->symtab_size; n++)
2348
addr = bfd_asymbol_value (info->symtab[n]);
2351
if (addr - pc < size)
2356
/* If the next symbol is after three bytes, we need to
2357
print only part of the data, so that we can use either
2360
size = (pc & 1) ? 1 : 2;
2364
if (last_type == MAP_DATA)
2366
/* size was set above. */
2367
info->bytes_per_chunk = size;
2368
info->display_endian = info->endian;
2369
printer = print_insn_data;
2373
info->bytes_per_chunk = size = INSNLEN;
2374
info->display_endian = info->endian_code;
2375
printer = print_insn_aarch64_word;
2378
status = (*info->read_memory_func) (pc, buffer, size, info);
2381
(*info->memory_error_func) (status, pc, info);
2385
data = bfd_get_bits (buffer, size * 8,
2386
info->display_endian == BFD_ENDIAN_BIG);
2388
(*printer) (pc, data, info);
2394
/* Describe the -M disassembler options on STREAM (used by objdump --help).  */
void
print_aarch64_disassembler_options (FILE *stream)
{
  fprintf (stream, _("\n\
The following AARCH64 specific disassembler options are supported for use\n\
with the -M switch (multiple options should be separated by commas):\n"));

  fprintf (stream, _("\n\
  no-aliases         Don't print instruction aliases.\n"));

  fprintf (stream, _("\n\
  aliases            Do print instruction aliases.\n"));

#ifdef DEBUG_AARCH64
  fprintf (stream, _("\n\
  debug_dump         Temp switch for debug trace.\n"));
#endif /* DEBUG_AARCH64 */

  fprintf (stream, _("\n"));
}