From e4ffc38530be8e326ac8362237c559a626d653a2 Mon Sep 17 00:00:00 2001
From: Michael Matz <matz@suse.de>
Date: Fri, 31 Jan 2014 14:47:31 +0000
Subject: [PATCH 118/158] target-arm: A64: Add SIMD across-lanes instructions

Add support for the SIMD "across lanes" instruction group (C3.6.4).

Signed-off-by: Michael Matz <matz@suse.de>
[PMM: Updated to current codebase, added fp min/max ops,
 added unallocated encoding checks]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
---
 target-arm/translate-a64.c | 177 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 176 insertions(+), 1 deletion(-)
17
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 59e2a85..de4f518 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
21
@@ -4883,6 +4883,29 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
22
tcg_temp_free_i64(tcg_resh);
25
+static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2,
26
+ int opc, bool is_min, TCGv_ptr fpst)
28
+ /* Helper function for disas_simd_across_lanes: do a single precision
29
+ * min/max operation on the specified two inputs,
30
+ * and return the result in tcg_elt1.
34
+ gen_helper_vfp_minnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
36
+ gen_helper_vfp_maxnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
41
+ gen_helper_vfp_mins(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
43
+ gen_helper_vfp_maxs(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
48
/* C3.6.4 AdvSIMD across lanes
49
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
50
* +---+---+---+-----------+------+-----------+--------+-----+------+------+
51
@@ -4891,7 +4914,159 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
53
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
55
- unsupported_encoding(s, insn);
56
+ int rd = extract32(insn, 0, 5);
57
+ int rn = extract32(insn, 5, 5);
58
+ int size = extract32(insn, 22, 2);
59
+ int opcode = extract32(insn, 12, 5);
60
+ bool is_q = extract32(insn, 30, 1);
61
+ bool is_u = extract32(insn, 29, 1);
63
+ bool is_min = false;
67
+ TCGv_i64 tcg_res, tcg_elt;
70
+ case 0x1b: /* ADDV */
72
+ unallocated_encoding(s);
76
+ case 0x3: /* SADDLV, UADDLV */
77
+ case 0xa: /* SMAXV, UMAXV */
78
+ case 0x1a: /* SMINV, UMINV */
79
+ if (size == 3 || (size == 2 && !is_q)) {
80
+ unallocated_encoding(s);
84
+ case 0xc: /* FMAXNMV, FMINNMV */
85
+ case 0xf: /* FMAXV, FMINV */
86
+ if (!is_u || !is_q || extract32(size, 0, 1)) {
87
+ unallocated_encoding(s);
90
+ /* Bit 1 of size field encodes min vs max, and actual size is always
91
+ * 32 bits: adjust the size variable so following code can rely on it
93
+ is_min = extract32(size, 1, 1);
98
+ unallocated_encoding(s);
103
+ elements = (is_q ? 128 : 64) / esize;
105
+ tcg_res = tcg_temp_new_i64();
106
+ tcg_elt = tcg_temp_new_i64();
108
+ /* These instructions operate across all lanes of a vector
109
+ * to produce a single result. We can guarantee that a 64
110
+ * bit intermediate is sufficient:
111
+ * + for [US]ADDLV the maximum element size is 32 bits, and
112
+ * the result type is 64 bits
113
+ * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
114
+ * same as the element size, which is 32 bits at most
115
+ * For the integer operations we can choose to work at 64
116
+ * or 32 bits and truncate at the end; for simplicity
117
+ * we use 64 bits always. The floating point
118
+ * ops do require 32 bit intermediates, though.
121
+ read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
123
+ for (i = 1; i < elements; i++) {
124
+ read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
127
+ case 0x03: /* SADDLV / UADDLV */
128
+ case 0x1b: /* ADDV */
129
+ tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
131
+ case 0x0a: /* SMAXV / UMAXV */
132
+ tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
134
+ tcg_res, tcg_elt, tcg_res, tcg_elt);
136
+ case 0x1a: /* SMINV / UMINV */
137
+ tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
139
+ tcg_res, tcg_elt, tcg_res, tcg_elt);
143
+ g_assert_not_reached();
148
+ /* Floating point ops which work on 32 bit (single) intermediates.
149
+ * Note that correct NaN propagation requires that we do these
150
+ * operations in exactly the order specified by the pseudocode.
152
+ TCGv_i32 tcg_elt1 = tcg_temp_new_i32();
153
+ TCGv_i32 tcg_elt2 = tcg_temp_new_i32();
154
+ TCGv_i32 tcg_elt3 = tcg_temp_new_i32();
155
+ TCGv_ptr fpst = get_fpstatus_ptr();
157
+ assert(esize == 32);
158
+ assert(elements == 4);
160
+ read_vec_element(s, tcg_elt, rn, 0, MO_32);
161
+ tcg_gen_trunc_i64_i32(tcg_elt1, tcg_elt);
162
+ read_vec_element(s, tcg_elt, rn, 1, MO_32);
163
+ tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
165
+ do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
167
+ read_vec_element(s, tcg_elt, rn, 2, MO_32);
168
+ tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
169
+ read_vec_element(s, tcg_elt, rn, 3, MO_32);
170
+ tcg_gen_trunc_i64_i32(tcg_elt3, tcg_elt);
172
+ do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);
174
+ do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
176
+ tcg_gen_extu_i32_i64(tcg_res, tcg_elt1);
177
+ tcg_temp_free_i32(tcg_elt1);
178
+ tcg_temp_free_i32(tcg_elt2);
179
+ tcg_temp_free_i32(tcg_elt3);
180
+ tcg_temp_free_ptr(fpst);
183
+ tcg_temp_free_i64(tcg_elt);
185
+ /* Now truncate the result to the width required for the final output */
186
+ if (opcode == 0x03) {
187
+ /* SADDLV, UADDLV: result is 2*esize */
193
+ tcg_gen_ext8u_i64(tcg_res, tcg_res);
196
+ tcg_gen_ext16u_i64(tcg_res, tcg_res);
199
+ tcg_gen_ext32u_i64(tcg_res, tcg_res);
204
+ g_assert_not_reached();
207
+ write_fp_dreg(s, rd, tcg_res);
208
+ tcg_temp_free_i64(tcg_res);
211
/* C3.6.5 AdvSIMD copy