; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vaddi8:
;CHECK: vadd.i8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = add <8 x i8> %tmp1, %tmp2
	ret <8 x i8> %tmp3
}
define <4 x i16> @vaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vaddi16:
;CHECK: vadd.i16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = add <4 x i16> %tmp1, %tmp2
	ret <4 x i16> %tmp3
}
define <2 x i32> @vaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vaddi32:
;CHECK: vadd.i32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = add <2 x i32> %tmp1, %tmp2
	ret <2 x i32> %tmp3
}
define <1 x i64> @vaddi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vaddi64:
;CHECK: vadd.i64
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
	%tmp3 = add <1 x i64> %tmp1, %tmp2
	ret <1 x i64> %tmp3
}
define <2 x float> @vaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vaddf32:
;CHECK: vadd.f32
	%tmp1 = load <2 x float>* %A
	%tmp2 = load <2 x float>* %B
; fadd, not add: integer `add` is invalid on floating-point vectors
	%tmp3 = fadd <2 x float> %tmp1, %tmp2
	ret <2 x float> %tmp3
}
define <16 x i8> @vaddQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vaddQi8:
;CHECK: vadd.i8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = add <16 x i8> %tmp1, %tmp2
	ret <16 x i8> %tmp3
}
define <8 x i16> @vaddQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vaddQi16:
;CHECK: vadd.i16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = add <8 x i16> %tmp1, %tmp2
	ret <8 x i16> %tmp3
}
define <4 x i32> @vaddQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vaddQi32:
;CHECK: vadd.i32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = add <4 x i32> %tmp1, %tmp2
	ret <4 x i32> %tmp3
}
define <2 x i64> @vaddQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vaddQi64:
;CHECK: vadd.i64
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = add <2 x i64> %tmp1, %tmp2
	ret <2 x i64> %tmp3
}
define <4 x float> @vaddQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: vaddQf32:
;CHECK: vadd.f32
	%tmp1 = load <4 x float>* %A
	%tmp2 = load <4 x float>* %B
; fadd, not add: integer `add` is invalid on floating-point vectors
	%tmp3 = fadd <4 x float> %tmp1, %tmp2
	ret <4 x float> %tmp3
}
define <8 x i8> @vaddhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vaddhni16:
;CHECK: vaddhn.i16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vaddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
	ret <8 x i8> %tmp3
}
define <4 x i16> @vaddhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vaddhni32:
;CHECK: vaddhn.i32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vaddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
	ret <4 x i16> %tmp3
}
define <2 x i32> @vaddhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vaddhni64:
;CHECK: vaddhn.i64
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vaddhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
	ret <2 x i32> %tmp3
}
declare <8 x i8>  @llvm.arm.neon.vaddhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vaddhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vaddhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i8> @vraddhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vraddhni16:
;CHECK: vraddhn.i16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
	ret <8 x i8> %tmp3
}
define <4 x i16> @vraddhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vraddhni32:
;CHECK: vraddhn.i32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
	ret <4 x i16> %tmp3
}
define <2 x i32> @vraddhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vraddhni64:
;CHECK: vraddhn.i64
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
	ret <2 x i32> %tmp3
}
declare <8 x i8>  @llvm.arm.neon.vraddhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @vaddls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vaddls8:
;CHECK: vaddl.s8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vaddls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i16> %tmp3
}
define <4 x i32> @vaddls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vaddls16:
;CHECK: vaddl.s16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i32> %tmp3
}
define <2 x i64> @vaddls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vaddls32:
;CHECK: vaddl.s32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vaddls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i64> %tmp3
}
define <8 x i16> @vaddlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vaddlu8:
;CHECK: vaddl.u8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vaddlu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i16> %tmp3
}
define <4 x i32> @vaddlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vaddlu16:
;CHECK: vaddl.u16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vaddlu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i32> %tmp3
}
define <2 x i64> @vaddlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vaddlu32:
;CHECK: vaddl.u32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vaddlu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i64> %tmp3
}
declare <8 x i16> @llvm.arm.neon.vaddls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vaddls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vaddlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vaddlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vaddlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
define <8 x i16> @vaddws8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vaddws8:
;CHECK: vaddw.s8
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vaddws.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2)
	ret <8 x i16> %tmp3
}
define <4 x i32> @vaddws16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vaddws16:
;CHECK: vaddw.s16
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vaddws.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2)
	ret <4 x i32> %tmp3
}
define <2 x i64> @vaddws32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vaddws32:
;CHECK: vaddw.s32
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vaddws.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2)
	ret <2 x i64> %tmp3
}
define <8 x i16> @vaddwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vaddwu8:
;CHECK: vaddw.u8
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vaddwu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2)
	ret <8 x i16> %tmp3
}
define <4 x i32> @vaddwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vaddwu16:
;CHECK: vaddw.u16
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vaddwu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2)
	ret <4 x i32> %tmp3
}
define <2 x i64> @vaddwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vaddwu32:
;CHECK: vaddw.u32
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vaddwu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2)
	ret <2 x i64> %tmp3
}
declare <8 x i16> @llvm.arm.neon.vaddws.v8i16(<8 x i16>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vaddws.v4i32(<4 x i32>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vaddws.v2i64(<2 x i64>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vaddwu.v8i16(<8 x i16>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vaddwu.v4i32(<4 x i32>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vaddwu.v2i64(<2 x i64>, <2 x i32>) nounwind readnone