//======- X86InstrFragmentsSIMD.td - x86 ISA -------------*- tablegen -*-=====//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//
// MMX 64-bit vector load.
def load_mmx : PatFrag<(ops node:$ptr), (v1i64 (load node:$ptr))>;

// Bitconvert (reinterpreting bitcast) fragments for the 64-bit MMX types.
def bc_v8i8  : PatFrag<(ops node:$in), (v8i8  (bitconvert node:$in))>;
def bc_v4i16 : PatFrag<(ops node:$in), (v4i16 (bitconvert node:$in))>;
def bc_v2i32 : PatFrag<(ops node:$in), (v2i32 (bitconvert node:$in))>;
def bc_v1i64 : PatFrag<(ops node:$in), (v1i64 (bitconvert node:$in))>;
//===----------------------------------------------------------------------===//
// MMX Shuffle Pattern Fragments
//===----------------------------------------------------------------------===//

// MMX_SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to
// PSHUF*, SHUFP* etc. imm.
def MMX_SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;
// Patterns for: vector_shuffle v1, v2, <2, 6, 3, 7, ...>
def mmx_unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;
// Patterns for: vector_shuffle v1, v2, <0, 4, 2, 5, ...>
def mmx_unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;
// Patterns for: vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
// (UNPCKH with an undef RHS duplicates the high elements; the original
// comment showed the low-element mask, which belongs to mmx_unpckl_undef.)
def mmx_unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;
// Patterns for: vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
// (UNPCKL with an undef RHS duplicates the low elements; the original
// comment showed the high-element mask, which belongs to mmx_unpckh_undef.)
def mmx_unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;
// PSHUFW-style shuffle: matches the same masks as PSHUFD and converts the
// mask to an imm8 via MMX_SHUFFLE_get_shuf_imm.
def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], MMX_SHUFFLE_get_shuf_imm>;
//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

// FP result/operand shifted by an integer amount.
def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
// Vector FP compare: integer-vector result, FP operands, i8 condition-code
// immediate as operand 3.
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;
def X86fmin   : SDNode<"X86ISD::FMIN",   SDTFPBinOp>;
def X86fmax   : SDNode<"X86ISD::FMAX",   SDTFPBinOp>;
// Bitwise logic ops on FP-typed vectors.
def X86fand   : SDNode<"X86ISD::FAND",   SDTFPBinOp,
                       [SDNPCommutative, SDNPAssociative]>;
def X86for    : SDNode<"X86ISD::FOR",    SDTFPBinOp,
                       [SDNPCommutative, SDNPAssociative]>;
def X86fxor   : SDNode<"X86ISD::FXOR",   SDTFPBinOp,
                       [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp   : SDNode<"X86ISD::FRCP",   SDTFPUnaryOp>;
def X86fsrl   : SDNode<"X86ISD::FSRL",   SDTX86FPShiftOp>;
def X86comi   : SDNode<"X86ISD::COMI",   SDTX86CmpTest>;
def X86ucomi  : SDNode<"X86ISD::UCOMI",  SDTX86CmpTest>;
def X86pshufb : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
// Element extracts: i32 result, pointer-typed index operand.
def X86pextrb : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
// Element inserts: i32 scalar inserted at a pointer-typed index.
def X86pinsrb : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
// Move low element, zeroing the rest of the vector.
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
// Load low element, zero-extending into the full vector (has a chain).
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                       [SDNPHasChain, SDNPMayLoad]>;
// Vector shifts.
def X86vshl    : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
def X86vshr    : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
// Packed FP compares with condition-code immediate (SDTX86VFCMP).
def X86cmpps   : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
def X86cmppd   : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
// Packed integer equality compares (commutative).
def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
// Packed greater-than compares (not commutative).
def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
// PTEST/TESTP-style compare: i32 result, two identically-typed vector inputs.
def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
// translated into one of the target nodes below during lowering.
// Note: this is a work in progress...
def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                SDTCisSameAs<0,2>]>;

def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                 SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                 SDTCisSameAs<0,2>, SDTCisInt<3>]>;

// Load-folded variant: operand 1 is a pointer, operand 2 the shuffle imm.
def SDTShuff2OpLdI : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>,
                                   SDTCisInt<2>]>;
def X86PAlign : SDNode<"X86ISD::PALIGN", SDTShuff3OpI>;

def X86PShufd  : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;

// Load-folded PSHUFHW/PSHUFLW (pointer operand, see SDTShuff2OpLdI).
def X86PShufhwLd : SDNode<"X86ISD::PSHUFHW_LD", SDTShuff2OpLdI>;
def X86PShuflwLd : SDNode<"X86ISD::PSHUFLW_LD", SDTShuff2OpLdI>;

def X86Shufpd : SDNode<"X86ISD::SHUFPD", SDTShuff3OpI>;
def X86Shufps : SDNode<"X86ISD::SHUFPS", SDTShuff3OpI>;

def X86Movddup  : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>;
def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>;
def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>;

def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>;
def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>;

def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>;
def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>;
def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;
def X86Movhlpd : SDNode<"X86ISD::MOVHLPD", SDTShuff2Op>;

def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;

def X86Unpcklps : SDNode<"X86ISD::UNPCKLPS", SDTShuff2Op>;
def X86Unpcklpd : SDNode<"X86ISD::UNPCKLPD", SDTShuff2Op>;
def X86Unpckhps : SDNode<"X86ISD::UNPCKHPS", SDTShuff2Op>;
def X86Unpckhpd : SDNode<"X86ISD::UNPCKHPD", SDTShuff2Op>;

def X86Punpcklbw  : SDNode<"X86ISD::PUNPCKLBW", SDTShuff2Op>;
def X86Punpcklwd  : SDNode<"X86ISD::PUNPCKLWD", SDTShuff2Op>;
def X86Punpckldq  : SDNode<"X86ISD::PUNPCKLDQ", SDTShuff2Op>;
def X86Punpcklqdq : SDNode<"X86ISD::PUNPCKLQDQ", SDTShuff2Op>;

def X86Punpckhbw  : SDNode<"X86ISD::PUNPCKHBW", SDTShuff2Op>;
def X86Punpckhwd  : SDNode<"X86ISD::PUNPCKHWD", SDTShuff2Op>;
def X86Punpckhdq  : SDNode<"X86ISD::PUNPCKHDQ", SDTShuff2Op>;
def X86Punpckhqdq : SDNode<"X86ISD::PUNPCKHQDQ", SDTShuff2Op>;
//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;

// Scalar-SSE memory operands (same 5-part x86 address as regular mem ops).
def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

// 128-bit load pattern fragments
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// 256-bit load pattern fragments
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
// Like 'store', but always requires vector (16-byte) alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires vector (16-byte) alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
// Scalar aligned-load fragments.
def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;

// 128-bit aligned load pattern fragments
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
                               (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// 256-bit aligned load pattern fragments
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload node:$ptr))>;
def alignedloadv8i32 : PatFrag<(ops node:$ptr),
                               (v8i32 (alignedload node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload node:$ptr))>;
// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem()
    || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;

// 128-bit memop pattern fragments
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;

// 256-bit memop pattern fragments
def memopv32i8 : PatFrag<(ops node:$ptr), (v32i8 (memop node:$ptr))>;
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;
def memopv8i32 : PatFrag<(ops node:$ptr), (v8i32 (memop node:$ptr))>;
// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary, so only require 8-byte alignment here.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

// Non-temporal, plain (non-truncating, unindexed), 16-byte-aligned store.
def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

// Non-temporal store with less than 16-byte alignment.
def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;
// 128-bit bitconvert pattern fragments
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// 256-bit bitconvert pattern fragments
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
// Scalar load zero-extended into the low element of a vector, then
// bitconverted (used to match MOVD/MOVQ-style zero-extending loads).
def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;
// Matches the f32 immediate +0.0 exactly.
def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
// a PALIGNR imm.
def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}]>;
// Splat of element 0 across the whole vector.
def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
}]>;
// Shuffle fragments that delegate mask classification to the X86::is*Mask
// predicates in X86ISelLowering.
def movddup : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
                            (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movl : PatFrag<(ops node:$lhs, node:$rhs),
                   (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;
// Shuffle fragments that also produce an imm8 via the SHUFFLE_get_* xforms.
def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def shufp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshufhw_imm>;

def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshuflw_imm>;

def palign : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_palign_imm>;