//===- X86InstrInfo.cpp - X86 Instruction Information ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "X86InstrInfo.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/MC/MCAsmInfo.h"

using namespace llvm;
static cl::opt<bool>
NoFusing("disable-spill-fusing",
         cl::desc("Disable fusing of spill code into instructions"));
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
                  cl::desc("Print instructions that the allocator wants to"
                           " fuse, but the X86 backend currently can't"),
                  cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
                 cl::desc("Re-materialize load from stub in PIC mode"),
                 cl::init(false), cl::Hidden);
X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
  SmallVector<unsigned,16> AmbEntries;
  static const unsigned OpTbl2Addr[][2] = {
    { X86::ADC32ri,     X86::ADC32mi },
    { X86::ADC32ri8,    X86::ADC32mi8 },
    { X86::ADC32rr,     X86::ADC32mr },
    { X86::ADC64ri32,   X86::ADC64mi32 },
    { X86::ADC64ri8,    X86::ADC64mi8 },
    { X86::ADC64rr,     X86::ADC64mr },
    { X86::ADD16ri,     X86::ADD16mi },
    { X86::ADD16ri8,    X86::ADD16mi8 },
    { X86::ADD16rr,     X86::ADD16mr },
    { X86::ADD32ri,     X86::ADD32mi },
    { X86::ADD32ri8,    X86::ADD32mi8 },
    { X86::ADD32rr,     X86::ADD32mr },
    { X86::ADD64ri32,   X86::ADD64mi32 },
    { X86::ADD64ri8,    X86::ADD64mi8 },
    { X86::ADD64rr,     X86::ADD64mr },
    { X86::ADD8ri,      X86::ADD8mi },
    { X86::ADD8rr,      X86::ADD8mr },
    { X86::AND16ri,     X86::AND16mi },
    { X86::AND16ri8,    X86::AND16mi8 },
    { X86::AND16rr,     X86::AND16mr },
    { X86::AND32ri,     X86::AND32mi },
    { X86::AND32ri8,    X86::AND32mi8 },
    { X86::AND32rr,     X86::AND32mr },
    { X86::AND64ri32,   X86::AND64mi32 },
    { X86::AND64ri8,    X86::AND64mi8 },
    { X86::AND64rr,     X86::AND64mr },
    { X86::AND8ri,      X86::AND8mi },
    { X86::AND8rr,      X86::AND8mr },
    { X86::DEC16r,      X86::DEC16m },
    { X86::DEC32r,      X86::DEC32m },
    { X86::DEC64_16r,   X86::DEC64_16m },
    { X86::DEC64_32r,   X86::DEC64_32m },
    { X86::DEC64r,      X86::DEC64m },
    { X86::DEC8r,       X86::DEC8m },
    { X86::INC16r,      X86::INC16m },
    { X86::INC32r,      X86::INC32m },
    { X86::INC64_16r,   X86::INC64_16m },
    { X86::INC64_32r,   X86::INC64_32m },
    { X86::INC64r,      X86::INC64m },
    { X86::INC8r,       X86::INC8m },
    { X86::NEG16r,      X86::NEG16m },
    { X86::NEG32r,      X86::NEG32m },
    { X86::NEG64r,      X86::NEG64m },
    { X86::NEG8r,       X86::NEG8m },
    { X86::NOT16r,      X86::NOT16m },
    { X86::NOT32r,      X86::NOT32m },
    { X86::NOT64r,      X86::NOT64m },
    { X86::NOT8r,       X86::NOT8m },
    { X86::OR16ri,      X86::OR16mi },
    { X86::OR16ri8,     X86::OR16mi8 },
    { X86::OR16rr,      X86::OR16mr },
    { X86::OR32ri,      X86::OR32mi },
    { X86::OR32ri8,     X86::OR32mi8 },
    { X86::OR32rr,      X86::OR32mr },
    { X86::OR64ri32,    X86::OR64mi32 },
    { X86::OR64ri8,     X86::OR64mi8 },
    { X86::OR64rr,      X86::OR64mr },
    { X86::OR8ri,       X86::OR8mi },
    { X86::OR8rr,       X86::OR8mr },
    { X86::ROL16r1,     X86::ROL16m1 },
    { X86::ROL16rCL,    X86::ROL16mCL },
    { X86::ROL16ri,     X86::ROL16mi },
    { X86::ROL32r1,     X86::ROL32m1 },
    { X86::ROL32rCL,    X86::ROL32mCL },
    { X86::ROL32ri,     X86::ROL32mi },
    { X86::ROL64r1,     X86::ROL64m1 },
    { X86::ROL64rCL,    X86::ROL64mCL },
    { X86::ROL64ri,     X86::ROL64mi },
    { X86::ROL8r1,      X86::ROL8m1 },
    { X86::ROL8rCL,     X86::ROL8mCL },
    { X86::ROL8ri,      X86::ROL8mi },
    { X86::ROR16r1,     X86::ROR16m1 },
    { X86::ROR16rCL,    X86::ROR16mCL },
    { X86::ROR16ri,     X86::ROR16mi },
    { X86::ROR32r1,     X86::ROR32m1 },
    { X86::ROR32rCL,    X86::ROR32mCL },
    { X86::ROR32ri,     X86::ROR32mi },
    { X86::ROR64r1,     X86::ROR64m1 },
    { X86::ROR64rCL,    X86::ROR64mCL },
    { X86::ROR64ri,     X86::ROR64mi },
    { X86::ROR8r1,      X86::ROR8m1 },
    { X86::ROR8rCL,     X86::ROR8mCL },
    { X86::ROR8ri,      X86::ROR8mi },
    { X86::SAR16r1,     X86::SAR16m1 },
    { X86::SAR16rCL,    X86::SAR16mCL },
    { X86::SAR16ri,     X86::SAR16mi },
    { X86::SAR32r1,     X86::SAR32m1 },
    { X86::SAR32rCL,    X86::SAR32mCL },
    { X86::SAR32ri,     X86::SAR32mi },
    { X86::SAR64r1,     X86::SAR64m1 },
    { X86::SAR64rCL,    X86::SAR64mCL },
    { X86::SAR64ri,     X86::SAR64mi },
    { X86::SAR8r1,      X86::SAR8m1 },
    { X86::SAR8rCL,     X86::SAR8mCL },
    { X86::SAR8ri,      X86::SAR8mi },
    { X86::SBB32ri,     X86::SBB32mi },
    { X86::SBB32ri8,    X86::SBB32mi8 },
    { X86::SBB32rr,     X86::SBB32mr },
    { X86::SBB64ri32,   X86::SBB64mi32 },
    { X86::SBB64ri8,    X86::SBB64mi8 },
    { X86::SBB64rr,     X86::SBB64mr },
    { X86::SHL16rCL,    X86::SHL16mCL },
    { X86::SHL16ri,     X86::SHL16mi },
    { X86::SHL32rCL,    X86::SHL32mCL },
    { X86::SHL32ri,     X86::SHL32mi },
    { X86::SHL64rCL,    X86::SHL64mCL },
    { X86::SHL64ri,     X86::SHL64mi },
    { X86::SHL8rCL,     X86::SHL8mCL },
    { X86::SHL8ri,      X86::SHL8mi },
    { X86::SHLD16rrCL,  X86::SHLD16mrCL },
    { X86::SHLD16rri8,  X86::SHLD16mri8 },
    { X86::SHLD32rrCL,  X86::SHLD32mrCL },
    { X86::SHLD32rri8,  X86::SHLD32mri8 },
    { X86::SHLD64rrCL,  X86::SHLD64mrCL },
    { X86::SHLD64rri8,  X86::SHLD64mri8 },
    { X86::SHR16r1,     X86::SHR16m1 },
    { X86::SHR16rCL,    X86::SHR16mCL },
    { X86::SHR16ri,     X86::SHR16mi },
    { X86::SHR32r1,     X86::SHR32m1 },
    { X86::SHR32rCL,    X86::SHR32mCL },
    { X86::SHR32ri,     X86::SHR32mi },
    { X86::SHR64r1,     X86::SHR64m1 },
    { X86::SHR64rCL,    X86::SHR64mCL },
    { X86::SHR64ri,     X86::SHR64mi },
    { X86::SHR8r1,      X86::SHR8m1 },
    { X86::SHR8rCL,     X86::SHR8mCL },
    { X86::SHR8ri,      X86::SHR8mi },
    { X86::SHRD16rrCL,  X86::SHRD16mrCL },
    { X86::SHRD16rri8,  X86::SHRD16mri8 },
    { X86::SHRD32rrCL,  X86::SHRD32mrCL },
    { X86::SHRD32rri8,  X86::SHRD32mri8 },
    { X86::SHRD64rrCL,  X86::SHRD64mrCL },
    { X86::SHRD64rri8,  X86::SHRD64mri8 },
    { X86::SUB16ri,     X86::SUB16mi },
    { X86::SUB16ri8,    X86::SUB16mi8 },
    { X86::SUB16rr,     X86::SUB16mr },
    { X86::SUB32ri,     X86::SUB32mi },
    { X86::SUB32ri8,    X86::SUB32mi8 },
    { X86::SUB32rr,     X86::SUB32mr },
    { X86::SUB64ri32,   X86::SUB64mi32 },
    { X86::SUB64ri8,    X86::SUB64mi8 },
    { X86::SUB64rr,     X86::SUB64mr },
    { X86::SUB8ri,      X86::SUB8mi },
    { X86::SUB8rr,      X86::SUB8mr },
    { X86::XOR16ri,     X86::XOR16mi },
    { X86::XOR16ri8,    X86::XOR16mi8 },
    { X86::XOR16rr,     X86::XOR16mr },
    { X86::XOR32ri,     X86::XOR32mi },
    { X86::XOR32ri8,    X86::XOR32mi8 },
    { X86::XOR32rr,     X86::XOR32mr },
    { X86::XOR64ri32,   X86::XOR64mi32 },
    { X86::XOR64ri8,    X86::XOR64mi8 },
    { X86::XOR64rr,     X86::XOR64mr },
    { X86::XOR8ri,      X86::XOR8mi },
    { X86::XOR8rr,      X86::XOR8mr }
  };
  for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
    unsigned RegOp = OpTbl2Addr[i][0];
    unsigned MemOp = OpTbl2Addr[i][1];
    if (!RegOp2MemOpTable2Addr.insert(std::make_pair((unsigned*)RegOp,
                                                     std::make_pair(MemOp,0))).second)
      assert(false && "Duplicated entries?");
    // Index 0, folded load and store, no alignment requirement.
    unsigned AuxInfo = 0 | (1 << 4) | (1 << 5);
    if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                std::make_pair(RegOp,
                                                               AuxInfo))).second)
      AmbEntries.push_back(MemOp);
  }
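
  // A note on the AuxInfo values built in these loops: the low four bits
  // hold the index of the operand being folded, bit 4 marks a folded load,
  // and bit 5 marks a folded store. A two-address fold such as
  // ADD32rr -> ADD32mr sets both bits, since the memory operand is both
  // read and written.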
  // If the third value is 1, then it's folding either a load or a store.
  static const unsigned OpTbl0[][4] = {
    { X86::BT16ri8,     X86::BT16mi8, 1, 0 },
    { X86::BT32ri8,     X86::BT32mi8, 1, 0 },
    { X86::BT64ri8,     X86::BT64mi8, 1, 0 },
    { X86::CALL32r,     X86::CALL32m, 1, 0 },
    { X86::CALL64r,     X86::CALL64m, 1, 0 },
    { X86::WINCALL64r,  X86::WINCALL64m, 1, 0 },
    { X86::CMP16ri,     X86::CMP16mi, 1, 0 },
    { X86::CMP16ri8,    X86::CMP16mi8, 1, 0 },
    { X86::CMP16rr,     X86::CMP16mr, 1, 0 },
    { X86::CMP32ri,     X86::CMP32mi, 1, 0 },
    { X86::CMP32ri8,    X86::CMP32mi8, 1, 0 },
    { X86::CMP32rr,     X86::CMP32mr, 1, 0 },
    { X86::CMP64ri32,   X86::CMP64mi32, 1, 0 },
    { X86::CMP64ri8,    X86::CMP64mi8, 1, 0 },
    { X86::CMP64rr,     X86::CMP64mr, 1, 0 },
    { X86::CMP8ri,      X86::CMP8mi, 1, 0 },
    { X86::CMP8rr,      X86::CMP8mr, 1, 0 },
    { X86::DIV16r,      X86::DIV16m, 1, 0 },
    { X86::DIV32r,      X86::DIV32m, 1, 0 },
    { X86::DIV64r,      X86::DIV64m, 1, 0 },
    { X86::DIV8r,       X86::DIV8m, 1, 0 },
    { X86::EXTRACTPSrr, X86::EXTRACTPSmr, 0, 16 },
    { X86::FsMOVAPDrr,  X86::MOVSDmr, 0, 0 },
    { X86::FsMOVAPSrr,  X86::MOVSSmr, 0, 0 },
    { X86::IDIV16r,     X86::IDIV16m, 1, 0 },
    { X86::IDIV32r,     X86::IDIV32m, 1, 0 },
    { X86::IDIV64r,     X86::IDIV64m, 1, 0 },
    { X86::IDIV8r,      X86::IDIV8m, 1, 0 },
    { X86::IMUL16r,     X86::IMUL16m, 1, 0 },
    { X86::IMUL32r,     X86::IMUL32m, 1, 0 },
    { X86::IMUL64r,     X86::IMUL64m, 1, 0 },
    { X86::IMUL8r,      X86::IMUL8m, 1, 0 },
    { X86::JMP32r,      X86::JMP32m, 1, 0 },
    { X86::JMP64r,      X86::JMP64m, 1, 0 },
    { X86::MOV16ri,     X86::MOV16mi, 0, 0 },
    { X86::MOV16rr,     X86::MOV16mr, 0, 0 },
    { X86::MOV32ri,     X86::MOV32mi, 0, 0 },
    { X86::MOV32rr,     X86::MOV32mr, 0, 0 },
    { X86::MOV32rr_TC,  X86::MOV32mr_TC, 0, 0 },
    { X86::MOV64ri32,   X86::MOV64mi32, 0, 0 },
    { X86::MOV64rr,     X86::MOV64mr, 0, 0 },
    { X86::MOV8ri,      X86::MOV8mi, 0, 0 },
    { X86::MOV8rr,      X86::MOV8mr, 0, 0 },
    { X86::MOV8rr_NOREX, X86::MOV8mr_NOREX, 0, 0 },
    { X86::MOVAPDrr,    X86::MOVAPDmr, 0, 16 },
    { X86::MOVAPSrr,    X86::MOVAPSmr, 0, 16 },
    { X86::MOVDQArr,    X86::MOVDQAmr, 0, 16 },
    { X86::MOVPDI2DIrr, X86::MOVPDI2DImr, 0, 0 },
    { X86::MOVPQIto64rr,X86::MOVPQI2QImr, 0, 0 },
    { X86::MOVSDto64rr, X86::MOVSDto64mr, 0, 0 },
    { X86::MOVSS2DIrr,  X86::MOVSS2DImr, 0, 0 },
    { X86::MOVUPDrr,    X86::MOVUPDmr, 0, 0 },
    { X86::MOVUPSrr,    X86::MOVUPSmr, 0, 0 },
    { X86::MUL16r,      X86::MUL16m, 1, 0 },
    { X86::MUL32r,      X86::MUL32m, 1, 0 },
    { X86::MUL64r,      X86::MUL64m, 1, 0 },
    { X86::MUL8r,       X86::MUL8m, 1, 0 },
    { X86::SETAEr,      X86::SETAEm, 0, 0 },
    { X86::SETAr,       X86::SETAm, 0, 0 },
    { X86::SETBEr,      X86::SETBEm, 0, 0 },
    { X86::SETBr,       X86::SETBm, 0, 0 },
    { X86::SETEr,       X86::SETEm, 0, 0 },
    { X86::SETGEr,      X86::SETGEm, 0, 0 },
    { X86::SETGr,       X86::SETGm, 0, 0 },
    { X86::SETLEr,      X86::SETLEm, 0, 0 },
    { X86::SETLr,       X86::SETLm, 0, 0 },
    { X86::SETNEr,      X86::SETNEm, 0, 0 },
    { X86::SETNOr,      X86::SETNOm, 0, 0 },
    { X86::SETNPr,      X86::SETNPm, 0, 0 },
    { X86::SETNSr,      X86::SETNSm, 0, 0 },
    { X86::SETOr,       X86::SETOm, 0, 0 },
    { X86::SETPr,       X86::SETPm, 0, 0 },
    { X86::SETSr,       X86::SETSm, 0, 0 },
    { X86::TAILJMPr,    X86::TAILJMPm, 1, 0 },
    { X86::TAILJMPr64,  X86::TAILJMPm64, 1, 0 },
    { X86::TEST16ri,    X86::TEST16mi, 1, 0 },
    { X86::TEST32ri,    X86::TEST32mi, 1, 0 },
    { X86::TEST64ri32,  X86::TEST64mi32, 1, 0 },
    { X86::TEST8ri,     X86::TEST8mi, 1, 0 }
  };
  for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
    unsigned RegOp = OpTbl0[i][0];
    unsigned MemOp = OpTbl0[i][1];
    unsigned Align = OpTbl0[i][3];
    if (!RegOp2MemOpTable0.insert(std::make_pair((unsigned*)RegOp,
                                                 std::make_pair(MemOp,Align))).second)
      assert(false && "Duplicated entries?");
    unsigned FoldedLoad = OpTbl0[i][2];
    // Index 0, folded load or store.
    unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
    if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
      if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                  std::make_pair(RegOp, AuxInfo))).second)
        AmbEntries.push_back(MemOp);
  }
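
  // FsMOVAPDrr and FsMOVAPSrr are deliberately kept out of the unfolding
  // map: they are folded both here (to a store) and in OpTbl1 below (to a
  // load), so mapping their memory forms back to a single register form
  // would be ambiguous.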
  static const unsigned OpTbl1[][3] = {
    { X86::CMP16rr,         X86::CMP16rm, 0 },
    { X86::CMP32rr,         X86::CMP32rm, 0 },
    { X86::CMP64rr,         X86::CMP64rm, 0 },
    { X86::CMP8rr,          X86::CMP8rm, 0 },
    { X86::CVTSD2SSrr,      X86::CVTSD2SSrm, 0 },
    { X86::CVTSI2SD64rr,    X86::CVTSI2SD64rm, 0 },
    { X86::CVTSI2SDrr,      X86::CVTSI2SDrm, 0 },
    { X86::CVTSI2SS64rr,    X86::CVTSI2SS64rm, 0 },
    { X86::CVTSI2SSrr,      X86::CVTSI2SSrm, 0 },
    { X86::CVTSS2SDrr,      X86::CVTSS2SDrm, 0 },
    { X86::CVTTSD2SI64rr,   X86::CVTTSD2SI64rm, 0 },
    { X86::CVTTSD2SIrr,     X86::CVTTSD2SIrm, 0 },
    { X86::CVTTSS2SI64rr,   X86::CVTTSS2SI64rm, 0 },
    { X86::CVTTSS2SIrr,     X86::CVTTSS2SIrm, 0 },
    { X86::FsMOVAPDrr,      X86::MOVSDrm, 0 },
    { X86::FsMOVAPSrr,      X86::MOVSSrm, 0 },
    { X86::IMUL16rri,       X86::IMUL16rmi, 0 },
    { X86::IMUL16rri8,      X86::IMUL16rmi8, 0 },
    { X86::IMUL32rri,       X86::IMUL32rmi, 0 },
    { X86::IMUL32rri8,      X86::IMUL32rmi8, 0 },
    { X86::IMUL64rri32,     X86::IMUL64rmi32, 0 },
    { X86::IMUL64rri8,      X86::IMUL64rmi8, 0 },
    { X86::Int_CMPSDrr,     X86::Int_CMPSDrm, 0 },
    { X86::Int_CMPSSrr,     X86::Int_CMPSSrm, 0 },
    { X86::Int_COMISDrr,    X86::Int_COMISDrm, 0 },
    { X86::Int_COMISSrr,    X86::Int_COMISSrm, 0 },
    { X86::Int_CVTDQ2PDrr,  X86::Int_CVTDQ2PDrm, 16 },
    { X86::Int_CVTDQ2PSrr,  X86::Int_CVTDQ2PSrm, 16 },
    { X86::Int_CVTPD2DQrr,  X86::Int_CVTPD2DQrm, 16 },
    { X86::Int_CVTPD2PSrr,  X86::Int_CVTPD2PSrm, 16 },
    { X86::Int_CVTPS2DQrr,  X86::Int_CVTPS2DQrm, 16 },
    { X86::Int_CVTPS2PDrr,  X86::Int_CVTPS2PDrm, 0 },
    { X86::Int_CVTSD2SI64rr,X86::Int_CVTSD2SI64rm, 0 },
    { X86::Int_CVTSD2SIrr,  X86::Int_CVTSD2SIrm, 0 },
    { X86::Int_CVTSD2SSrr,  X86::Int_CVTSD2SSrm, 0 },
    { X86::Int_CVTSI2SD64rr,X86::Int_CVTSI2SD64rm, 0 },
    { X86::Int_CVTSI2SDrr,  X86::Int_CVTSI2SDrm, 0 },
    { X86::Int_CVTSI2SS64rr,X86::Int_CVTSI2SS64rm, 0 },
    { X86::Int_CVTSI2SSrr,  X86::Int_CVTSI2SSrm, 0 },
    { X86::Int_CVTSS2SDrr,  X86::Int_CVTSS2SDrm, 0 },
    { X86::Int_CVTSS2SI64rr,X86::Int_CVTSS2SI64rm, 0 },
    { X86::Int_CVTSS2SIrr,  X86::Int_CVTSS2SIrm, 0 },
    { X86::Int_CVTTPD2DQrr, X86::Int_CVTTPD2DQrm, 16 },
    { X86::Int_CVTTPS2DQrr, X86::Int_CVTTPS2DQrm, 16 },
    { X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm, 0 },
    { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm, 0 },
    { X86::Int_CVTTSS2SI64rr,X86::Int_CVTTSS2SI64rm, 0 },
    { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm, 0 },
    { X86::Int_UCOMISDrr,   X86::Int_UCOMISDrm, 0 },
    { X86::Int_UCOMISSrr,   X86::Int_UCOMISSrm, 0 },
    { X86::MOV16rr,         X86::MOV16rm, 0 },
    { X86::MOV32rr,         X86::MOV32rm, 0 },
    { X86::MOV32rr_TC,      X86::MOV32rm_TC, 0 },
    { X86::MOV64rr,         X86::MOV64rm, 0 },
    { X86::MOV64toPQIrr,    X86::MOVQI2PQIrm, 0 },
    { X86::MOV64toSDrr,     X86::MOV64toSDrm, 0 },
    { X86::MOV8rr,          X86::MOV8rm, 0 },
    { X86::MOVAPDrr,        X86::MOVAPDrm, 16 },
    { X86::MOVAPSrr,        X86::MOVAPSrm, 16 },
    { X86::MOVDDUPrr,       X86::MOVDDUPrm, 0 },
    { X86::MOVDI2PDIrr,     X86::MOVDI2PDIrm, 0 },
    { X86::MOVDI2SSrr,      X86::MOVDI2SSrm, 0 },
    { X86::MOVDQArr,        X86::MOVDQArm, 16 },
    { X86::MOVSHDUPrr,      X86::MOVSHDUPrm, 16 },
    { X86::MOVSLDUPrr,      X86::MOVSLDUPrm, 16 },
    { X86::MOVSX16rr8,      X86::MOVSX16rm8, 0 },
    { X86::MOVSX32rr16,     X86::MOVSX32rm16, 0 },
    { X86::MOVSX32rr8,      X86::MOVSX32rm8, 0 },
    { X86::MOVSX64rr16,     X86::MOVSX64rm16, 0 },
    { X86::MOVSX64rr32,     X86::MOVSX64rm32, 0 },
    { X86::MOVSX64rr8,      X86::MOVSX64rm8, 0 },
    { X86::MOVUPDrr,        X86::MOVUPDrm, 16 },
    { X86::MOVUPSrr,        X86::MOVUPSrm, 0 },
    { X86::MOVZDI2PDIrr,    X86::MOVZDI2PDIrm, 0 },
    { X86::MOVZQI2PQIrr,    X86::MOVZQI2PQIrm, 0 },
    { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm, 16 },
    { X86::MOVZX16rr8,      X86::MOVZX16rm8, 0 },
    { X86::MOVZX32rr16,     X86::MOVZX32rm16, 0 },
    { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8, 0 },
    { X86::MOVZX32rr8,      X86::MOVZX32rm8, 0 },
    { X86::MOVZX64rr16,     X86::MOVZX64rm16, 0 },
    { X86::MOVZX64rr32,     X86::MOVZX64rm32, 0 },
    { X86::MOVZX64rr8,      X86::MOVZX64rm8, 0 },
    { X86::PSHUFDri,        X86::PSHUFDmi, 16 },
    { X86::PSHUFHWri,       X86::PSHUFHWmi, 16 },
    { X86::PSHUFLWri,       X86::PSHUFLWmi, 16 },
    { X86::RCPPSr,          X86::RCPPSm, 16 },
    { X86::RCPPSr_Int,      X86::RCPPSm_Int, 16 },
    { X86::RSQRTPSr,        X86::RSQRTPSm, 16 },
    { X86::RSQRTPSr_Int,    X86::RSQRTPSm_Int, 16 },
    { X86::RSQRTSSr,        X86::RSQRTSSm, 0 },
    { X86::RSQRTSSr_Int,    X86::RSQRTSSm_Int, 0 },
    { X86::SQRTPDr,         X86::SQRTPDm, 16 },
    { X86::SQRTPDr_Int,     X86::SQRTPDm_Int, 16 },
    { X86::SQRTPSr,         X86::SQRTPSm, 16 },
    { X86::SQRTPSr_Int,     X86::SQRTPSm_Int, 16 },
    { X86::SQRTSDr,         X86::SQRTSDm, 0 },
    { X86::SQRTSDr_Int,     X86::SQRTSDm_Int, 0 },
    { X86::SQRTSSr,         X86::SQRTSSm, 0 },
    { X86::SQRTSSr_Int,     X86::SQRTSSm_Int, 0 },
    { X86::TEST16rr,        X86::TEST16rm, 0 },
    { X86::TEST32rr,        X86::TEST32rm, 0 },
    { X86::TEST64rr,        X86::TEST64rm, 0 },
    { X86::TEST8rr,         X86::TEST8rm, 0 },
    // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
    { X86::UCOMISDrr,       X86::UCOMISDrm, 0 },
    { X86::UCOMISSrr,       X86::UCOMISSrm, 0 }
  };
  for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
    unsigned RegOp = OpTbl1[i][0];
    unsigned MemOp = OpTbl1[i][1];
    unsigned Align = OpTbl1[i][2];
    if (!RegOp2MemOpTable1.insert(std::make_pair((unsigned*)RegOp,
                                                 std::make_pair(MemOp,Align))).second)
      assert(false && "Duplicated entries?");
    // Index 1, folded load
    unsigned AuxInfo = 1 | (1 << 4);
    if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
      if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                  std::make_pair(RegOp, AuxInfo))).second)
        AmbEntries.push_back(MemOp);
  }
  static const unsigned OpTbl2[][3] = {
    { X86::ADC32rr,         X86::ADC32rm, 0 },
    { X86::ADC64rr,         X86::ADC64rm, 0 },
    { X86::ADD16rr,         X86::ADD16rm, 0 },
    { X86::ADD32rr,         X86::ADD32rm, 0 },
    { X86::ADD64rr,         X86::ADD64rm, 0 },
    { X86::ADD8rr,          X86::ADD8rm, 0 },
    { X86::ADDPDrr,         X86::ADDPDrm, 16 },
    { X86::ADDPSrr,         X86::ADDPSrm, 16 },
    { X86::ADDSDrr,         X86::ADDSDrm, 0 },
    { X86::ADDSSrr,         X86::ADDSSrm, 0 },
    { X86::ADDSUBPDrr,      X86::ADDSUBPDrm, 16 },
    { X86::ADDSUBPSrr,      X86::ADDSUBPSrm, 16 },
    { X86::AND16rr,         X86::AND16rm, 0 },
    { X86::AND32rr,         X86::AND32rm, 0 },
    { X86::AND64rr,         X86::AND64rm, 0 },
    { X86::AND8rr,          X86::AND8rm, 0 },
    { X86::ANDNPDrr,        X86::ANDNPDrm, 16 },
    { X86::ANDNPSrr,        X86::ANDNPSrm, 16 },
    { X86::ANDPDrr,         X86::ANDPDrm, 16 },
    { X86::ANDPSrr,         X86::ANDPSrm, 16 },
    { X86::CMOVA16rr,       X86::CMOVA16rm, 0 },
    { X86::CMOVA32rr,       X86::CMOVA32rm, 0 },
    { X86::CMOVA64rr,       X86::CMOVA64rm, 0 },
    { X86::CMOVAE16rr,      X86::CMOVAE16rm, 0 },
    { X86::CMOVAE32rr,      X86::CMOVAE32rm, 0 },
    { X86::CMOVAE64rr,      X86::CMOVAE64rm, 0 },
    { X86::CMOVB16rr,       X86::CMOVB16rm, 0 },
    { X86::CMOVB32rr,       X86::CMOVB32rm, 0 },
    { X86::CMOVB64rr,       X86::CMOVB64rm, 0 },
    { X86::CMOVBE16rr,      X86::CMOVBE16rm, 0 },
    { X86::CMOVBE32rr,      X86::CMOVBE32rm, 0 },
    { X86::CMOVBE64rr,      X86::CMOVBE64rm, 0 },
    { X86::CMOVE16rr,       X86::CMOVE16rm, 0 },
    { X86::CMOVE32rr,       X86::CMOVE32rm, 0 },
    { X86::CMOVE64rr,       X86::CMOVE64rm, 0 },
    { X86::CMOVG16rr,       X86::CMOVG16rm, 0 },
    { X86::CMOVG32rr,       X86::CMOVG32rm, 0 },
    { X86::CMOVG64rr,       X86::CMOVG64rm, 0 },
    { X86::CMOVGE16rr,      X86::CMOVGE16rm, 0 },
    { X86::CMOVGE32rr,      X86::CMOVGE32rm, 0 },
    { X86::CMOVGE64rr,      X86::CMOVGE64rm, 0 },
    { X86::CMOVL16rr,       X86::CMOVL16rm, 0 },
    { X86::CMOVL32rr,       X86::CMOVL32rm, 0 },
    { X86::CMOVL64rr,       X86::CMOVL64rm, 0 },
    { X86::CMOVLE16rr,      X86::CMOVLE16rm, 0 },
    { X86::CMOVLE32rr,      X86::CMOVLE32rm, 0 },
    { X86::CMOVLE64rr,      X86::CMOVLE64rm, 0 },
    { X86::CMOVNE16rr,      X86::CMOVNE16rm, 0 },
    { X86::CMOVNE32rr,      X86::CMOVNE32rm, 0 },
    { X86::CMOVNE64rr,      X86::CMOVNE64rm, 0 },
    { X86::CMOVNO16rr,      X86::CMOVNO16rm, 0 },
    { X86::CMOVNO32rr,      X86::CMOVNO32rm, 0 },
    { X86::CMOVNO64rr,      X86::CMOVNO64rm, 0 },
    { X86::CMOVNP16rr,      X86::CMOVNP16rm, 0 },
    { X86::CMOVNP32rr,      X86::CMOVNP32rm, 0 },
    { X86::CMOVNP64rr,      X86::CMOVNP64rm, 0 },
    { X86::CMOVNS16rr,      X86::CMOVNS16rm, 0 },
    { X86::CMOVNS32rr,      X86::CMOVNS32rm, 0 },
    { X86::CMOVNS64rr,      X86::CMOVNS64rm, 0 },
    { X86::CMOVO16rr,       X86::CMOVO16rm, 0 },
    { X86::CMOVO32rr,       X86::CMOVO32rm, 0 },
    { X86::CMOVO64rr,       X86::CMOVO64rm, 0 },
    { X86::CMOVP16rr,       X86::CMOVP16rm, 0 },
    { X86::CMOVP32rr,       X86::CMOVP32rm, 0 },
    { X86::CMOVP64rr,       X86::CMOVP64rm, 0 },
    { X86::CMOVS16rr,       X86::CMOVS16rm, 0 },
    { X86::CMOVS32rr,       X86::CMOVS32rm, 0 },
    { X86::CMOVS64rr,       X86::CMOVS64rm, 0 },
    { X86::CMPPDrri,        X86::CMPPDrmi, 16 },
    { X86::CMPPSrri,        X86::CMPPSrmi, 16 },
    { X86::CMPSDrr,         X86::CMPSDrm, 0 },
    { X86::CMPSSrr,         X86::CMPSSrm, 0 },
    { X86::DIVPDrr,         X86::DIVPDrm, 16 },
    { X86::DIVPSrr,         X86::DIVPSrm, 16 },
    { X86::DIVSDrr,         X86::DIVSDrm, 0 },
    { X86::DIVSSrr,         X86::DIVSSrm, 0 },
    { X86::FsANDNPDrr,      X86::FsANDNPDrm, 16 },
    { X86::FsANDNPSrr,      X86::FsANDNPSrm, 16 },
    { X86::FsANDPDrr,       X86::FsANDPDrm, 16 },
    { X86::FsANDPSrr,       X86::FsANDPSrm, 16 },
    { X86::FsORPDrr,        X86::FsORPDrm, 16 },
    { X86::FsORPSrr,        X86::FsORPSrm, 16 },
    { X86::FsXORPDrr,       X86::FsXORPDrm, 16 },
    { X86::FsXORPSrr,       X86::FsXORPSrm, 16 },
    { X86::HADDPDrr,        X86::HADDPDrm, 16 },
    { X86::HADDPSrr,        X86::HADDPSrm, 16 },
    { X86::HSUBPDrr,        X86::HSUBPDrm, 16 },
    { X86::HSUBPSrr,        X86::HSUBPSrm, 16 },
    { X86::IMUL16rr,        X86::IMUL16rm, 0 },
    { X86::IMUL32rr,        X86::IMUL32rm, 0 },
    { X86::IMUL64rr,        X86::IMUL64rm, 0 },
    { X86::MAXPDrr,         X86::MAXPDrm, 16 },
    { X86::MAXPDrr_Int,     X86::MAXPDrm_Int, 16 },
    { X86::MAXPSrr,         X86::MAXPSrm, 16 },
    { X86::MAXPSrr_Int,     X86::MAXPSrm_Int, 16 },
    { X86::MAXSDrr,         X86::MAXSDrm, 0 },
    { X86::MAXSDrr_Int,     X86::MAXSDrm_Int, 0 },
    { X86::MAXSSrr,         X86::MAXSSrm, 0 },
    { X86::MAXSSrr_Int,     X86::MAXSSrm_Int, 0 },
    { X86::MINPDrr,         X86::MINPDrm, 16 },
    { X86::MINPDrr_Int,     X86::MINPDrm_Int, 16 },
    { X86::MINPSrr,         X86::MINPSrm, 16 },
    { X86::MINPSrr_Int,     X86::MINPSrm_Int, 16 },
    { X86::MINSDrr,         X86::MINSDrm, 0 },
    { X86::MINSDrr_Int,     X86::MINSDrm_Int, 0 },
    { X86::MINSSrr,         X86::MINSSrm, 0 },
    { X86::MINSSrr_Int,     X86::MINSSrm_Int, 0 },
    { X86::MULPDrr,         X86::MULPDrm, 16 },
    { X86::MULPSrr,         X86::MULPSrm, 16 },
    { X86::MULSDrr,         X86::MULSDrm, 0 },
    { X86::MULSSrr,         X86::MULSSrm, 0 },
    { X86::OR16rr,          X86::OR16rm, 0 },
    { X86::OR32rr,          X86::OR32rm, 0 },
    { X86::OR64rr,          X86::OR64rm, 0 },
    { X86::OR8rr,           X86::OR8rm, 0 },
    { X86::ORPDrr,          X86::ORPDrm, 16 },
    { X86::ORPSrr,          X86::ORPSrm, 16 },
    { X86::PACKSSDWrr,      X86::PACKSSDWrm, 16 },
    { X86::PACKSSWBrr,      X86::PACKSSWBrm, 16 },
    { X86::PACKUSWBrr,      X86::PACKUSWBrm, 16 },
    { X86::PADDBrr,         X86::PADDBrm, 16 },
    { X86::PADDDrr,         X86::PADDDrm, 16 },
    { X86::PADDQrr,         X86::PADDQrm, 16 },
    { X86::PADDSBrr,        X86::PADDSBrm, 16 },
    { X86::PADDSWrr,        X86::PADDSWrm, 16 },
    { X86::PADDWrr,         X86::PADDWrm, 16 },
    { X86::PANDNrr,         X86::PANDNrm, 16 },
    { X86::PANDrr,          X86::PANDrm, 16 },
    { X86::PAVGBrr,         X86::PAVGBrm, 16 },
    { X86::PAVGWrr,         X86::PAVGWrm, 16 },
    { X86::PCMPEQBrr,       X86::PCMPEQBrm, 16 },
    { X86::PCMPEQDrr,       X86::PCMPEQDrm, 16 },
    { X86::PCMPEQWrr,       X86::PCMPEQWrm, 16 },
    { X86::PCMPGTBrr,       X86::PCMPGTBrm, 16 },
    { X86::PCMPGTDrr,       X86::PCMPGTDrm, 16 },
    { X86::PCMPGTWrr,       X86::PCMPGTWrm, 16 },
    { X86::PINSRWrri,       X86::PINSRWrmi, 16 },
    { X86::PMADDWDrr,       X86::PMADDWDrm, 16 },
    { X86::PMAXSWrr,        X86::PMAXSWrm, 16 },
    { X86::PMAXUBrr,        X86::PMAXUBrm, 16 },
    { X86::PMINSWrr,        X86::PMINSWrm, 16 },
    { X86::PMINUBrr,        X86::PMINUBrm, 16 },
    { X86::PMULDQrr,        X86::PMULDQrm, 16 },
    { X86::PMULHUWrr,       X86::PMULHUWrm, 16 },
    { X86::PMULHWrr,        X86::PMULHWrm, 16 },
    { X86::PMULLDrr,        X86::PMULLDrm, 16 },
    { X86::PMULLWrr,        X86::PMULLWrm, 16 },
    { X86::PMULUDQrr,       X86::PMULUDQrm, 16 },
    { X86::PORrr,           X86::PORrm, 16 },
    { X86::PSADBWrr,        X86::PSADBWrm, 16 },
    { X86::PSLLDrr,         X86::PSLLDrm, 16 },
    { X86::PSLLQrr,         X86::PSLLQrm, 16 },
    { X86::PSLLWrr,         X86::PSLLWrm, 16 },
    { X86::PSRADrr,         X86::PSRADrm, 16 },
    { X86::PSRAWrr,         X86::PSRAWrm, 16 },
    { X86::PSRLDrr,         X86::PSRLDrm, 16 },
    { X86::PSRLQrr,         X86::PSRLQrm, 16 },
    { X86::PSRLWrr,         X86::PSRLWrm, 16 },
    { X86::PSUBBrr,         X86::PSUBBrm, 16 },
    { X86::PSUBDrr,         X86::PSUBDrm, 16 },
    { X86::PSUBSBrr,        X86::PSUBSBrm, 16 },
    { X86::PSUBSWrr,        X86::PSUBSWrm, 16 },
    { X86::PSUBWrr,         X86::PSUBWrm, 16 },
    { X86::PUNPCKHBWrr,     X86::PUNPCKHBWrm, 16 },
    { X86::PUNPCKHDQrr,     X86::PUNPCKHDQrm, 16 },
    { X86::PUNPCKHQDQrr,    X86::PUNPCKHQDQrm, 16 },
    { X86::PUNPCKHWDrr,     X86::PUNPCKHWDrm, 16 },
    { X86::PUNPCKLBWrr,     X86::PUNPCKLBWrm, 16 },
    { X86::PUNPCKLDQrr,     X86::PUNPCKLDQrm, 16 },
    { X86::PUNPCKLQDQrr,    X86::PUNPCKLQDQrm, 16 },
    { X86::PUNPCKLWDrr,     X86::PUNPCKLWDrm, 16 },
    { X86::PXORrr,          X86::PXORrm, 16 },
    { X86::SBB32rr,         X86::SBB32rm, 0 },
    { X86::SBB64rr,         X86::SBB64rm, 0 },
    { X86::SHUFPDrri,       X86::SHUFPDrmi, 16 },
    { X86::SHUFPSrri,       X86::SHUFPSrmi, 16 },
    { X86::SUB16rr,         X86::SUB16rm, 0 },
    { X86::SUB32rr,         X86::SUB32rm, 0 },
    { X86::SUB64rr,         X86::SUB64rm, 0 },
    { X86::SUB8rr,          X86::SUB8rm, 0 },
    { X86::SUBPDrr,         X86::SUBPDrm, 16 },
    { X86::SUBPSrr,         X86::SUBPSrm, 16 },
    { X86::SUBSDrr,         X86::SUBSDrm, 0 },
    { X86::SUBSSrr,         X86::SUBSSrm, 0 },
    // FIXME: TEST*rr -> swapped operand of TEST*mr.
    { X86::UNPCKHPDrr,      X86::UNPCKHPDrm, 16 },
    { X86::UNPCKHPSrr,      X86::UNPCKHPSrm, 16 },
    { X86::UNPCKLPDrr,      X86::UNPCKLPDrm, 16 },
    { X86::UNPCKLPSrr,      X86::UNPCKLPSrm, 16 },
    { X86::XOR16rr,         X86::XOR16rm, 0 },
    { X86::XOR32rr,         X86::XOR32rm, 0 },
    { X86::XOR64rr,         X86::XOR64rm, 0 },
    { X86::XOR8rr,          X86::XOR8rm, 0 },
    { X86::XORPDrr,         X86::XORPDrm, 16 },
    { X86::XORPSrr,         X86::XORPSrm, 16 }
  };
  for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
    unsigned RegOp = OpTbl2[i][0];
    unsigned MemOp = OpTbl2[i][1];
    unsigned Align = OpTbl2[i][2];
    if (!RegOp2MemOpTable2.insert(std::make_pair((unsigned*)RegOp,
                                                 std::make_pair(MemOp,Align))).second)
      assert(false && "Duplicated entries?");
    // Index 2, folded load
    unsigned AuxInfo = 2 | (1 << 4);
    if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                std::make_pair(RegOp, AuxInfo))).second)
      AmbEntries.push_back(MemOp);
  }

  // Remove ambiguous entries.
  assert(AmbEntries.empty() && "Duplicated entries in unfolding maps?");
}
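
// A sketch of how a client consults the tables built above (hypothetical
// code, not part of this file): look the register-form opcode up, then honor
// the recorded alignment requirement before substituting the memory form:
//   DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
//     RegOp2MemOpTable2.find((unsigned*)X86::ADDPSrr);
//   if (I != RegOp2MemOpTable2.end() && Align >= I->second.second)
//     Opc = I->second.first;  // X86::ADDPSrm, needs 16-byte alignment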
bool
X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                    unsigned &SrcReg, unsigned &DstReg,
                                    unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: break;
  case X86::MOVSX16rr8:
  case X86::MOVZX16rr8:
  case X86::MOVSX32rr8:
  case X86::MOVZX32rr8:
  case X86::MOVSX64rr8:
  case X86::MOVZX64rr8:
    if (!TM.getSubtarget<X86Subtarget>().is64Bit())
      // It's not always legal to reference the low 8-bit of the larger
      // register in 32-bit mode.
      return false;
  case X86::MOVSX32rr16:
  case X86::MOVZX32rr16:
  case X86::MOVSX64rr16:
  case X86::MOVZX64rr16:
  case X86::MOVSX64rr32:
  case X86::MOVZX64rr32: {
    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
      // Be conservative.
      return false;
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable(0);
    case X86::MOVSX16rr8:
    case X86::MOVZX16rr8:
    case X86::MOVSX32rr8:
    case X86::MOVZX32rr8:
    case X86::MOVSX64rr8:
    case X86::MOVZX64rr8:
      SubIdx = X86::sub_8bit;
      break;
    case X86::MOVSX32rr16:
    case X86::MOVZX32rr16:
    case X86::MOVSX64rr16:
    case X86::MOVZX64rr16:
      SubIdx = X86::sub_16bit;
      break;
    case X86::MOVSX64rr32:
    case X86::MOVZX64rr32:
      SubIdx = X86::sub_32bit;
      break;
    }
    return true;
  }
  }
  return false;
}
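
// For example, '%rdx = MOVZX64rr32 %eax' is reported with SrcReg = %eax,
// DstReg = %rdx, and SubIdx = X86::sub_32bit, letting the coalescer treat
// the zero-extend as a sub-register copy.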
/// isFrameOperand - Return true and the FrameIndex if the specified
/// operand and the following operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
                                  int &FrameIndex) const {
  if (MI->getOperand(Op).isFI() && MI->getOperand(Op+1).isImm() &&
      MI->getOperand(Op+2).isReg() && MI->getOperand(Op+3).isImm() &&
      MI->getOperand(Op+1).getImm() == 1 &&
      MI->getOperand(Op+2).getReg() == 0 &&
      MI->getOperand(Op+3).getImm() == 0) {
    FrameIndex = MI->getOperand(Op).getIndex();
    return true;
  }
  return false;
}
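
// Note that this only matches the exact address '[FrameIndex + 1*noreg + 0]':
// scale 1, no index register, and zero displacement. Any other addressing of
// the slot is rejected.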
static bool isFrameLoadOpcode(int Opcode) {
  switch (Opcode) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV32rm_TC:
  case X86::MOV64rm:
  case X86::MOV64rm_TC:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    return true;
  }
  return false;
}

static bool isFrameStoreOpcode(int Opcode) {
  switch (Opcode) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV32mr:
  case X86::MOV32mr_TC:
  case X86::MOV64mr:
  case X86::MOV64mr_TC:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MOVDQAmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    return true;
  }
  return false;
}
unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                           int &FrameIndex) const {
  if (isFrameLoadOpcode(MI->getOpcode()))
    if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
      return MI->getOperand(0).getReg();
  return 0;
}
unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                 int &FrameIndex) const {
  if (isFrameLoadOpcode(MI->getOpcode())) {
    unsigned Reg;
    if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    const MachineMemOperand *Dummy;
    return hasLoadFromStackSlot(MI, Dummy, FrameIndex);
  }
  return 0;
}
bool X86InstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                        const MachineMemOperand *&MMO,
                                        int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}
unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                          int &FrameIndex) const {
  if (isFrameStoreOpcode(MI->getOpcode()))
    if (MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
        isFrameOperand(MI, 0, FrameIndex))
      return MI->getOperand(X86::AddrNumOperands).getReg();
  return 0;
}
unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                int &FrameIndex) const {
  if (isFrameStoreOpcode(MI->getOpcode())) {
    unsigned Reg;
    if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    const MachineMemOperand *Dummy;
    return hasStoreToStackSlot(MI, Dummy, FrameIndex);
  }
  return 0;
}
bool X86InstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                       const MachineMemOperand *&MMO,
                                       int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}
/// regIsPICBase - Return true if the register is a PIC base (i.e. it is
/// defined by X86::MOVPC32r).
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
  bool isPICBase = false;
  for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
         E = MRI.def_end(); I != E; ++I) {
    MachineInstr *DefMI = I.getOperand().getParent();
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}
bool
X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                                AliasAnalysis *AA) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVUPSrm_Int:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI->getOperand(1).isReg() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
        MI->isInvariantLoad(AA)) {
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0 || BaseReg == X86::RIP)
        return true;
      // Allow re-materialization of PIC load.
      if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
        return false;
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      bool isPICBase = false;
      for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
             E = MRI.def_end(); I != E; ++I) {
        MachineInstr *DefMI = I.getOperand().getParent();
        if (DefMI->getOpcode() != X86::MOVPC32r)
          return false;
        assert(!isPICBase && "More than one PIC base?");
        isPICBase = true;
      }
      return isPICBase;
    }
    return false;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
        !MI->getOperand(4).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      if (!MI->getOperand(1).isReg())
        return true;
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }

  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}
/// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
/// that would clobber the EFLAGS condition register. Note the result may be
/// conservative. If it cannot definitely determine the safety after visiting
/// a few instructions in each direction, it assumes it's not safe.
static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) {
  MachineBasicBlock::iterator E = MBB.end();

  // It's always safe to clobber EFLAGS at the end of a block.
  if (I == E)
    return true;

  // For compile time consideration, if we are not able to determine the
  // safety after visiting 4 instructions in each direction, we will assume
  // it's not safe.
  MachineBasicBlock::iterator Iter = I;
  for (unsigned i = 0; i < 4; ++i) {
    bool SeenDef = false;
    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = Iter->getOperand(j);
      if (!MO.isReg())
        continue;
      if (MO.getReg() == X86::EFLAGS) {
        if (MO.isUse())
          return false;
        SeenDef = true;
      }
    }

    if (SeenDef)
      // This instruction defines EFLAGS, no need to look any further.
      return true;
    ++Iter;
    // Skip over DBG_VALUE.
    while (Iter != E && Iter->isDebugValue())
      ++Iter;

    // If we make it to the end of the block, it's safe to clobber EFLAGS.
    if (Iter == E)
      return true;
  }

  MachineBasicBlock::iterator B = MBB.begin();
  Iter = I;
  for (unsigned i = 0; i < 4; ++i) {
    // If we make it to the beginning of the block, it's safe to clobber
    // EFLAGS iff EFLAGS is not live-in.
    if (Iter == B)
      return !MBB.isLiveIn(X86::EFLAGS);

    --Iter;
    // Skip over DBG_VALUE.
    while (Iter != B && Iter->isDebugValue())
      --Iter;

    bool SawKill = false;
    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = Iter->getOperand(j);
      if (MO.isReg() && MO.getReg() == X86::EFLAGS) {
        if (MO.isDef()) return MO.isDead();
        if (MO.isKill()) SawKill = true;
      }
    }

    if (SawKill)
      // This instruction kills EFLAGS and doesn't redefine it, so
      // there's no need to look further.
      return true;
  }

  // Conservative answer.
  return false;
}
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg, unsigned SubIdx,
                                 const MachineInstr *Orig,
                                 const TargetRegisterInfo &TRI) const {
  DebugLoc DL = Orig->getDebugLoc();

  // MOV32r0 etc. are implemented with xor which clobbers condition code.
  // Re-materialize them as movri instructions to avoid side effects.
  bool Clone = true;
  unsigned Opc = Orig->getOpcode();
  switch (Opc) {
  default: break;
  case X86::MOV8r0:
  case X86::MOV16r0:
  case X86::MOV32r0:
  case X86::MOV64r0: {
    if (!isSafeToClobberEFLAGS(MBB, I)) {
      switch (Opc) {
      default: break;
      case X86::MOV8r0:  Opc = X86::MOV8ri;  break;
      case X86::MOV16r0: Opc = X86::MOV16ri; break;
      case X86::MOV32r0: Opc = X86::MOV32ri; break;
      case X86::MOV64r0: Opc = X86::MOV64ri64i32; break;
      }
      Clone = false;
    }
    break;
  }
  }

  if (Clone) {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MBB.insert(I, MI);
  } else {
    BuildMI(MBB, I, DL, get(Opc)).addOperand(Orig->getOperand(0)).addImm(0);
  }

  MachineInstr *NewMI = prior(I);
  NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
/// is not marked dead.
static bool hasLiveCondCodeDef(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}
/// convertToThreeAddressWithLEA - Helper for convertToThreeAddress when
/// 16-bit LEA is disabled, use 32-bit LEA to form 3-address code by promoting
/// to a 32-bit superregister and then truncating back down to a 16-bit
/// subregister.
MachineInstr *
X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
                                           MachineFunction::iterator &MFI,
                                           MachineBasicBlock::iterator &MBBI,
                                           LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  bool isDead = MI->getOperand(0).isDead();
  bool isKill = MI->getOperand(1).isKill();

  unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
    ? X86::LEA64_32r : X86::LEA32r;
  MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
  unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
  unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);

  // Build and insert into an implicit UNDEF value. This is OK because
  // we'll be shifting and then extracting the lower 16-bits.
  // This has the potential to cause a partial register stall. e.g.
  //   movw    (%rbp,%rcx,2), %dx
  //   leal    -65(%rdx), %esi
  // But testing has shown this *does* help performance in 64-bit mode (at
  // least on modern x86 machines).
  BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
  MachineInstr *InsMI =
    BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
    .addReg(leaInReg, RegState::Define, X86::sub_16bit)
    .addReg(Src, getKillRegState(isKill));

  MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
                                    get(Opc), leaOutReg);
  switch (MIOpc) {
  default:
    llvm_unreachable(0);
  case X86::SHL16ri: {
    unsigned ShAmt = MI->getOperand(2).getImm();
    MIB.addReg(0).addImm(1 << ShAmt)
       .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
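    // E.g. a 16-bit 'shl $2' becomes 'leal (,%in,4), %out': base 0,
    // scale 1 << ShAmt, index = leaInReg, displacement 0, no segment.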
    break;
  }
  case X86::INC16r:
  case X86::INC64_16r:
    addRegOffset(MIB, leaInReg, true, 1);
    break;
  case X86::DEC16r:
  case X86::DEC64_16r:
    addRegOffset(MIB, leaInReg, true, -1);
    break;
  case X86::ADD16ri:
  case X86::ADD16ri8:
    addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
    break;
  case X86::ADD16rr: {
    unsigned Src2 = MI->getOperand(2).getReg();
    bool isKill2 = MI->getOperand(2).isKill();
    unsigned leaInReg2 = 0;
    MachineInstr *InsMI2 = 0;
    if (Src == Src2) {
      // ADD16rr %reg1028<kill>, %reg1028
      // just a single insert_subreg.
      addRegReg(MIB, leaInReg, true, leaInReg, false);
    } else {
      leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      // Build and insert into an implicit UNDEF value. This is OK because
      // we'll be shifting and then extracting the lower 16-bits.
      BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
      InsMI2 =
        BuildMI(*MFI, MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
        .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
        .addReg(Src2, getKillRegState(isKill2));
      addRegReg(MIB, leaInReg, true, leaInReg2, true);
    }
    if (LV && isKill2 && InsMI2)
      LV->replaceKillInstruction(Src2, MI, InsMI2);
    break;
  }
  }

  MachineInstr *NewMI = MIB;
  MachineInstr *ExtMI =
    BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
    .addReg(Dest, RegState::Define | getDeadRegState(isDead))
    .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);

  if (LV) {
    // Update live variables
    LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
    LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
    if (isKill)
      LV->replaceKillInstruction(Src, MI, InsMI);
    if (isDead)
      LV->replaceKillInstruction(Dest, MI, ExtMI);
  }

  return ExtMI;
}
/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  // All input instructions are two-addr instructions. Get the known operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  bool isDead = MI->getOperand(0).isDead();
  bool isKill = MI->getOperand(1).isKill();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  // 16-bit LEA is also slow on Core2.
  bool DisableLEA16 = true;
  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();

  unsigned MIOpc = MI->getOpcode();
  switch (MIOpc) {
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    if (B != C) return 0;
    unsigned A = MI->getOperand(0).getReg();
    unsigned M = MI->getOperand(3).getImm();
    NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
      .addReg(A, RegState::Define | getDeadRegState(isDead))
      .addReg(B, getKillRegState(isKill)).addImm(M);
    break;
  }
  case X86::SHL64ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
      .addReg(Dest, RegState::Define | getDeadRegState(isDead))
      .addReg(0).addImm(1 << ShAmt)
      .addReg(Src, getKillRegState(isKill))
      .addImm(0).addReg(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
      .addReg(Dest, RegState::Define | getDeadRegState(isDead))
      .addReg(0).addImm(1 << ShAmt)
      .addReg(Src, getKillRegState(isKill)).addImm(0).addReg(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    if (DisableLEA16)
      return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
    NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
      .addReg(Dest, RegState::Define | getDeadRegState(isDead))
      .addReg(0).addImm(1 << ShAmt)
      .addReg(Src, getKillRegState(isKill))
      .addImm(0).addReg(0);
    break;
  }
  default: {
    // The following opcodes also set the condition code register(s). Only
    // convert them to equivalent lea if the condition code register defs
    // are dead!
    if (hasLiveCondCodeDef(MI))
      return 0;

    switch (MIOpc) {
    default: return 0;
    case X86::INC64r:
    case X86::INC32r:
    case X86::INC64_32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                           .addReg(Dest, RegState::Define |
                                   getDeadRegState(isDead)),
                           Src, isKill, 1);
      break;
    }
    case X86::INC16r:
    case X86::INC64_16r:
      if (DisableLEA16)
        return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
                           .addReg(Dest, RegState::Define |
                                   getDeadRegState(isDead)),
                           Src, isKill, 1);
      break;
    case X86::DEC64r:
    case X86::DEC32r:
    case X86::DEC64_32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                           .addReg(Dest, RegState::Define |
                                   getDeadRegState(isDead)),
                           Src, isKill, -1);
      break;
    }
    case X86::DEC16r:
    case X86::DEC64_16r:
      if (DisableLEA16)
        return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
                           .addReg(Dest, RegState::Define |
                                   getDeadRegState(isDead)),
                           Src, isKill, -1);
      break;
    case X86::ADD64rr:
    case X86::ADD32rr: {
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      unsigned Src2 = MI->getOperand(2).getReg();
      bool isKill2 = MI->getOperand(2).isKill();
      NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                        .addReg(Dest, RegState::Define |
                                getDeadRegState(isDead)),
                        Src, isKill, Src2, isKill2);
      if (LV && isKill2)
        LV->replaceKillInstruction(Src2, MI, NewMI);
      break;
    }
    case X86::ADD16rr: {
      if (DisableLEA16)
        return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Src2 = MI->getOperand(2).getReg();
      bool isKill2 = MI->getOperand(2).isKill();
      NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
                        .addReg(Dest, RegState::Define |
                                getDeadRegState(isDead)),
                        Src, isKill, Src2, isKill2);
      if (LV && isKill2)
        LV->replaceKillInstruction(Src2, MI, NewMI);
      break;
    }
    case X86::ADD64ri32:
    case X86::ADD64ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
                           .addReg(Dest, RegState::Define |
                                   getDeadRegState(isDead)),
                           Src, isKill, MI->getOperand(2).getImm());
      break;
    case X86::ADD32ri:
    case X86::ADD32ri8: {
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                           .addReg(Dest, RegState::Define |
                                   getDeadRegState(isDead)),
                           Src, isKill, MI->getOperand(2).getImm());
      break;
    }
    case X86::ADD16ri:
    case X86::ADD16ri8:
      if (DisableLEA16)
        return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
                           .addReg(Dest, RegState::Define |
                                   getDeadRegState(isDead)),
                           Src, isKill, MI->getOperand(2).getImm());
      break;
    }
  }
  }
  if (!NewMI) return 0;

  if (LV) {  // Update live variables
    if (isKill)
      LV->replaceKillInstruction(Src, MI, NewMI);
    if (isDead)
      LV->replaceKillInstruction(Dest, MI, NewMI);
  }

  MFI->insert(MBBI, NewMI);          // Insert the new inst
  return NewMI;
}
/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
MachineInstr *
X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImm();
    if (NewMI) {
      MachineFunction &MF = *MI->getParent()->getParent();
      MI = MF.CloneMachineInstr(MI);
      NewMI = false;
    }
    MI->setDesc(get(Opc));
    MI->getOperand(3).setImm(Size-Amt);
    return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
  }
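  // For instance, commuting 'A = SHLD32rri8 B, C, 5' produces
  // 'A = SHRD32rri8 C, B, 27': swapping the inputs and replacing the shift
  // amount with Size - Amt selects the same bits of the concatenated pair.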
  case X86::CMOVB16rr:
  case X86::CMOVB32rr:
  case X86::CMOVB64rr:
  case X86::CMOVAE16rr:
  case X86::CMOVAE32rr:
  case X86::CMOVAE64rr:
  case X86::CMOVE16rr:
  case X86::CMOVE32rr:
  case X86::CMOVE64rr:
  case X86::CMOVNE16rr:
  case X86::CMOVNE32rr:
  case X86::CMOVNE64rr:
  case X86::CMOVBE16rr:
  case X86::CMOVBE32rr:
  case X86::CMOVBE64rr:
  case X86::CMOVA16rr:
  case X86::CMOVA32rr:
  case X86::CMOVA64rr:
  case X86::CMOVL16rr:
  case X86::CMOVL32rr:
  case X86::CMOVL64rr:
  case X86::CMOVGE16rr:
  case X86::CMOVGE32rr:
  case X86::CMOVGE64rr:
  case X86::CMOVLE16rr:
  case X86::CMOVLE32rr:
  case X86::CMOVLE64rr:
  case X86::CMOVG16rr:
  case X86::CMOVG32rr:
  case X86::CMOVG64rr:
  case X86::CMOVS16rr:
  case X86::CMOVS32rr:
  case X86::CMOVS64rr:
  case X86::CMOVNS16rr:
  case X86::CMOVNS32rr:
  case X86::CMOVNS64rr:
  case X86::CMOVP16rr:
  case X86::CMOVP32rr:
  case X86::CMOVP64rr:
  case X86::CMOVNP16rr:
  case X86::CMOVNP32rr:
  case X86::CMOVNP64rr:
  case X86::CMOVO16rr:
  case X86::CMOVO32rr:
  case X86::CMOVO64rr:
  case X86::CMOVNO16rr:
  case X86::CMOVNO32rr:
  case X86::CMOVNO64rr: {
    unsigned Opc = 0;
    switch (MI->getOpcode()) {
    default: break;
    case X86::CMOVB16rr:  Opc = X86::CMOVAE16rr; break;
    case X86::CMOVB32rr:  Opc = X86::CMOVAE32rr; break;
    case X86::CMOVB64rr:  Opc = X86::CMOVAE64rr; break;
    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
    case X86::CMOVE16rr:  Opc = X86::CMOVNE16rr; break;
    case X86::CMOVE32rr:  Opc = X86::CMOVNE32rr; break;
    case X86::CMOVE64rr:  Opc = X86::CMOVNE64rr; break;
    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
    case X86::CMOVA16rr:  Opc = X86::CMOVBE16rr; break;
    case X86::CMOVA32rr:  Opc = X86::CMOVBE32rr; break;
    case X86::CMOVA64rr:  Opc = X86::CMOVBE64rr; break;
    case X86::CMOVL16rr:  Opc = X86::CMOVGE16rr; break;
    case X86::CMOVL32rr:  Opc = X86::CMOVGE32rr; break;
    case X86::CMOVL64rr:  Opc = X86::CMOVGE64rr; break;
    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
    case X86::CMOVG16rr:  Opc = X86::CMOVLE16rr; break;
    case X86::CMOVG32rr:  Opc = X86::CMOVLE32rr; break;
    case X86::CMOVG64rr:  Opc = X86::CMOVLE64rr; break;
    case X86::CMOVS16rr:  Opc = X86::CMOVNS16rr; break;
    case X86::CMOVS32rr:  Opc = X86::CMOVNS32rr; break;
    case X86::CMOVS64rr:  Opc = X86::CMOVNS64rr; break;
    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
    case X86::CMOVP16rr:  Opc = X86::CMOVNP16rr; break;
    case X86::CMOVP32rr:  Opc = X86::CMOVNP32rr; break;
    case X86::CMOVP64rr:  Opc = X86::CMOVNP64rr; break;
    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
    case X86::CMOVO16rr:  Opc = X86::CMOVNO16rr; break;
    case X86::CMOVO32rr:  Opc = X86::CMOVNO32rr; break;
    case X86::CMOVO64rr:  Opc = X86::CMOVNO64rr; break;
    case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break;
    case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break;
    case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break;
    }
    if (NewMI) {
      MachineFunction &MF = *MI->getParent()->getParent();
      MI = MF.CloneMachineInstr(MI);
      NewMI = false;
    }
    MI->setDesc(get(Opc));
    // Fallthrough intended.
  }
  default:
    return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
  }
}
static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE_4:  return X86::COND_E;
  case X86::JNE_4: return X86::COND_NE;
  case X86::JL_4:  return X86::COND_L;
  case X86::JLE_4: return X86::COND_LE;
  case X86::JG_4:  return X86::COND_G;
  case X86::JGE_4: return X86::COND_GE;
  case X86::JB_4:  return X86::COND_B;
  case X86::JBE_4: return X86::COND_BE;
  case X86::JA_4:  return X86::COND_A;
  case X86::JAE_4: return X86::COND_AE;
  case X86::JS_4:  return X86::COND_S;
  case X86::JNS_4: return X86::COND_NS;
  case X86::JP_4:  return X86::COND_P;
  case X86::JNP_4: return X86::COND_NP;
  case X86::JO_4:  return X86::COND_O;
  case X86::JNO_4: return X86::COND_NO;
  }
}
unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Illegal condition code!");
  case X86::COND_E:  return X86::JE_4;
  case X86::COND_NE: return X86::JNE_4;
  case X86::COND_L:  return X86::JL_4;
  case X86::COND_LE: return X86::JLE_4;
  case X86::COND_G:  return X86::JG_4;
  case X86::COND_GE: return X86::JGE_4;
  case X86::COND_B:  return X86::JB_4;
  case X86::COND_BE: return X86::JBE_4;
  case X86::COND_A:  return X86::JA_4;
  case X86::COND_AE: return X86::JAE_4;
  case X86::COND_S:  return X86::JS_4;
  case X86::COND_NS: return X86::JNS_4;
  case X86::COND_P:  return X86::JP_4;
  case X86::COND_NP: return X86::JNP_4;
  case X86::COND_O:  return X86::JO_4;
  case X86::COND_NO: return X86::JNO_4;
  }
}
/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Illegal condition code!");
  case X86::COND_E:  return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L:  return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G:  return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B:  return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A:  return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S:  return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P:  return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O:  return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isTerminator()) return false;

  // Conditional branch is a special case.
  if (TID.isBranch() && !TID.isBarrier())
    return true;
  if (!TID.isPredicable())
    return true;
  return !isPredicated(MI);
}
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {
  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  MachineBasicBlock::iterator UnCondBrIter = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->getDesc().isBranch())
      return true;

    // Handle unconditional branches.
    if (I->getOpcode() == X86::JMP_4) {
      UnCondBrIter = I;

      if (!AllowModify) {
        TBB = I->getOperand(0).getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (llvm::next(I) != MBB.end())
        llvm::next(I)->eraseFromParent();

      Cond.clear();
      FBB = 0;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
        TBB = 0;
        I->eraseFromParent();
        I = MBB.end();
        UnCondBrIter = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = I->getOperand(0).getMBB();
      continue;
    }

    // Handle conditional branches.
    X86::CondCode BranchCode = GetCondFromBranchOpc(I->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
      if (AllowModify && UnCondBrIter != MBB.end() &&
          MBB.isLayoutSuccessor(TargetBB)) {
        // If we can modify the code and it ends in something like:
        //
        //     jCC L1
        //     jmp L2
        //   L1:
        //     ...
        //   L2:
        //
        // Then we can change this to:
        //
        //     jnCC L2
        //   L1:
        //     ...
        //   L2:
        //
        // Which is a bit more efficient.
        // We conditionally jump to the fall-through block.
        BranchCode = GetOppositeBranchCondition(BranchCode);
        unsigned JNCC = GetCondBranchFromCond(BranchCode);
        MachineBasicBlock::iterator OldInst = I;

        BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC))
          .addMBB(UnCondBrIter->getOperand(0).getMBB());
        BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_4))
          .addMBB(TargetBB);
        MBB.addSuccessor(TargetBB);

        OldInst->eraseFromParent();
        UnCondBrIter->eraseFromParent();

        // Restart the analysis.
        UnCondBrIter = MBB.end();
        I = MBB.end();
        continue;
      }

      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(BranchCode));
      continue;
    }

    // Handle subsequent conditional branches. Only handle the case where all
    // conditional branches branch to the same destination and their condition
    // opcodes fit one of the special multi-branch idioms.
    assert(Cond.size() == 1);
    assert(TBB);

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != I->getOperand(0).getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
    if (OldBranchCode == BranchCode)
      continue;

    // If they differ, see if they fit one of the known patterns. Theoretically,
    // we could handle more patterns here, but we shouldn't expect to see them
    // if instruction selection has done a reasonable job.
    if ((OldBranchCode == X86::COND_NP &&
         BranchCode == X86::COND_E) ||
        (OldBranchCode == X86::COND_E &&
         BranchCode == X86::COND_NP))
      BranchCode = X86::COND_NP_OR_E;
    else if ((OldBranchCode == X86::COND_P &&
              BranchCode == X86::COND_NE) ||
             (OldBranchCode == X86::COND_NE &&
              BranchCode == X86::COND_P))
      BranchCode = X86::COND_NE_OR_P;
    else
      return true;
1767
// Update the MachineOperand.
1768
Cond[0].setImm(BranchCode);
1774
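/// RemoveBranch - Erase the branches at the end of MBB, scanning upward until
/// something that isn't a JMP_4 or a recognized conditional branch is found,
/// and return the number of instructions removed.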
unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (I->getOpcode() != X86::JMP_4 &&
        GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

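/// InsertBranch - Emit the branch sequence described by TBB/FBB/Cond (the
/// output format of AnalyzeBranch above) and return the number of
/// instructions added. The pseudo conditions COND_NP_OR_E and COND_NE_OR_P
/// expand into two conditional branches to the same target.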
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond,
                           DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
  switch (CC) {
  case X86::COND_NP_OR_E:
    // Synthesize NP_OR_E with two branches.
    BuildMI(&MBB, DL, get(X86::JNP_4)).addMBB(TBB);
    ++Count;
    BuildMI(&MBB, DL, get(X86::JE_4)).addMBB(TBB);
    ++Count;
    break;
  case X86::COND_NE_OR_P:
    // Synthesize NE_OR_P with two branches.
    BuildMI(&MBB, DL, get(X86::JNE_4)).addMBB(TBB);
    ++Count;
    BuildMI(&MBB, DL, get(X86::JP_4)).addMBB(TBB);
    ++Count;
    break;
  default: {
    unsigned Opc = GetCondBranchFromCond(CC);
    BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
    ++Count;
  }
  }
  if (FBB) {
    // Two-way conditional branch. Insert the second branch.
    BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

/// isHReg - Test if the given register is a physical h register.
static bool isHReg(unsigned Reg) {
  return X86::GR8_ABCD_HRegClass.contains(Reg);
}

// Try to copy between VR128/VR64 and GR64 registers.
static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg) {
  // SrcReg(VR128) -> DestReg(GR64)
  // SrcReg(VR64)  -> DestReg(GR64)
  // SrcReg(GR64)  -> DestReg(VR128)
  // SrcReg(GR64)  -> DestReg(VR64)

  if (X86::GR64RegClass.contains(DestReg)) {
    if (X86::VR128RegClass.contains(SrcReg)) {
      // Copy from a VR128 register to a GR64 register.
      return X86::MOVPQIto64rr;
    } else if (X86::VR64RegClass.contains(SrcReg)) {
      // Copy from a VR64 register to a GR64 register.
      return X86::MOVSDto64rr;
    }
  } else if (X86::GR64RegClass.contains(SrcReg)) {
    // Copy from a GR64 register to a VR128 register.
    if (X86::VR128RegClass.contains(DestReg))
      return X86::MOV64toPQIrr;
    // Copy from a GR64 register to a VR64 register.
    else if (X86::VR64RegClass.contains(DestReg))
      return X86::MOV64toSDrr;
  }

  return 0;
}

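// copyPhysReg selects a single move opcode for same-class copies, defers
// GR64<->vector cases to CopyToFromAsymmetricReg above, and synthesizes
// EFLAGS copies with a push/pop pair, since x86 has no direct move between
// the flags register and a GPR.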
void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI, DebugLoc DL,
                               unsigned DestReg, unsigned SrcReg,
                               bool KillSrc) const {
  // First deal with the normal symmetric copies.
  unsigned Opc = 0;
  if (X86::GR64RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV64rr;
  else if (X86::GR32RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV32rr;
  else if (X86::GR16RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOV16rr;
  else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
    // Copying to or from a physical H register on x86-64 requires a NOREX
    // move. Otherwise use a normal move.
    if ((isHReg(DestReg) || isHReg(SrcReg)) &&
        TM.getSubtarget<X86Subtarget>().is64Bit())
      Opc = X86::MOV8rr_NOREX;
    else
      Opc = X86::MOV8rr;
  } else if (X86::VR128RegClass.contains(DestReg, SrcReg))
    Opc = X86::MOVAPSrr;
  else if (X86::VR64RegClass.contains(DestReg, SrcReg))
    Opc = X86::MMX_MOVQ64rr;
  else
    Opc = CopyToFromAsymmetricReg(DestReg, SrcReg);

  if (Opc) {
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  // Moving EFLAGS to / from another register requires a push and a pop.
  if (SrcReg == X86::EFLAGS) {
    if (X86::GR64RegClass.contains(DestReg)) {
      BuildMI(MBB, MI, DL, get(X86::PUSHF64));
      BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
      return;
    } else if (X86::GR32RegClass.contains(DestReg)) {
      BuildMI(MBB, MI, DL, get(X86::PUSHF32));
      BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
      return;
    }
  }
  if (DestReg == X86::EFLAGS) {
    if (X86::GR64RegClass.contains(SrcReg)) {
      BuildMI(MBB, MI, DL, get(X86::PUSH64r))
        .addReg(SrcReg, getKillRegState(KillSrc));
      BuildMI(MBB, MI, DL, get(X86::POPF64));
      return;
    } else if (X86::GR32RegClass.contains(SrcReg)) {
      BuildMI(MBB, MI, DL, get(X86::PUSH32r))
        .addReg(SrcReg, getKillRegState(KillSrc));
      BuildMI(MBB, MI, DL, get(X86::POPF32));
      return;
    }
  }

  DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg)
               << " to " << RI.getName(DestReg) << '\n');
  llvm_unreachable("Cannot emit physreg copy instruction");
}

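// getLoadStoreRegOpcode returns the reload (load == true) or spill opcode to
// use for a register class. One table serves both directions; only the VR128
// case consults isStackAligned to pick MOVAPS (aligned) over MOVUPS.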
static unsigned getLoadStoreRegOpcode(unsigned Reg,
                                      const TargetRegisterClass *RC,
                                      bool isStackAligned,
                                      const TargetMachine &TM,
                                      bool load) {
  switch (RC->getID()) {
  default:
    llvm_unreachable("Unknown regclass");
  case X86::GR64RegClassID:
  case X86::GR64_NOSPRegClassID:
    return load ? X86::MOV64rm : X86::MOV64mr;
  case X86::GR32RegClassID:
  case X86::GR32_NOSPRegClassID:
  case X86::GR32_ADRegClassID:
    return load ? X86::MOV32rm : X86::MOV32mr;
  case X86::GR16RegClassID:
    return load ? X86::MOV16rm : X86::MOV16mr;
  case X86::GR8RegClassID:
    // Copying to or from a physical H register on x86-64 requires a NOREX
    // move. Otherwise use a normal move.
    if (isHReg(Reg) &&
        TM.getSubtarget<X86Subtarget>().is64Bit())
      return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
    else
      return load ? X86::MOV8rm : X86::MOV8mr;
  case X86::GR64_ABCDRegClassID:
    return load ? X86::MOV64rm : X86::MOV64mr;
  case X86::GR32_ABCDRegClassID:
    return load ? X86::MOV32rm : X86::MOV32mr;
  case X86::GR16_ABCDRegClassID:
    return load ? X86::MOV16rm : X86::MOV16mr;
  case X86::GR8_ABCD_LRegClassID:
    return load ? X86::MOV8rm : X86::MOV8mr;
  case X86::GR8_ABCD_HRegClassID:
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
    else
      return load ? X86::MOV8rm : X86::MOV8mr;
  case X86::GR64_NOREXRegClassID:
  case X86::GR64_NOREX_NOSPRegClassID:
    return load ? X86::MOV64rm : X86::MOV64mr;
  case X86::GR32_NOREXRegClassID:
    return load ? X86::MOV32rm : X86::MOV32mr;
  case X86::GR16_NOREXRegClassID:
    return load ? X86::MOV16rm : X86::MOV16mr;
  case X86::GR8_NOREXRegClassID:
    return load ? X86::MOV8rm : X86::MOV8mr;
  case X86::GR64_TCRegClassID:
    return load ? X86::MOV64rm_TC : X86::MOV64mr_TC;
  case X86::GR32_TCRegClassID:
    return load ? X86::MOV32rm_TC : X86::MOV32mr_TC;
  case X86::RFP80RegClassID:
    return load ? X86::LD_Fp80m : X86::ST_FpP80m;
  case X86::RFP64RegClassID:
    return load ? X86::LD_Fp64m : X86::ST_Fp64m;
  case X86::RFP32RegClassID:
    return load ? X86::LD_Fp32m : X86::ST_Fp32m;
  case X86::FR32RegClassID:
    return load ? X86::MOVSSrm : X86::MOVSSmr;
  case X86::FR64RegClassID:
    return load ? X86::MOVSDrm : X86::MOVSDmr;
  case X86::VR128RegClassID:
    // If stack is realigned we can use aligned stores.
    if (isStackAligned)
      return load ? X86::MOVAPSrm : X86::MOVAPSmr;
    else
      return load ? X86::MOVUPSrm : X86::MOVUPSmr;
  case X86::VR64RegClassID:
    return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
  }
}

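// Thin wrappers over getLoadStoreRegOpcode that fix the direction.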
static unsigned getStoreRegOpcode(unsigned SrcReg,
                                  const TargetRegisterClass *RC,
                                  bool isStackAligned,
                                  TargetMachine &TM) {
  return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, TM, false);
}

static unsigned getLoadRegOpcode(unsigned DestReg,
                                 const TargetRegisterClass *RC,
                                 bool isStackAligned,
                                 const TargetMachine &TM) {
  return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, TM, true);
}

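// storeRegToStackSlot / loadRegFromStackSlot spill and reload through a frame
// index; a 16-byte-aligned stack (or one that can be realigned) lets vector
// registers use the aligned move opcodes.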
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned SrcReg, bool isKill,
                                       int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  const MachineFunction &MF = *MBB.getParent();
  assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() &&
         "Stack slot too small for store");
  bool isAligned = (RI.getStackAlignment() >= 16) || RI.canRealignStack(MF);
  unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
  DebugLoc DL = MBB.findDebugLoc(MI);
  addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
    .addReg(SrcReg, getKillRegState(isKill));
}

void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  MachineInstr::mmo_iterator MMOBegin,
                                  MachineInstr::mmo_iterator MMOEnd,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  bool isAligned = MMOBegin != MMOEnd && (*MMOBegin)->getAlignment() >= 16;
  unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
  DebugLoc DL;
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB.addOperand(Addr[i]);
  MIB.addReg(SrcReg, getKillRegState(isKill));
  (*MIB).setMemRefs(MMOBegin, MMOEnd);
  NewMIs.push_back(MIB);
}

void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        unsigned DestReg, int FrameIdx,
                                        const TargetRegisterClass *RC,
                                        const TargetRegisterInfo *TRI) const {
  const MachineFunction &MF = *MBB.getParent();
  bool isAligned = (RI.getStackAlignment() >= 16) || RI.canRealignStack(MF);
  unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
  DebugLoc DL = MBB.findDebugLoc(MI);
  addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
}

void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   MachineInstr::mmo_iterator MMOBegin,
                                   MachineInstr::mmo_iterator MMOEnd,
                                   SmallVectorImpl<MachineInstr*> &NewMIs) const {
  bool isAligned = MMOBegin != MMOEnd && (*MMOBegin)->getAlignment() >= 16;
  unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
  DebugLoc DL;
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB.addOperand(Addr[i]);
  (*MIB).setMemRefs(MMOBegin, MMOEnd);
  NewMIs.push_back(MIB);
}

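// The callee-saved spill/restore pair below uses cheap PUSH/POP sequences for
// integer registers and falls back to real stack slots for XMM registers and
// on Win64. The frame register itself is skipped; the prologue and epilogue
// code handles it.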
bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MI,
                                       const std::vector<CalleeSavedInfo> &CSI,
                                         const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
  bool isWin64 = TM.getSubtarget<X86Subtarget>().isTargetWin64();
  unsigned SlotSize = is64Bit ? 8 : 4;

  MachineFunction &MF = *MBB.getParent();
  unsigned FPReg = RI.getFrameRegister(MF);
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  unsigned CalleeFrameSize = 0;

  unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    if (Reg == FPReg)
      // X86RegisterInfo::emitPrologue will handle spilling of frame register.
      continue;
    if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
      CalleeFrameSize += SlotSize;
      BuildMI(MBB, MI, DL, get(Opc)).addReg(Reg, RegState::Kill);
    } else {
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
                          RC, TRI);
    }
  }

  X86FI->setCalleeSavedFrameSize(CalleeFrameSize);
  return true;
}

bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                       const std::vector<CalleeSavedInfo> &CSI,
                                         const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  unsigned FPReg = RI.getFrameRegister(MF);
  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
  bool isWin64 = TM.getSubtarget<X86Subtarget>().isTargetWin64();
  unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (Reg == FPReg)
      // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
      continue;
    if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
      BuildMI(MBB, MI, DL, get(Opc), Reg);
    } else {
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
                           RC, TRI);
    }
  }
  return true;
}

MachineInstr*
X86InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                       int FrameIx, uint64_t Offset,
                                       const MDNode *MDPtr,
                                       DebugLoc DL) const {
  X86AddressMode AM;
  AM.BaseType = X86AddressMode::FrameIndexBase;
  AM.Base.FrameIndex = FrameIx;
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(X86::DBG_VALUE));
  addFullAddress(MIB, AM).addImm(Offset).addMetadata(MDPtr);
  return &*MIB;
}

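// FuseTwoAddrInst rewrites a two-address instruction into its memory form:
// the address operands in MOs replace both tied register operands, and the
// remaining operands are copied across unchanged.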
static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
                                     const SmallVectorImpl<MachineOperand> &MOs,
                                     MachineInstr *MI,
                                     const TargetInstrInfo &TII) {
  // Create the base instruction with the memory operand as the first part.
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
                                              MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(NewMI);
  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB.addOperand(MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    addOffset(MIB, 0);

  // Loop over the rest of the ri operands, converting them over.
  unsigned NumOps = MI->getDesc().getNumOperands()-2;
  for (unsigned i = 0; i != NumOps; ++i) {
    MachineOperand &MO = MI->getOperand(i+2);
    MIB.addOperand(MO);
  }
  for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    MIB.addOperand(MO);
  }
  return MIB;
}

static MachineInstr *FuseInst(MachineFunction &MF,
                              unsigned Opcode, unsigned OpNo,
                              const SmallVectorImpl<MachineOperand> &MOs,
                              MachineInstr *MI, const TargetInstrInfo &TII) {
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
                                              MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(NewMI);

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (i == OpNo) {
      assert(MO.isReg() && "Expected to fold into reg operand!");
      unsigned NumAddrOps = MOs.size();
      for (unsigned i = 0; i != NumAddrOps; ++i)
        MIB.addOperand(MOs[i]);
      if (NumAddrOps < 4)  // FrameIndex only
        addOffset(MIB, 0);
    } else {
      MIB.addOperand(MO);
    }
  }
  return MIB;
}

static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
                                const SmallVectorImpl<MachineOperand> &MOs,
                                MachineInstr *MI) {
  MachineFunction &MF = *MI->getParent()->getParent();
  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));

  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB.addOperand(MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    addOffset(MIB, 0);
  return MIB.addImm(0);
}

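// foldMemoryOperandImpl (the worker overload) tries to rewrite MI so that
// operand i is accessed directly in memory (described by MOs) instead of a
// register, consulting the RegOp2MemOpTable* maps. For instance, a reload
// feeding the second source of an ADD32rr can become a single ADD32rm that
// reads the stack slot directly. Returns the fused instruction, or NULL if
// no legal fold exists.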
MachineInstr*
X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                    MachineInstr *MI, unsigned i,
                                    const SmallVectorImpl<MachineOperand> &MOs,
                                    unsigned Size, unsigned Align) const {
  const DenseMap<unsigned*, std::pair<unsigned,unsigned> > *OpcodeTablePtr=NULL;
  bool isTwoAddrFold = false;
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
    MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;

  MachineInstr *NewMI = NULL;
  // Folding a memory location into the two-address part of a two-address
  // instruction is different than folding it other places. It requires
  // replacing the *two* registers with the memory location.
  if (isTwoAddr && NumOps >= 2 && i < 2 &&
      MI->getOperand(0).isReg() &&
      MI->getOperand(1).isReg() &&
      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
    isTwoAddrFold = true;
  } else if (i == 0) { // If operand 0
    if (MI->getOpcode() == X86::MOV64r0)
      NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI);
    else if (MI->getOpcode() == X86::MOV32r0)
      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
    else if (MI->getOpcode() == X86::MOV16r0)
      NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI);
    else if (MI->getOpcode() == X86::MOV8r0)
      NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
    if (NewMI)
      return NewMI;

    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (i == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (i == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }

  // If table selected...
  if (OpcodeTablePtr) {
    // Find the Opcode to fuse
    DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
      OpcodeTablePtr->find((unsigned*)MI->getOpcode());
    if (I != OpcodeTablePtr->end()) {
      unsigned Opcode = I->second.first;
      unsigned MinAlign = I->second.second;
      if (Align < MinAlign)
        return NULL;
      bool NarrowToMOV32rm = false;
      if (Size) {
        unsigned RCSize = MI->getDesc().OpInfo[i].getRegClass(&RI)->getSize();
        if (Size < RCSize) {
          // Check if it's safe to fold the load. If the size of the object is
          // narrower than the load width, then it's not.
          if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
            return NULL;
          // If this is a 64-bit load, but the spill slot is 32, then we can do
          // a 32-bit load which is implicitly zero-extended. This likely is due
          // to liveintervalanalysis remat'ing a load from stack slot.
          if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg())
            return NULL;
          Opcode = X86::MOV32rm;
          NarrowToMOV32rm = true;
        }
      }

      if (isTwoAddrFold)
        NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this);
      else
        NewMI = FuseInst(MF, Opcode, i, MOs, MI, *this);

      if (NarrowToMOV32rm) {
        // This is the special case where we use a MOV32rm to load a 32-bit
        // value and zero-extend the top bits. Change the destination register
        // to a 32-bit one.
        unsigned DstReg = NewMI->getOperand(0).getReg();
        if (TargetRegisterInfo::isPhysicalRegister(DstReg))
          NewMI->getOperand(0).setReg(RI.getSubReg(DstReg,
                                                   X86::sub_32bit));
        else
          NewMI->getOperand(0).setSubReg(X86::sub_32bit);
      }
      return NewMI;
    }
  }

  // No fusion
  if (PrintFailedFusing && !MI->isCopy())
    dbgs() << "We failed to fuse operand " << i << " in " << *MI;
  return NULL;
}

MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                  MachineInstr *MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                                  int FrameIndex) const {
  // Check switch flag
  if (NoFusing) return NULL;

  if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
    switch (MI->getOpcode()) {
    case X86::CVTSD2SSrr:
    case X86::Int_CVTSD2SSrr:
    case X86::CVTSS2SDrr:
    case X86::Int_CVTSS2SDrr:
    case X86::RCPSSr:
    case X86::RCPSSr_Int:
    case X86::ROUNDSDr_Int:
    case X86::ROUNDSSr_Int:
    case X86::RSQRTSSr:
    case X86::RSQRTSSr_Int:
    case X86::SQRTSSr:
    case X86::SQRTSSr_Int:
      return 0;
    default: break;
    }

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);
  unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    unsigned RCSize = 0;
    switch (MI->getOpcode()) {
    default: return NULL;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri; RCSize = 1; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
    }
    // Check if it's safe to fold the load. If the size of the object is
    // narrower than the load width, then it's not.
    if (Size < RCSize)
      return NULL;
    // Change to CMPXXri r, 0 first.
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return NULL;

  SmallVector<MachineOperand,4> MOs;
  MOs.push_back(MachineOperand::CreateFI(FrameIndex));
  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, Size, Alignment);
}

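// This overload folds the load described by LoadMI rather than a stack slot.
// Constant-materializing pseudos (V_SET0*, V_SETALLONES, FsFLD0*) are handled
// by creating a constant-pool entry and folding a load from it; ordinary
// loads simply donate their address operands.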
MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                  MachineInstr *MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                                  MachineInstr *LoadMI) const {
  // Check switch flag
  if (NoFusing) return NULL;

  if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
    switch (MI->getOpcode()) {
    case X86::CVTSD2SSrr:
    case X86::Int_CVTSD2SSrr:
    case X86::CVTSS2SDrr:
    case X86::Int_CVTSS2SDrr:
    case X86::RCPSSr:
    case X86::RCPSSr_Int:
    case X86::ROUNDSDr_Int:
    case X86::ROUNDSSr_Int:
    case X86::RSQRTSSr:
    case X86::RSQRTSSr_Int:
    case X86::SQRTSSr:
    case X86::SQRTSSr_Int:
      return 0;
    default: break;
    }

  // Determine the alignment of the load.
  unsigned Alignment = 0;
  if (LoadMI->hasOneMemOperand())
    Alignment = (*LoadMI->memoperands_begin())->getAlignment();
  else
    switch (LoadMI->getOpcode()) {
    case X86::AVX_SET0PSY:
    case X86::AVX_SET0PDY:
      Alignment = 32;
      break;
    case X86::V_SET0PS:
    case X86::V_SET0PD:
    case X86::V_SET0PI:
    case X86::V_SETALLONES:
    case X86::AVX_SET0PS:
    case X86::AVX_SET0PD:
    case X86::AVX_SET0PI:
      Alignment = 16;
      break;
    case X86::FsFLD0SD:
      Alignment = 8;
      break;
    case X86::FsFLD0SS:
      Alignment = 4;
      break;
    default:
      llvm_unreachable("Don't know how to fold this instruction!");
    }
  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    switch (MI->getOpcode()) {
    default: return NULL;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
    }
    // Change to CMPXXri r, 0 first.
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return NULL;

  // Make sure the subregisters match.
  // Otherwise we risk changing the size of the load.
  if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg())
    return NULL;

  SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
  switch (LoadMI->getOpcode()) {
  case X86::V_SET0PS:
  case X86::V_SET0PD:
  case X86::V_SET0PI:
  case X86::V_SETALLONES:
  case X86::AVX_SET0PS:
  case X86::AVX_SET0PD:
  case X86::AVX_SET0PI:
  case X86::AVX_SET0PSY:
  case X86::AVX_SET0PDY:
  case X86::FsFLD0SD:
  case X86::FsFLD0SS: {
    // Folding a V_SET0P? or V_SETALLONES as a load, to ease register pressure.
    // Create a constant-pool entry and operands to load from it.

    // Medium and large mode can't fold loads this way.
    if (TM.getCodeModel() != CodeModel::Small &&
        TM.getCodeModel() != CodeModel::Kernel)
      return NULL;

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (TM.getRelocationModel() == Reloc::PIC_) {
      if (TM.getSubtarget<X86Subtarget>().is64Bit())
        PICBase = X86::RIP;
      else
        // FIXME: PICBase = getGlobalBaseReg(&MF);
        // This doesn't work for several reasons.
        // 1. GlobalBaseReg may have been spilled.
        // 2. It may not be live at MI.
        return NULL;
    }

    // Create a constant-pool entry.
    MachineConstantPool &MCP = *MF.getConstantPool();
    const Type *Ty;
    unsigned Opc = LoadMI->getOpcode();
    if (Opc == X86::FsFLD0SS)
      Ty = Type::getFloatTy(MF.getFunction()->getContext());
    else if (Opc == X86::FsFLD0SD)
      Ty = Type::getDoubleTy(MF.getFunction()->getContext());
    else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY)
      Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8);
    else
      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
    const Constant *C = LoadMI->getOpcode() == X86::V_SETALLONES ?
                    Constant::getAllOnesValue(Ty) :
                    Constant::getNullValue(Ty);
    unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);

    // Create operands to load from the constant pool entry.
    MOs.push_back(MachineOperand::CreateReg(PICBase, false));
    MOs.push_back(MachineOperand::CreateImm(1));
    MOs.push_back(MachineOperand::CreateReg(0, false));
    MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
    MOs.push_back(MachineOperand::CreateReg(0, false));
    break;
  }
  default: {
    // Folding a normal load. Just copy the load's address operands.
    unsigned NumOps = LoadMI->getDesc().getNumOperands();
    for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
      MOs.push_back(LoadMI->getOperand(i));
    break;
  }
  }
  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, 0, Alignment);
}

bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                  const SmallVectorImpl<unsigned> &Ops) const {
  // Check switch flag
  if (NoFusing) return 0;

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    switch (MI->getOpcode()) {
    default: return false;
    case X86::TEST8rr:
    case X86::TEST16rr:
    case X86::TEST32rr:
    case X86::TEST64rr:
      return true;
    }
  }

  if (Ops.size() != 1)
    return false;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
    MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;

  // Folding a memory location into the two-address part of a two-address
  // instruction is different than folding it other places. It requires
  // replacing the *two* registers with the memory location.
  const DenseMap<unsigned*, std::pair<unsigned,unsigned> > *OpcodeTablePtr=NULL;
  if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
  } else if (OpNum == 0) { // If operand 0
    switch (Opc) {
    case X86::MOV8r0:
    case X86::MOV16r0:
    case X86::MOV32r0:
    case X86::MOV64r0:
      return true;
    default: break;
    }
    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (OpNum == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (OpNum == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }

  if (OpcodeTablePtr) {
    // Find the Opcode to fuse
    DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
      OpcodeTablePtr->find((unsigned*)Opc);
    if (I != OpcodeTablePtr->end())
      return true;
  }
  return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
}

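// unfoldMemoryOperand reverses a fold: it splits a memory-form instruction
// back into an explicit reload, the register form of the operation, and an
// explicit store, as requested by UnfoldLoad/UnfoldStore. Each
// MemOp2RegOpTable entry packs the register opcode plus the address operand
// index (low four bits) and folded-load/folded-store flags (bits 4 and 5).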
bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                                SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
    MemOp2RegOpTable.find((unsigned*)MI->getOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  unsigned Index = I->second.second & 0xf;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  if (UnfoldLoad && !FoldedLoad)
    return false;
  UnfoldLoad &= FoldedLoad;
  if (UnfoldStore && !FoldedStore)
    return false;
  UnfoldStore &= FoldedStore;

  const TargetInstrDesc &TID = get(Opc);
  const TargetOperandInfo &TOI = TID.OpInfo[Index];
  const TargetRegisterClass *RC = TOI.getRegClass(&RI);
  if (!MI->hasOneMemOperand() &&
      RC == &X86::VR128RegClass &&
      !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
    // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
    // conservatively assume the address is unaligned. That's bad for
    // performance.
    return false;

  SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
  SmallVector<MachineOperand,2> BeforeOps;
  SmallVector<MachineOperand,2> AfterOps;
  SmallVector<MachineOperand,4> ImpOps;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (i >= Index && i < Index + X86::AddrNumOperands)
      AddrOps.push_back(Op);
    else if (Op.isReg() && Op.isImplicit())
      ImpOps.push_back(Op);
    else if (i < Index)
      BeforeOps.push_back(Op);
    else if (i > Index)
      AfterOps.push_back(Op);
  }

  // Emit the load instruction.
  if (UnfoldLoad) {
    std::pair<MachineInstr::mmo_iterator,
              MachineInstr::mmo_iterator> MMOs =
      MF.extractLoadMemRefs(MI->memoperands_begin(),
                            MI->memoperands_end());
    loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
    if (UnfoldStore) {
      // Address operands cannot be marked isKill.
      for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
        MachineOperand &MO = NewMIs[0]->getOperand(i);
        if (MO.isReg())
          MO.setIsKill(false);
      }
    }
  }

  // Emit the data processing instruction.
  MachineInstr *DataMI = MF.CreateMachineInstr(TID, MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(DataMI);

  if (FoldedStore)
    MIB.addReg(Reg, RegState::Define);
  for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
    MIB.addOperand(BeforeOps[i]);
  if (FoldedLoad)
    MIB.addReg(Reg);
  for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
    MIB.addOperand(AfterOps[i]);
  for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
    MachineOperand &MO = ImpOps[i];
    MIB.addReg(MO.getReg(),
               getDefRegState(MO.isDef()) |
               RegState::Implicit |
               getKillRegState(MO.isKill()) |
               getDeadRegState(MO.isDead()) |
               getUndefRegState(MO.isUndef()));
  }
  // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
  unsigned NewOpc = 0;
  switch (DataMI->getOpcode()) {
  default: break;
  case X86::CMP64ri32:
  case X86::CMP64ri8:
  case X86::CMP32ri:
  case X86::CMP32ri8:
  case X86::CMP16ri:
  case X86::CMP16ri8:
  case X86::CMP8ri: {
    MachineOperand &MO0 = DataMI->getOperand(0);
    MachineOperand &MO1 = DataMI->getOperand(1);
    if (MO1.getImm() == 0) {
      switch (DataMI->getOpcode()) {
      default: break;
      case X86::CMP64ri8:
      case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
      case X86::CMP32ri8:
      case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
      case X86::CMP16ri8:
      case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
      case X86::CMP8ri:    NewOpc = X86::TEST8rr; break;
      }
      DataMI->setDesc(get(NewOpc));
      MO1.ChangeToRegister(MO0.getReg(), false);
    }
  }
  }
  NewMIs.push_back(DataMI);

  // Emit the store instruction.
  if (UnfoldStore) {
    const TargetRegisterClass *DstRC = TID.OpInfo[0].getRegClass(&RI);
    std::pair<MachineInstr::mmo_iterator,
              MachineInstr::mmo_iterator> MMOs =
      MF.extractStoreMemRefs(MI->memoperands_begin(),
                             MI->memoperands_end());
    storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second,
                   NewMIs);
  }

  return true;
}

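// The SelectionDAG flavor of unfoldMemoryOperand builds new machine nodes
// (load, operation, store) instead of MachineInstrs, carrying the memory
// operand info along and refusing to introduce slow unaligned vector
// accesses.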
bool
X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                  SmallVectorImpl<SDNode*> &NewNodes) const {
  if (!N->isMachineOpcode())
    return false;

  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
    MemOp2RegOpTable.find((unsigned*)N->getMachineOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  unsigned Index = I->second.second & 0xf;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  const TargetInstrDesc &TID = get(Opc);
  const TargetRegisterClass *RC = TID.OpInfo[Index].getRegClass(&RI);
  unsigned NumDefs = TID.NumDefs;
  std::vector<SDValue> AddrOps;
  std::vector<SDValue> BeforeOps;
  std::vector<SDValue> AfterOps;
  DebugLoc dl = N->getDebugLoc();
  unsigned NumOps = N->getNumOperands();
  for (unsigned i = 0; i != NumOps-1; ++i) {
    SDValue Op = N->getOperand(i);
    if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
      AddrOps.push_back(Op);
    else if (i < Index-NumDefs)
      BeforeOps.push_back(Op);
    else if (i > Index-NumDefs)
      AfterOps.push_back(Op);
  }
  SDValue Chain = N->getOperand(NumOps-1);
  AddrOps.push_back(Chain);

  // Emit the load instruction.
  SDNode *Load = 0;
  MachineFunction &MF = DAG.getMachineFunction();
  if (FoldedLoad) {
    EVT VT = *RC->vt_begin();
    std::pair<MachineInstr::mmo_iterator,
              MachineInstr::mmo_iterator> MMOs =
      MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
                            cast<MachineSDNode>(N)->memoperands_end());
    if (!(*MMOs.first) &&
        RC == &X86::VR128RegClass &&
        !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
      // Do not introduce a slow unaligned load.
      return false;
    bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= 16;
    Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, TM), dl,
                              VT, MVT::Other, &AddrOps[0], AddrOps.size());
    NewNodes.push_back(Load);

    // Preserve memory reference information.
    cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second);
  }

  // Emit the data processing instruction.
  std::vector<EVT> VTs;
  const TargetRegisterClass *DstRC = 0;
  if (TID.getNumDefs() > 0) {
    DstRC = TID.OpInfo[0].getRegClass(&RI);
    VTs.push_back(*DstRC->vt_begin());
  }
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
      VTs.push_back(VT);
  }
  if (Load)
    BeforeOps.push_back(SDValue(Load, 0));
  std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps));
  SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, &BeforeOps[0],
                                       BeforeOps.size());
  NewNodes.push_back(NewNode);

  // Emit the store instruction.
  if (FoldedStore) {
    AddrOps.pop_back();
    AddrOps.push_back(SDValue(NewNode, 0));
    AddrOps.push_back(Chain);
    std::pair<MachineInstr::mmo_iterator,
              MachineInstr::mmo_iterator> MMOs =
      MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
                             cast<MachineSDNode>(N)->memoperands_end());
    if (!(*MMOs.first) &&
        RC == &X86::VR128RegClass &&
        !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
      // Do not introduce a slow unaligned store.
      return false;
    bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= 16;
    SDNode *Store = DAG.getMachineNode(getStoreRegOpcode(0, DstRC,
                                                         isAligned, TM),
                                       dl, MVT::Other,
                                       &AddrOps[0], AddrOps.size());
    NewNodes.push_back(Store);

    // Preserve memory reference information. (The original set these memrefs
    // on the Load node, which looks like a copy-paste slip; they describe the
    // store.)
    cast<MachineSDNode>(Store)->setMemRefs(MMOs.first, MMOs.second);
  }

  return true;
}

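// getOpcodeAfterMemoryUnfold answers, without rewriting anything, which
// register-form opcode unfolding Opc would produce, and optionally reports
// the index of the address operands.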
unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore,
                                      unsigned *LoadRegIndex) const {
  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
    MemOp2RegOpTable.find((unsigned*)Opc);
  if (I == MemOp2RegOpTable.end())
    return 0;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  if (UnfoldLoad && !FoldedLoad)
    return 0;
  if (UnfoldStore && !FoldedStore)
    return 0;
  if (LoadRegIndex)
    *LoadRegIndex = I->second.second & 0xf;
  return I->second.first;
}

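// areLoadsFromSameBasePtr recognizes two simple loads (same chain, base,
// index, and segment, scale 1) that differ only in their constant
// displacements, so the scheduler can consider clustering them.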
bool
X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                      int64_t &Offset1, int64_t &Offset2) const {
  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;
  unsigned Opc1 = Load1->getMachineOpcode();
  unsigned Opc2 = Load2->getMachineOpcode();
  switch (Opc1) {
  default: return false;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVUPSrm_Int:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::MOVDQUrm_Int:
    break;
  }
  switch (Opc2) {
  default: return false;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVUPSrm_Int:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::MOVDQUrm_Int:
    break;
  }

  // Check if chain operands and base addresses match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(5) != Load2->getOperand(5))
    return false;
  // Segment operands should match as well.
  if (Load1->getOperand(4) != Load2->getOperand(4))
    return false;
  // Scale should be 1, Index should be Reg0.
  if (Load1->getOperand(1) == Load2->getOperand(1) &&
      Load1->getOperand(2) == Load2->getOperand(2)) {
    if (cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue() != 1)
      return false;

    // Now let's examine the displacements.
    if (isa<ConstantSDNode>(Load1->getOperand(3)) &&
        isa<ConstantSDNode>(Load2->getOperand(3))) {
      Offset1 = cast<ConstantSDNode>(Load1->getOperand(3))->getSExtValue();
      Offset2 = cast<ConstantSDNode>(Load2->getOperand(3))->getSExtValue();
      return true;
    }
  }
  return false;
}

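// shouldScheduleLoadsNear applies the clustering limits: the displacements
// must be within roughly 512 bytes, the opcodes must match, and the cluster
// size is capped by register pressure; scalar loads allow no pending
// neighbors, while XMM loads allow a small cluster in 64-bit mode where
// sixteen registers are available.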
bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                           int64_t Offset1, int64_t Offset2,
                                           unsigned NumLoads) const {
  assert(Offset2 > Offset1);
  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  unsigned Opc1 = Load1->getMachineOpcode();
  unsigned Opc2 = Load2->getMachineOpcode();
  if (Opc1 != Opc2)
    return false;  // FIXME: overly conservative?

  switch (Opc1) {
  default: break;
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    return false;
  }

  EVT VT = Load1->getValueType(0);
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // XMM registers. In 64-bit mode we can be a bit more aggressive since we
    // have 16 of them to play with.
    if (TM.getSubtargetImpl()->is64Bit()) {
      if (NumLoads >= 3)
        return false;
    } else if (NumLoads) {
      return false;
    }
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f32:
  case MVT::f64:
    if (NumLoads)
      return false;
    break;
  }

  return true;
}

bool X86InstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
  if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E)
    return true;
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

bool X86InstrInfo::
isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // FIXME: Return false for x87 stack register classes for now. We can't
  // allow any loads of these registers before FpGet_ST0_80.
  return !(RC == &X86::CCRRegClass || RC == &X86::RFP32RegClass ||
           RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass);
}

/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended (r8 or
/// higher) register? e.g. r8, xmm8, xmm13, etc.
bool X86InstrInfo::isX86_64ExtendedReg(unsigned RegNo) {
  switch (RegNo) {
  default: break;
  case X86::R8:    case X86::R9:    case X86::R10:   case X86::R11:
  case X86::R12:   case X86::R13:   case X86::R14:   case X86::R15:
  case X86::R8D:   case X86::R9D:   case X86::R10D:  case X86::R11D:
  case X86::R12D:  case X86::R13D:  case X86::R14D:  case X86::R15D:
  case X86::R8W:   case X86::R9W:   case X86::R10W:  case X86::R11W:
  case X86::R12W:  case X86::R13W:  case X86::R14W:  case X86::R15W:
  case X86::R8B:   case X86::R9B:   case X86::R10B:  case X86::R11B:
  case X86::R12B:  case X86::R13B:  case X86::R14B:  case X86::R15B:
  case X86::XMM8:  case X86::XMM9:  case X86::XMM10: case X86::XMM11:
  case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
  case X86::YMM8:  case X86::YMM9:  case X86::YMM10: case X86::YMM11:
  case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
    return true;
  }
  return false;
}

/// getGlobalBaseReg - Return a virtual register initialized with the
/// global base register value. Output instructions required to
/// initialize the register in the function entry block, if necessary.
///
/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  assert(!TM.getSubtarget<X86Subtarget>().is64Bit() &&
         "X86-64 PIC uses RIP relative addressing");

  X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
  unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // Create the register. The code to initialize it is inserted
  // later, by the CGBR pass (below).
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
  X86FI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

// These are the replaceable SSE instructions. Some of these have Int variants
// that we don't include here. We don't want to replace instructions selected
// by intrinsics.
static const unsigned ReplaceableInstrs[][3] = {
  //PackedSingle      PackedDouble      PackedInt
  { X86::MOVAPSmr,    X86::MOVAPDmr,    X86::MOVDQAmr   },
  { X86::MOVAPSrm,    X86::MOVAPDrm,    X86::MOVDQArm   },
  { X86::MOVAPSrr,    X86::MOVAPDrr,    X86::MOVDQArr   },
  { X86::MOVUPSmr,    X86::MOVUPDmr,    X86::MOVDQUmr   },
  { X86::MOVUPSrm,    X86::MOVUPDrm,    X86::MOVDQUrm   },
  { X86::MOVNTPSmr,   X86::MOVNTPDmr,   X86::MOVNTDQmr  },
  { X86::ANDNPSrm,    X86::ANDNPDrm,    X86::PANDNrm    },
  { X86::ANDNPSrr,    X86::ANDNPDrr,    X86::PANDNrr    },
  { X86::ANDPSrm,     X86::ANDPDrm,     X86::PANDrm     },
  { X86::ANDPSrr,     X86::ANDPDrr,     X86::PANDrr     },
  { X86::ORPSrm,      X86::ORPDrm,      X86::PORrm      },
  { X86::ORPSrr,      X86::ORPDrr,      X86::PORrr      },
  { X86::V_SET0PS,    X86::V_SET0PD,    X86::V_SET0PI   },
  { X86::XORPSrm,     X86::XORPDrm,     X86::PXORrm     },
  { X86::XORPSrr,     X86::XORPDrr,     X86::PXORrr     },
  // AVX 128-bit support
  { X86::VMOVAPSmr,   X86::VMOVAPDmr,   X86::VMOVDQAmr  },
  { X86::VMOVAPSrm,   X86::VMOVAPDrm,   X86::VMOVDQArm  },
  { X86::VMOVAPSrr,   X86::VMOVAPDrr,   X86::VMOVDQArr  },
  { X86::VMOVUPSmr,   X86::VMOVUPDmr,   X86::VMOVDQUmr  },
  { X86::VMOVUPSrm,   X86::VMOVUPDrm,   X86::VMOVDQUrm  },
  { X86::VMOVNTPSmr,  X86::VMOVNTPDmr,  X86::VMOVNTDQmr },
  { X86::VANDNPSrm,   X86::VANDNPDrm,   X86::VPANDNrm   },
  { X86::VANDNPSrr,   X86::VANDNPDrr,   X86::VPANDNrr   },
  { X86::VANDPSrm,    X86::VANDPDrm,    X86::VPANDrm    },
  { X86::VANDPSrr,    X86::VANDPDrr,    X86::VPANDrr    },
  { X86::VORPSrm,     X86::VORPDrm,     X86::VPORrm     },
  { X86::VORPSrr,     X86::VORPDrr,     X86::VPORrr     },
  { X86::AVX_SET0PS,  X86::AVX_SET0PD,  X86::AVX_SET0PI },
  { X86::VXORPSrm,    X86::VXORPDrm,    X86::VPXORrm    },
  { X86::VXORPSrr,    X86::VXORPDrr,    X86::VPXORrr    },
};

// FIXME: Some shuffle and unpack instructions have equivalents in different
// domains, but they require a bit more work than just switching opcodes.

static const unsigned *lookup(unsigned opcode, unsigned domain) {
  for (unsigned i = 0, e = array_lengthof(ReplaceableInstrs); i != e; ++i)
    if (ReplaceableInstrs[i][domain-1] == opcode)
      return ReplaceableInstrs[i];
  return 0;
}

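// GetSSEDomain returns the instruction's current execution domain as encoded
// in TSFlags (1 = PackedSingle, 2 = PackedDouble, 3 = PackedInt, matching the
// table columns above) and, in the second element, a bitmask of domains the
// instruction could be converted to: 0xe (domains 1-3) when an equivalent
// row exists in ReplaceableInstrs, 0 otherwise.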
std::pair<uint16_t, uint16_t>
X86InstrInfo::GetSSEDomain(const MachineInstr *MI) const {
  uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  return std::make_pair(domain,
                        domain && lookup(MI->getOpcode(), domain) ? 0xe : 0);
}

void X86InstrInfo::SetSSEDomain(MachineInstr *MI, unsigned Domain) const {
  assert(Domain>0 && Domain<4 && "Invalid execution domain");
  uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  assert(dom && "Not an SSE instruction");
  const unsigned *table = lookup(MI->getOpcode(), dom);
  assert(table && "Cannot change domain");
  MI->setDesc(get(table[Domain-1]));
}

/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(X86::NOOP);
}

namespace {
  /// CGBR - Create Global Base Reg pass. This initializes the PIC
  /// global base register for x86-32.
  struct CGBR : public MachineFunctionPass {
    static char ID;
    CGBR() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());

      assert(!TM->getSubtarget<X86Subtarget>().is64Bit() &&
             "X86-64 PIC uses RIP relative addressing");

      // Only emit a global base reg in PIC mode.
      if (TM->getRelocationModel() != Reloc::PIC_)
        return false;

      // Insert the set of GlobalBaseReg into the first MBB of the function
      MachineBasicBlock &FirstMBB = MF.front();
      MachineBasicBlock::iterator MBBI = FirstMBB.begin();
      DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      const X86InstrInfo *TII = TM->getInstrInfo();

      unsigned PC;
      if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT())
        PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
      else
        PC = TII->getGlobalBaseReg(&MF);

      // Operand of MovePCtoStack is completely ignored by asm printer. It's
      // only used in JIT code emission as displacement to pc.
      BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);

      // If we're using vanilla 'GOT' PIC style, we should use relative
      // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
      if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {
        unsigned GlobalBaseReg = TII->getGlobalBaseReg(&MF);
        // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
          .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
                                        X86II::MO_GOT_ABSOLUTE_ADDRESS);
      }

      return true;
    }

    virtual const char *getPassName() const {
      return "X86 PIC Global Base Reg Initialization";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char CGBR::ID = 0;
FunctionPass*
llvm::createGlobalBaseRegPass() { return new CGBR(); }