//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                      cl::desc("Enable exact hazard avoidance"),
                      cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
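
// For example, -postra-sched-debugdiv=4 -postra-sched-debugmod=1 restricts
// post-RA scheduling to blocks whose running count satisfies
// (count % 4) == 1; all other blocks are skipped (a debugging aid).
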
AntiDepBreaker::~AntiDepBreaker() { }
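// (The empty destructor is defined out of line so this file provides an
// anchor for the AntiDepBreaker interface's vtable.)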

class PostRAScheduler : public MachineFunctionPass {
  AliasAnalysis *AA;
  CodeGenOpt::Level OptLevel;

public:
  static char ID;
  PostRAScheduler(CodeGenOpt::Level ol) :
    MachineFunctionPass(&ID), OptLevel(ol) {}

  void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addRequired<MachineLoopInfo>();
    AU.addPreserved<MachineLoopInfo>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  const char *getPassName() const {
    return "Post RA top-down list latency scheduler";
  }

  bool runOnMachineFunction(MachineFunction &Fn);
};
char PostRAScheduler::ID = 0;
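// The pass framework uses the address of ID (passed as &ID above) to
// uniquely identify this pass.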

class SchedulePostRATDList : public ScheduleDAGInstrs {
  /// AvailableQueue - The priority queue to use for the available SUnits.
  LatencyPriorityQueue AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// Topo - A topological ordering for SUnits.
  ScheduleDAGTopologicalSort Topo;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
  AntiDepBreaker *AntiDepBreak;

  /// AA - AliasAnalysis for making memory reference queries.
  AliasAnalysis *AA;

  /// KillIndices - The index of the most recent kill (proceeding bottom-up),
  /// or ~0u if the register is not live.
  unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
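  // (KillIndices is indexed by physical register number; after register
  // allocation only physregs are live, and TargetRegisterInfo::
  // FirstVirtualRegister bounds that range.)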

public:
  SchedulePostRATDList(MachineFunction &MF,
                       const MachineLoopInfo &MLI,
                       const MachineDominatorTree &MDT,
                       ScheduleHazardRecognizer *HR,
                       AntiDepBreaker *ADB,
                       AliasAnalysis *aa)
    : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
      HazardRec(HR), AntiDepBreak(ADB), AA(aa) {}

  ~SchedulePostRATDList() {
    delete HazardRec;
    delete AntiDepBreak;
  }

  /// StartBlock - Initialize register live-range state for scheduling in
  /// this block.
  void StartBlock(MachineBasicBlock *BB);

  /// Schedule - Schedule the instruction range using list scheduling.
  void Schedule();

  /// Observe - Update liveness information to account for the current
  /// instruction, which will not be scheduled.
  void Observe(MachineInstr *MI, unsigned Count);

  /// FinishBlock - Clean up register live-range state.
  void FinishBlock();

  /// FixupKills - Fix register kill flags that have been made
  /// invalid due to scheduling.
  void FixupKills(MachineBasicBlock *MBB);

private:
  void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
  void ListScheduleTopDown();
  void StartBlockForKills(MachineBasicBlock *BB);

  // ToggleKillFlag - Toggle a register operand kill flag. Other
  // adjustments may be made to the instruction if necessary. Return
  // true if the operand has been deleted, false if not.
  bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
};

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ? TargetSubtarget::ANTIDEP_ALL :
      (EnableAntiDepBreaking == "critical") ? TargetSubtarget::ANTIDEP_CRITICAL :
      TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
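
  // Choose a hazard recognizer. With -avoid-hazards (on by default) the
  // itinerary-driven ExactHazardRecognizer is used; otherwise fall back to
  // the SimpleHazardRecognizer.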
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
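
  // Choose an anti-dependence breaker to match the requested mode:
  // ANTIDEP_ALL gets the aggressive breaker, ANTIDEP_CRITICAL the
  // critical-path-only breaker, and ANTIDEP_NONE gets no breaker (NULL).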
  AntiDepBreaker *ADB =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":BB#" << MBB->getNumber() << " ***\n";
    }

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex);
    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register. Instead, we
      // just rebuild the graph from scratch.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);
    }
    NumFixedAnti += Broken;
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating kills
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  for (unsigned i = 0; i < TRI->getNumRegs(); ++i)
    KillIndices[i] = ~0u;

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        KillIndices[*Subreg] = BB->size();
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          KillIndices[*Subreg] = BB->size();
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg, true /*IsDef*/,
                                               true /*IsImp*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags, they may have been made
/// incorrect by instruction reordering.
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        KillIndices[*Subreg] = ~0u;
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        KillIndices[*Subreg] = Count;
    }
  }
}

//===----------------------------------------------------------------------===//
// Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            dbgs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0); // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}