2012-05-17 22:37:09 +00:00
|
|
|
//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
|
2012-01-13 06:30:30 +00:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// MachineScheduler schedules machine instructions after phi elimination. It
|
|
|
|
// preserves LiveIntervals so it can be invoked before register allocation.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2012-03-08 01:41:12 +00:00
|
|
|
#include "llvm/CodeGen/MachineScheduler.h"
|
2012-12-03 16:50:05 +00:00
|
|
|
#include "llvm/ADT/PriorityQueue.h"
|
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
|
|
|
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
|
2013-03-10 13:11:23 +00:00
|
|
|
#include "llvm/CodeGen/MachineDominators.h"
|
|
|
|
#include "llvm/CodeGen/MachineLoopInfo.h"
|
2013-06-21 18:32:58 +00:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2012-01-13 06:30:30 +00:00
|
|
|
#include "llvm/CodeGen/Passes.h"
|
2012-06-06 20:29:31 +00:00
|
|
|
#include "llvm/CodeGen/RegisterClassInfo.h"
|
2012-11-28 05:13:24 +00:00
|
|
|
#include "llvm/CodeGen/ScheduleDFS.h"
|
2012-05-24 22:11:09 +00:00
|
|
|
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
|
2016-05-10 03:21:59 +00:00
|
|
|
#include "llvm/CodeGen/TargetPassConfig.h"
|
2012-01-13 06:30:30 +00:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2013-01-25 07:45:29 +00:00
|
|
|
#include "llvm/Support/GraphWriter.h"
|
2012-01-13 06:30:30 +00:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2013-06-14 00:00:13 +00:00
|
|
|
#include "llvm/Target/TargetInstrInfo.h"
|
2012-01-17 06:55:07 +00:00
|
|
|
|
2012-01-13 06:30:30 +00:00
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 02:02:50 +00:00
|
|
|
#define DEBUG_TYPE "misched"
|
|
|
|
|
2012-09-11 00:39:15 +00:00
|
|
|
namespace llvm {
// These options are declared in the llvm namespace (not file-static) so that
// target-specific schedulers elsewhere in the tree can read them.
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}
|
2012-03-14 04:00:41 +00:00
|
|
|
|
2012-03-07 00:18:25 +00:00
|
|
|
#ifndef NDEBUG
// Debug-only visualization and bisection aids.
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph, provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessor/successor than cutoff"));

// Bisection aid: stop issuing scheduling decisions after N instructions.
static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

// Restrict scheduling to a single function / a single block for debugging.
static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
// In release builds only a constant-false stand-in for the DAG viewer flag
// remains; the other debug-only options are compiled out entirely.
static bool ViewMISchedDAGs = false;
#endif // NDEBUG
|
|
|
|
|
2016-04-22 19:09:17 +00:00
|
|
|
/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

// Heuristic toggles, each on by default.
static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable memop clustering."),
  cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

// Run the MachineVerifier around the scheduler (off by default).
static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;
|
|
|
|
|
// Pin the vtables to this file: providing one out-of-line virtual method
// definition per class gives the compiler a single home for each vtable.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}
|
|
|
|
|
2012-01-14 02:17:06 +00:00
|
|
|
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

// Zero-initialize all analysis pointers; they are populated per-function by
// the pass driver before scheduling runs.
MachineSchedContext::MachineSchedContext():
  MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr), LIS(nullptr) {
  // NOTE(review): RegClassInfo is a raw owning pointer freed in the
  // destructor; the member is declared in the header, so converting it to
  // unique_ptr would require a header change.
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}
|
|
|
|
|
2012-01-13 06:30:30 +00:00
|
|
|
namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  /// Run \p Scheduler over every scheduling region of the current function.
  /// \p FixKillFlags requests kill-flag fixup after scheduling.
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  /// Create the scheduler selected by command line, target hook, or default.
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  /// Create the target-configured postRA scheduler, or the generic default.
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace
|
|
|
|
|
2012-01-17 06:55:03 +00:00
|
|
|
char MachineScheduler::ID = 0;

// External handle other passes use to schedule this pass in a pipeline.
char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)
|
|
|
|
|
2012-01-17 06:55:03 +00:00
|
|
|
MachineScheduler::MachineScheduler()
  : MachineSchedulerBase(ID) {
  // Register this pass with the PassRegistry so -machine-scheduler works.
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
|
|
|
|
|
2012-01-17 06:55:03 +00:00
|
|
|
/// Declare the analyses this pass requires and preserves. Scheduling reorders
/// instructions within blocks but never changes the CFG, and it keeps
/// SlotIndexes/LiveIntervals up to date rather than invalidating them.
void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
|
|
|
|
|
2013-12-28 21:56:51 +00:00
|
|
|
char PostMachineScheduler::ID = 0;

// External handle other passes use to schedule this pass in a pipeline.
char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
  : MachineSchedulerBase(ID) {
  // Register this pass with the PassRegistry so -postmisched works.
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

/// PostRA scheduling runs after register allocation, so unlike the preRA
/// scheduler it does not require or preserve the liveness analyses.
void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
|
|
|
|
|
2012-01-13 06:30:30 +00:00
|
|
|
// Global registry of scheduler factories selectable via -misched=<name>.
MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

// Master switches for the preRA and postRA scheduling passes.
static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);
|
2012-04-24 18:04:34 +00:00
|
|
|
|
|
|
|
/// Decrement this iterator until reaching the top or a non-debug instr.
|
2013-08-30 04:36:57 +00:00
|
|
|
static MachineBasicBlock::const_iterator
|
|
|
|
priorNonDebug(MachineBasicBlock::const_iterator I,
|
|
|
|
MachineBasicBlock::const_iterator Beg) {
|
2012-04-24 18:04:34 +00:00
|
|
|
assert(I != Beg && "reached the top of the region, cannot decrement");
|
|
|
|
while (--I != Beg) {
|
|
|
|
if (!I->isDebugValue())
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return I;
|
|
|
|
}
|
|
|
|
|
/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  // Delegate to the const overload, then strip constness from the result.
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}
|
|
|
|
|
2012-04-24 18:04:34 +00:00
|
|
|
/// If this iterator is a debug value, increment until reaching the End or a
|
|
|
|
/// non-debug instruction.
|
2013-08-31 05:17:58 +00:00
|
|
|
static MachineBasicBlock::const_iterator
|
|
|
|
nextIfDebug(MachineBasicBlock::const_iterator I,
|
|
|
|
MachineBasicBlock::const_iterator End) {
|
2012-05-17 18:35:03 +00:00
|
|
|
for(; I != End; ++I) {
|
2012-04-24 18:04:34 +00:00
|
|
|
if (!I->isDebugValue())
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return I;
|
|
|
|
}
|
|
|
|
|
/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Delegate to the const overload, then strip constness from the result.
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}
|
|
|
|
|
2013-09-24 17:11:19 +00:00
|
|
|
/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
|
2013-09-20 05:14:41 +00:00
|
|
|
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
|
|
|
|
// Select the scheduler, or set the default.
|
|
|
|
MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
|
|
|
|
if (Ctor != useDefaultMachineSched)
|
|
|
|
return Ctor(this);
|
|
|
|
|
|
|
|
// Get the default scheduler set by the target for this function.
|
|
|
|
ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
|
|
|
|
if (Scheduler)
|
|
|
|
return Scheduler;
|
|
|
|
|
|
|
|
// Default to GenericScheduler.
|
2013-12-28 21:56:57 +00:00
|
|
|
return createGenericSchedLive(this);
|
2013-09-20 05:14:41 +00:00
|
|
|
}
|
|
|
|
|
/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
|
|
|
|
|
2012-03-14 04:00:38 +00:00
|
|
|
/// Top-level MachineScheduler pass driver.
|
|
|
|
///
|
|
|
|
/// Visit blocks in function order. Divide each block into scheduling regions
|
2012-03-14 04:00:41 +00:00
|
|
|
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
|
|
|
|
/// consistent with the DAG builder, which traverses the interior of the
|
|
|
|
/// scheduling regions bottom-up.
|
2012-03-14 04:00:38 +00:00
|
|
|
///
|
|
|
|
/// This design avoids exposing scheduling boundaries to the DAG builder,
|
2012-03-14 04:00:41 +00:00
|
|
|
/// simplifying the DAG builder's support for "special" target instructions.
|
|
|
|
/// At the same time the design allows target schedulers to operate across
|
2012-03-14 04:00:38 +00:00
|
|
|
/// scheduling boundaries, for example to bundle the boundary instructions
|
|
|
|
/// without reordering them. This creates complexity, because the target
|
|
|
|
/// scheduler must update the RegionBegin and RegionEnd positions cached by
|
|
|
|
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
|
|
|
|
/// design would be to split blocks at scheduling boundaries, but LLVM has a
|
|
|
|
/// general bias against block splitting purely for implementation simplicity.
|
2012-03-08 01:41:12 +00:00
|
|
|
/// Top-level entry point for the pre-RA machine scheduler pass.
///
/// Decides whether scheduling applies to this function, caches the analysis
/// results the scheduler needs, then drives region scheduling via
/// scheduleRegions(). Returns true when the function may have been modified.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  // Honor -O0 / optnone: the pass framework asks us to skip this function.
  if (skipFunction(*mf.getFunction()))
    return false;

  // An explicit -enable-misched=0/1 on the command line overrides the
  // subtarget's default; otherwise defer to the subtarget hook.
  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass. These must be cached before
  // scheduleRegions() runs; the Scheduler instance reads them indirectly.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}
|
|
|
|
|
2013-12-28 21:56:51 +00:00
|
|
|
/// Top-level entry point for the post-RA machine scheduler pass.
///
/// Mirrors MachineScheduler::runOnMachineFunction but runs after register
/// allocation, so it needs neither LiveIntervals nor alias analysis; it
/// passes FixKillFlags=true to scheduleRegions() since later passes still
/// consume kill flags.
bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  // Honor -O0 / optnone: the pass framework asks us to skip this function.
  if (skipFunction(*mf.getFunction()))
    return false;

  // An explicit command-line setting overrides the subtarget's default.
  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}
|
|
|
|
|
2013-12-28 21:56:57 +00:00
|
|
|
/// Return true of the given instruction should not be included in a scheduling
|
|
|
|
/// region.
|
|
|
|
///
|
|
|
|
/// MachineScheduler does not currently support scheduling across calls. To
|
|
|
|
/// handle calls, the DAG builder needs to be modified to create register
|
|
|
|
/// anti/output dependencies on the registers clobbered by the call's regmask
|
|
|
|
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
|
|
|
|
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
|
|
|
|
/// the boundary, but there would be no benefit to postRA scheduling across
|
|
|
|
/// calls this late anyway.
|
|
|
|
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
|
|
|
|
MachineBasicBlock *MBB,
|
|
|
|
MachineFunction *MF,
|
2015-11-03 01:53:29 +00:00
|
|
|
const TargetInstrInfo *TII) {
|
2016-06-30 00:01:54 +00:00
|
|
|
return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
|
2013-12-28 21:56:57 +00:00
|
|
|
}
|
|
|
|
|
2013-12-28 21:56:47 +00:00
|
|
|
/// Main driver for both MachineScheduler and PostMachineScheduler.
///
/// Walks every basic block, carves it into scheduling regions bottom-up, and
/// hands each non-trivial region to \p Scheduler. \p FixKillFlags is set by
/// the post-RA scheduler, whose downstream passes still consume kill flags.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    // Debug-only filters to restrict scheduling to one function/block.
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (;I != MBB->begin(); --I) {
        MachineInstr &MI = *std::prev(I);
        if (isSchedBoundary(&MI, &*MBB, MF, TII))
          break;
        // Debug values are not schedulable and must not inflate the count.
        if (!MI.isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n From: " << *I << " To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}
|
|
|
|
|
2013-12-28 21:56:47 +00:00
|
|
|
/// Pass-printing hook required by the pass interface; intentionally a no-op.
void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}
|
|
|
|
|
2014-07-01 21:19:13 +00:00
|
|
|
/// Print the queue's name followed by the node numbers it currently holds.
LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
|
2012-03-14 04:00:41 +00:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
2013-12-28 21:56:47 +00:00
|
|
|
// ScheduleDAGMI - Basic machine instruction scheduling. This is
|
|
|
|
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
|
|
|
|
// virtual registers.
|
|
|
|
//===----------------------------------------------------------------------===//
|
2012-01-14 02:17:06 +00:00
|
|
|
|
2014-04-21 20:32:32 +00:00
|
|
|
// Provide a vtable anchor. Defining the destructor out-of-line pins the
// class's vtable to this translation unit.
ScheduleDAGMI::~ScheduleDAGMI() {
}
|
|
|
|
|
2013-04-24 15:54:43 +00:00
|
|
|
/// True if an edge PredSU -> SuccSU can be added without creating a cycle.
/// Edges into ExitSU are always legal.
bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  if (SuccSU == &ExitSU)
    return true;
  return !Topo.IsReachable(PredSU, SuccSU);
}
|
|
|
|
|
2012-11-12 19:40:10 +00:00
|
|
|
/// Try to add the dependence edge described by \p PredDep into \p SuccSU.
/// Fails (returns false) only when the edge would create a cycle.
bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  SUnit *PredSU = PredDep.getSUnit();
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredSU, SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredSU);
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
|
|
|
|
|
2012-01-17 06:55:07 +00:00
|
|
|
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
|
|
|
|
/// NumPredsLeft reaches zero, release the successor node.
|
2012-05-24 22:11:09 +00:00
|
|
|
///
|
|
|
|
/// FIXME: Adjust SuccSU height based on MinLatency.
|
2012-03-14 04:00:41 +00:00
|
|
|
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
|
2012-01-17 06:55:07 +00:00
|
|
|
SUnit *SuccSU = SuccEdge->getSUnit();
|
|
|
|
|
2012-11-12 19:28:57 +00:00
|
|
|
if (SuccEdge->isWeak()) {
|
|
|
|
--SuccSU->WeakPredsLeft;
|
2012-11-12 19:40:10 +00:00
|
|
|
if (SuccEdge->isCluster())
|
|
|
|
NextClusterSucc = SuccSU;
|
2012-11-12 19:28:57 +00:00
|
|
|
return;
|
|
|
|
}
|
2012-01-17 06:55:07 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
if (SuccSU->NumPredsLeft == 0) {
|
|
|
|
dbgs() << "*** Scheduling failed! ***\n";
|
|
|
|
SuccSU->dump(this);
|
|
|
|
dbgs() << " has been released too many times!\n";
|
2014-04-14 00:51:57 +00:00
|
|
|
llvm_unreachable(nullptr);
|
2012-01-17 06:55:07 +00:00
|
|
|
}
|
|
|
|
#endif
|
2014-06-07 01:48:43 +00:00
|
|
|
// SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
|
|
|
|
// CurrCycle may have advanced since then.
|
|
|
|
if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
|
|
|
|
SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();
|
|
|
|
|
2012-01-17 06:55:07 +00:00
|
|
|
--SuccSU->NumPredsLeft;
|
|
|
|
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
|
2012-03-14 04:00:41 +00:00
|
|
|
SchedImpl->releaseTopNode(SuccSU);
|
2012-01-17 06:55:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// releaseSuccessors - Call releaseSucc on each of SU's successors.
|
2012-03-14 04:00:41 +00:00
|
|
|
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
|
2012-01-17 06:55:07 +00:00
|
|
|
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
|
|
|
|
I != E; ++I) {
|
|
|
|
releaseSucc(SU, &*I);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-14 04:00:41 +00:00
|
|
|
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
|
|
|
|
/// NumSuccsLeft reaches zero, release the predecessor node.
|
2012-05-24 22:11:09 +00:00
|
|
|
///
|
|
|
|
/// FIXME: Adjust PredSU height based on MinLatency.
|
2012-03-14 04:00:41 +00:00
|
|
|
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
|
|
|
|
SUnit *PredSU = PredEdge->getSUnit();
|
|
|
|
|
2012-11-12 19:28:57 +00:00
|
|
|
if (PredEdge->isWeak()) {
|
|
|
|
--PredSU->WeakSuccsLeft;
|
2012-11-12 19:40:10 +00:00
|
|
|
if (PredEdge->isCluster())
|
|
|
|
NextClusterPred = PredSU;
|
2012-11-12 19:28:57 +00:00
|
|
|
return;
|
|
|
|
}
|
2012-03-14 04:00:41 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
if (PredSU->NumSuccsLeft == 0) {
|
|
|
|
dbgs() << "*** Scheduling failed! ***\n";
|
|
|
|
PredSU->dump(this);
|
|
|
|
dbgs() << " has been released too many times!\n";
|
2014-04-14 00:51:57 +00:00
|
|
|
llvm_unreachable(nullptr);
|
2012-03-14 04:00:41 +00:00
|
|
|
}
|
|
|
|
#endif
|
2014-06-07 01:48:43 +00:00
|
|
|
// SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
|
|
|
|
// CurrCycle may have advanced since then.
|
|
|
|
if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
|
|
|
|
PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();
|
|
|
|
|
2012-03-14 04:00:41 +00:00
|
|
|
--PredSU->NumSuccsLeft;
|
|
|
|
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
|
|
|
|
SchedImpl->releaseBottomNode(PredSU);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// releasePredecessors - Call releasePred on each of SU's predecessors.
|
|
|
|
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
|
|
|
|
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
|
|
|
|
I != E; ++I) {
|
|
|
|
releasePred(SU, &*I);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-28 21:56:47 +00:00
|
|
|
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  // Let the base class record the region boundaries first.
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  // Give the strategy a chance to set its per-region policy.
  SchedImpl->initPolicy(begin, end, regioninstrs);
}
|
|
|
|
|
2013-04-13 06:07:40 +00:00
|
|
|
/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
///
/// Moves \p MI immediately before \p InsertPos, keeping the cached
/// RegionBegin iterator and (when present) LiveIntervals consistent. The
/// RegionBegin adjustments must bracket the splice: the check against MI
/// happens before the move, the check against InsertPos after.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals. LIS may be null in the PostRA scheduler.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}
|
|
|
|
|
2012-03-21 04:12:07 +00:00
|
|
|
/// Debug-only throttle: returns false once -misched-cutoff instructions have
/// been scheduled, collapsing the unscheduled zone so the driver loop's
/// invariants still hold. In release builds this always returns true.
bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    // Pretend the region is fully scheduled so schedule() can exit cleanly.
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
|
|
|
|
|
2013-12-28 21:56:47 +00:00
|
|
|
/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  // Let registered mutations rewrite the DAG before strategy setup.
  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  // Main loop: repeatedly ask the strategy for a node and place it at the
  // current top or bottom of the unscheduled zone. CurrentTop/CurrentBottom
  // converge until the zone is empty.
  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      // If MI is already at the top boundary, just advance the boundary past
      // it (skipping debug values); otherwise physically move it there.
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        // MI may currently sit at the top boundary; keep CurrentTop valid
        // before the move invalidates its position.
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}
|
|
|
|
|
|
|
|
/// Apply each ScheduleDAGMutation step in order.
|
|
|
|
void ScheduleDAGMI::postprocessDAG() {
|
|
|
|
for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
|
|
|
|
Mutations[i]->apply(this);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void ScheduleDAGMI::
|
|
|
|
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
|
|
|
|
SmallVectorImpl<SUnit*> &BotRoots) {
|
|
|
|
for (std::vector<SUnit>::iterator
|
|
|
|
I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
|
|
|
|
SUnit *SU = &(*I);
|
|
|
|
assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");
|
|
|
|
|
|
|
|
// Order predecessors so DFSResult follows the critical path.
|
|
|
|
SU->biasCriticalPath();
|
|
|
|
|
|
|
|
// A SUnit is ready to top schedule if it has no predecessors.
|
|
|
|
if (!I->NumPredsLeft)
|
|
|
|
TopRoots.push_back(SU);
|
|
|
|
// A SUnit is ready to bottom schedule if it has no successors.
|
|
|
|
if (!I->NumSuccsLeft)
|
|
|
|
BotRoots.push_back(SU);
|
|
|
|
}
|
|
|
|
ExitSU.biasCriticalPath();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Identify DAG roots and setup scheduler queues.
|
|
|
|
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
|
|
|
|
ArrayRef<SUnit*> BotRoots) {
|
2014-04-14 00:51:57 +00:00
|
|
|
NextClusterSucc = nullptr;
|
|
|
|
NextClusterPred = nullptr;
|
2013-12-28 21:56:47 +00:00
|
|
|
|
|
|
|
// Release all DAG roots for scheduling, not including EntrySU/ExitSU.
|
|
|
|
//
|
|
|
|
// Nodes with unreleased weak edges can still be roots.
|
|
|
|
// Release top roots in forward order.
|
|
|
|
for (SmallVectorImpl<SUnit*>::const_iterator
|
|
|
|
I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
|
|
|
|
SchedImpl->releaseTopNode(*I);
|
|
|
|
}
|
|
|
|
// Release bottom roots in reverse order so the higher priority nodes appear
|
|
|
|
// first. This is more natural and slightly more efficient.
|
|
|
|
for (SmallVectorImpl<SUnit*>::const_reverse_iterator
|
|
|
|
I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
|
|
|
|
SchedImpl->releaseBottomNode(*I);
|
|
|
|
}
|
|
|
|
|
|
|
|
releaseSuccessors(&EntrySU);
|
|
|
|
releasePredecessors(&ExitSU);
|
|
|
|
|
|
|
|
SchedImpl->registerRoots();
|
|
|
|
|
|
|
|
// Advance past initial DebugValues.
|
|
|
|
CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
|
|
|
|
CurrentBottom = RegionEnd;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Update scheduler queues after scheduling an instruction.
|
|
|
|
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
|
|
|
|
// Release dependent instructions for scheduling.
|
|
|
|
if (IsTopNode)
|
|
|
|
releaseSuccessors(SU);
|
|
|
|
else
|
|
|
|
releasePredecessors(SU);
|
|
|
|
|
|
|
|
SU->isScheduled = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Reinsert any remaining debug_values, just like the PostRA scheduler.
///
/// Each entry in DbgValues pairs a DBG_VALUE with the instruction it
/// originally followed; the walk is in reverse so each splice sees the
/// stream as it was when the pair was recorded. RegionBegin/RegionEnd are
/// re-anchored whenever a splice would leave them pointing at a moved
/// DBG_VALUE.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    // Keep RegionBegin valid if the DBG_VALUE is about to move away from it.
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    // OrigPrevMI was pre-incremented above; if it now sits at the region's
    // last instruction, the moved DBG_VALUE becomes the new RegionEnd.
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}
|
|
|
|
|
|
|
|
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
|
|
|
|
void ScheduleDAGMI::dumpSchedule() const {
|
|
|
|
for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
|
|
|
|
if (SUnit *SU = getSUnit(&(*MI)))
|
|
|
|
SU->dump(this);
|
|
|
|
else
|
|
|
|
dbgs() << "Missing SUnit\n";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
|
|
|
|
// preservation.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// DFSResult is a raw owning pointer; release it here.
// NOTE(review): consider std::unique_ptr ownership instead — TODO confirm no
// caller relies on the raw pointer being transferable.
ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}
|
|
|
|
|
2012-04-24 17:56:43 +00:00
|
|
|
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  // Per-region pressure-diff cache must not leak across regions.
  SUPressureDiffs.clear();

  // Re-query the strategy each region: the policy set by initPolicy above
  // may differ per region.
  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}
|
|
|
|
|
|
|
|
// Setup the register pressure trackers for the top scheduled top and bottom
|
|
|
|
// scheduled regions.
|
2013-12-28 21:56:47 +00:00
|
|
|
void ScheduleDAGMILive::initRegPressure() {
|
2016-01-20 00:23:32 +00:00
|
|
|
TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
|
|
|
|
ShouldTrackLaneMasks, false);
|
|
|
|
BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
|
|
|
|
ShouldTrackLaneMasks, false);
|
2012-05-10 21:06:10 +00:00
|
|
|
|
|
|
|
// Close the RPTracker to finalize live ins.
|
|
|
|
RPTracker.closeRegion();
|
|
|
|
|
2013-07-30 19:59:12 +00:00
|
|
|
DEBUG(RPTracker.dump());
|
2012-05-24 22:11:14 +00:00
|
|
|
|
2012-05-10 21:06:10 +00:00
|
|
|
// Initialize the live ins and live outs.
|
2015-09-17 21:12:24 +00:00
|
|
|
TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
|
|
|
|
BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
|
2012-05-10 21:06:10 +00:00
|
|
|
|
|
|
|
// Close one end of the tracker so we can call
|
|
|
|
// getMaxUpward/DownwardPressureDelta before advancing across any
|
|
|
|
// instructions. This converts currently live regs into live ins/outs.
|
|
|
|
TopRPTracker.closeTop();
|
|
|
|
BotRPTracker.closeBottom();
|
|
|
|
|
2013-07-30 19:59:12 +00:00
|
|
|
BotRPTracker.initLiveThru(RPTracker);
|
|
|
|
if (!BotRPTracker.getLiveThru().empty()) {
|
|
|
|
TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
|
|
|
|
DEBUG(dbgs() << "Live Thru: ";
|
|
|
|
dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
|
|
|
|
};
|
|
|
|
|
2013-08-30 04:36:57 +00:00
|
|
|
// For each live out vreg reduce the pressure change associated with other
|
|
|
|
// uses of the same vreg below the live-out reaching def.
|
2015-09-17 21:12:24 +00:00
|
|
|
updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);
|
2013-08-30 04:36:57 +00:00
|
|
|
|
2012-05-10 21:06:10 +00:00
|
|
|
// Account for liveness generated by the region boundary.
|
2013-08-30 04:36:57 +00:00
|
|
|
if (LiveRegionEnd != RegionEnd) {
|
2016-01-20 00:23:26 +00:00
|
|
|
SmallVector<RegisterMaskPair, 8> LiveUses;
|
2013-08-30 04:36:57 +00:00
|
|
|
BotRPTracker.recede(&LiveUses);
|
|
|
|
updatePressureDiffs(LiveUses);
|
|
|
|
}
|
2012-05-10 21:06:10 +00:00
|
|
|
|
2015-11-13 22:30:31 +00:00
|
|
|
DEBUG(
|
|
|
|
dbgs() << "Top Pressure:\n";
|
|
|
|
dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
|
|
|
|
dbgs() << "Bottom Pressure:\n";
|
|
|
|
dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
|
|
|
|
);
|
|
|
|
|
2012-05-10 21:06:10 +00:00
|
|
|
assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");
|
2012-05-17 18:35:10 +00:00
|
|
|
|
|
|
|
// Cache the list of excess pressure sets in this region. This will also track
|
|
|
|
// the max pressure in the scheduled code for these sets.
|
|
|
|
RegionCriticalPSets.clear();
|
2013-01-25 21:44:27 +00:00
|
|
|
const std::vector<unsigned> &RegionPressure =
|
|
|
|
RPTracker.getPressure().MaxSetPressure;
|
2012-05-17 18:35:10 +00:00
|
|
|
for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
|
2013-06-21 18:32:58 +00:00
|
|
|
unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
|
2013-06-21 18:33:01 +00:00
|
|
|
if (RegionPressure[i] > Limit) {
|
|
|
|
DEBUG(dbgs() << TRI->getRegPressureSetName(i)
|
|
|
|
<< " Limit " << Limit
|
|
|
|
<< " Actual " << RegionPressure[i] << "\n");
|
2013-08-30 03:49:48 +00:00
|
|
|
RegionCriticalPSets.push_back(PressureChange(i));
|
2013-06-21 18:33:01 +00:00
|
|
|
}
|
2012-05-17 18:35:10 +00:00
|
|
|
}
|
|
|
|
DEBUG(dbgs() << "Excess PSets: ";
|
|
|
|
for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
|
|
|
|
dbgs() << TRI->getRegPressureSetName(
|
2013-08-30 03:49:48 +00:00
|
|
|
RegionCriticalPSets[i].getPSet()) << " ";
|
2012-05-17 18:35:10 +00:00
|
|
|
dbgs() << "\n");
|
|
|
|
}
|
|
|
|
|
2013-12-28 21:56:47 +00:00
|
|
|
void ScheduleDAGMILive::
|
2013-09-06 17:32:47 +00:00
|
|
|
updateScheduledPressure(const SUnit *SU,
|
|
|
|
const std::vector<unsigned> &NewMaxPressure) {
|
|
|
|
const PressureDiff &PDiff = getPressureDiff(SU);
|
|
|
|
unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
|
|
|
|
for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
|
|
|
|
I != E; ++I) {
|
|
|
|
if (!I->isValid())
|
|
|
|
break;
|
|
|
|
unsigned ID = I->getPSet();
|
|
|
|
while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
|
|
|
|
++CritIdx;
|
|
|
|
if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
|
|
|
|
if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
|
|
|
|
&& NewMaxPressure[ID] <= INT16_MAX)
|
|
|
|
RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
|
|
|
|
}
|
|
|
|
unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
|
|
|
|
if (NewMaxPressure[ID] >= Limit - 2) {
|
|
|
|
DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
|
2015-05-17 23:40:31 +00:00
|
|
|
<< NewMaxPressure[ID]
|
|
|
|
<< ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
|
|
|
|
<< "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
|
2013-09-06 17:32:47 +00:00
|
|
|
}
|
2012-05-17 18:35:10 +00:00
|
|
|
}
|
2012-04-24 17:56:43 +00:00
|
|
|
}
|
|
|
|
|
2013-08-30 04:36:57 +00:00
|
|
|
/// Update the PressureDiff array for liveness after scheduling this
|
|
|
|
/// instruction.
|
2016-01-20 00:23:26 +00:00
|
|
|
void ScheduleDAGMILive::updatePressureDiffs(
|
|
|
|
ArrayRef<RegisterMaskPair> LiveUses) {
|
|
|
|
for (const RegisterMaskPair &P : LiveUses) {
|
|
|
|
unsigned Reg = P.RegUnit;
|
2016-01-20 00:23:32 +00:00
|
|
|
/// FIXME: Currently assuming single-use physregs.
|
2013-08-30 04:36:57 +00:00
|
|
|
if (!TRI->isVirtualRegister(Reg))
|
|
|
|
continue;
|
2013-09-06 17:32:39 +00:00
|
|
|
|
2016-01-20 00:23:32 +00:00
|
|
|
if (ShouldTrackLaneMasks) {
|
|
|
|
// If the register has just become live then other uses won't change
|
|
|
|
// this fact anymore => decrement pressure.
|
|
|
|
// If the register has just become dead then other uses make it come
|
|
|
|
// back to life => increment pressure.
|
|
|
|
bool Decrement = P.LaneMask != 0;
|
|
|
|
|
|
|
|
for (const VReg2SUnit &V2SU
|
|
|
|
: make_range(VRegUses.find(Reg), VRegUses.end())) {
|
|
|
|
SUnit &SU = *V2SU.SU;
|
|
|
|
if (SU.isScheduled || &SU == &ExitSU)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
PressureDiff &PDiff = getPressureDiff(&SU);
|
|
|
|
PDiff.addPressureChange(Reg, Decrement, &MRI);
|
|
|
|
DEBUG(
|
|
|
|
dbgs() << " UpdateRegP: SU(" << SU.NodeNum << ") "
|
|
|
|
<< PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
|
|
|
|
<< ' ' << *SU.getInstr();
|
|
|
|
dbgs() << " to ";
|
|
|
|
PDiff.dump(*TRI);
|
|
|
|
);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
assert(P.LaneMask != 0);
|
|
|
|
DEBUG(dbgs() << " LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
|
|
|
|
// This may be called before CurrentBottom has been initialized. However,
|
|
|
|
// BotRPTracker must have a valid position. We want the value live into the
|
|
|
|
// instruction or live out of the block, so ask for the previous
|
|
|
|
// instruction's live-out.
|
|
|
|
const LiveInterval &LI = LIS->getInterval(Reg);
|
|
|
|
VNInfo *VNI;
|
|
|
|
MachineBasicBlock::const_iterator I =
|
|
|
|
nextIfDebug(BotRPTracker.getPos(), BB->end());
|
|
|
|
if (I == BB->end())
|
|
|
|
VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
|
|
|
|
else {
|
2016-02-27 06:40:41 +00:00
|
|
|
LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
|
2016-01-20 00:23:32 +00:00
|
|
|
VNI = LRQ.valueIn();
|
|
|
|
}
|
|
|
|
// RegisterPressureTracker guarantees that readsReg is true for LiveUses.
|
|
|
|
assert(VNI && "No live value at use.");
|
|
|
|
for (const VReg2SUnit &V2SU
|
|
|
|
: make_range(VRegUses.find(Reg), VRegUses.end())) {
|
|
|
|
SUnit *SU = V2SU.SU;
|
|
|
|
// If this use comes before the reaching def, it cannot be a last use,
|
|
|
|
// so decrease its pressure change.
|
|
|
|
if (!SU->isScheduled && SU != &ExitSU) {
|
2016-02-27 06:40:41 +00:00
|
|
|
LiveQueryResult LRQ =
|
|
|
|
LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
|
2016-01-20 00:23:32 +00:00
|
|
|
if (LRQ.valueIn() == VNI) {
|
|
|
|
PressureDiff &PDiff = getPressureDiff(SU);
|
|
|
|
PDiff.addPressureChange(Reg, true, &MRI);
|
|
|
|
DEBUG(
|
|
|
|
dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
|
|
|
|
<< *SU->getInstr();
|
|
|
|
dbgs() << " to ";
|
|
|
|
PDiff.dump(*TRI);
|
|
|
|
);
|
|
|
|
}
|
2015-11-06 20:59:02 +00:00
|
|
|
}
|
2013-08-30 04:36:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-14 04:00:41 +00:00
|
|
|
/// schedule - Called back from MachineScheduler::runOnMachineFunction
|
2012-04-24 17:56:43 +00:00
|
|
|
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
|
|
|
|
/// only includes instructions that have DAG nodes, not scheduling boundaries.
|
2012-09-11 00:39:15 +00:00
|
|
|
///
|
|
|
|
/// This is a skeletal driver, with all the functionality pushed into helpers,
|
2015-08-18 22:41:58 +00:00
|
|
|
/// so that it can be easily extended by experimental schedulers. Generally,
|
2012-09-11 00:39:15 +00:00
|
|
|
/// implementing MachineSchedStrategy should be sufficient to implement a new
|
|
|
|
/// scheduling algorithm. However, if a scheduler further subclasses
|
2013-12-28 21:56:47 +00:00
|
|
|
/// ScheduleDAGMILive then it will want to override this virtual method in order
|
|
|
|
/// to update any specialized state.
|
|
|
|
void ScheduleDAGMILive::schedule() {
|
2015-09-18 18:52:20 +00:00
|
|
|
DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
|
|
|
|
DEBUG(SchedImpl->dumpPolicy());
|
2012-09-11 00:39:15 +00:00
|
|
|
buildDAGWithRegPressure();
|
|
|
|
|
2012-11-12 19:40:10 +00:00
|
|
|
Topo.InitDAGTopologicalSorting();
|
|
|
|
|
2012-09-14 17:22:42 +00:00
|
|
|
postprocessDAG();
|
|
|
|
|
2013-01-25 06:33:57 +00:00
|
|
|
SmallVector<SUnit*, 8> TopRoots, BotRoots;
|
|
|
|
findRootsAndBiasEdges(TopRoots, BotRoots);
|
|
|
|
|
|
|
|
// Initialize the strategy before modifying the DAG.
|
|
|
|
// This may initialize a DFSResult to be used for queue priority.
|
|
|
|
SchedImpl->initialize(this);
|
|
|
|
|
2015-11-06 20:59:02 +00:00
|
|
|
DEBUG(
|
|
|
|
for (const SUnit &SU : SUnits) {
|
|
|
|
SU.dumpAll(this);
|
|
|
|
if (ShouldTrackPressure) {
|
|
|
|
dbgs() << " Pressure Diff : ";
|
|
|
|
getPressureDiff(&SU).dump(*TRI);
|
|
|
|
}
|
|
|
|
dbgs() << '\n';
|
|
|
|
}
|
|
|
|
);
|
2013-01-25 06:33:57 +00:00
|
|
|
if (ViewMISchedDAGs) viewGraph();
|
2012-09-11 00:39:15 +00:00
|
|
|
|
2013-01-25 06:33:57 +00:00
|
|
|
// Initialize ready queues now that the DAG and priority data are finalized.
|
|
|
|
initQueues(TopRoots, BotRoots);
|
2012-09-11 00:39:15 +00:00
|
|
|
|
|
|
|
bool IsTopNode = false;
|
2015-09-18 18:52:20 +00:00
|
|
|
while (true) {
|
|
|
|
DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
|
|
|
|
SUnit *SU = SchedImpl->pickNode(IsTopNode);
|
|
|
|
if (!SU) break;
|
|
|
|
|
2012-10-08 18:53:53 +00:00
|
|
|
assert(!SU->isScheduled && "Node already scheduled");
|
2012-09-11 00:39:15 +00:00
|
|
|
if (!checkSchedLimit())
|
|
|
|
break;
|
|
|
|
|
|
|
|
scheduleMI(SU, IsTopNode);
|
|
|
|
|
2013-12-28 21:56:47 +00:00
|
|
|
if (DFSResult) {
|
|
|
|
unsigned SubtreeID = DFSResult->getSubtreeID(SU);
|
|
|
|
if (!ScheduledTrees.test(SubtreeID)) {
|
|
|
|
ScheduledTrees.set(SubtreeID);
|
|
|
|
DFSResult->scheduleTree(SubtreeID);
|
|
|
|
SchedImpl->scheduleTree(SubtreeID);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Notify the scheduling strategy after updating the DAG.
|
|
|
|
SchedImpl->schedNode(SU, IsTopNode);
|
2015-03-27 06:10:13 +00:00
|
|
|
|
|
|
|
updateQueues(SU, IsTopNode);
|
2012-09-11 00:39:15 +00:00
|
|
|
}
|
|
|
|
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
|
|
|
|
|
|
|
|
placeDebugValues();
|
2012-11-07 07:05:09 +00:00
|
|
|
|
|
|
|
DEBUG({
|
2012-11-28 03:42:47 +00:00
|
|
|
unsigned BBNum = begin()->getParent()->getNumber();
|
2012-11-07 07:05:09 +00:00
|
|
|
dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
|
|
|
|
dumpSchedule();
|
|
|
|
dbgs() << '\n';
|
|
|
|
});
|
2012-09-11 00:39:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Build the DAG and setup three register pressure trackers.
///
/// When pressure tracking is disabled this reduces to a plain DAG build;
/// otherwise the region-wide RPTracker is primed first so buildSchedGraph can
/// record per-instruction pressure diffs as it goes.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}
|
2012-05-10 21:06:10 +00:00
|
|
|
|
2013-12-28 21:56:47 +00:00
|
|
|
void ScheduleDAGMILive::computeDFSResult() {
|
2013-01-25 04:01:04 +00:00
|
|
|
if (!DFSResult)
|
|
|
|
DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
|
|
|
|
DFSResult->clear();
|
|
|
|
ScheduledTrees.clear();
|
2013-01-25 06:33:57 +00:00
|
|
|
DFSResult->resize(SUnits.size());
|
|
|
|
DFSResult->compute(SUnits);
|
2013-01-25 04:01:04 +00:00
|
|
|
ScheduledTrees.resize(DFSResult->getNumSubtrees());
|
|
|
|
}
|
|
|
|
|
2013-08-29 18:04:49 +00:00
|
|
|
/// Compute the max cyclic critical path through the DAG. The scheduling DAG
|
|
|
|
/// only provides the critical path for single block loops. To handle loops that
|
|
|
|
/// span blocks, we could use the vreg path latencies provided by
|
|
|
|
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
|
|
|
|
/// available for use in the scheduler.
|
|
|
|
///
|
|
|
|
/// The cyclic path estimation identifies a def-use pair that crosses the back
|
2013-08-30 02:02:12 +00:00
|
|
|
/// edge and considers the depth and height of the nodes. For example, consider
|
2013-08-29 18:04:49 +00:00
|
|
|
/// the following instruction sequence where each instruction has unit latency
|
|
|
|
/// and defines an epomymous virtual register:
|
|
|
|
///
|
|
|
|
/// a->b(a,c)->c(b)->d(c)->exit
|
|
|
|
///
|
|
|
|
/// The cyclic critical path is a two cycles: b->c->b
|
|
|
|
/// The acyclic critical path is four cycles: a->b->c->d->exit
|
|
|
|
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
|
|
|
|
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
|
|
|
|
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
|
|
|
|
/// LiveInDepth = depth(b) = len(a->b) = 1
|
|
|
|
///
|
|
|
|
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
|
|
|
|
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
|
|
|
|
/// CyclicCriticalPath = min(2, 2) = 2
|
2013-12-28 21:56:47 +00:00
|
|
|
///
|
|
|
|
/// This could be relevant to PostRA scheduling, but is currently implemented
|
|
|
|
/// assuming LiveIntervals.
|
|
|
|
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
|
2013-08-29 18:04:49 +00:00
|
|
|
// This only applies to single block loop.
|
|
|
|
if (!BB->isSuccessor(BB))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
unsigned MaxCyclicLatency = 0;
|
|
|
|
// Visit each live out vreg def to find def/use pairs that cross iterations.
|
2016-01-20 00:23:26 +00:00
|
|
|
for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
|
|
|
|
unsigned Reg = P.RegUnit;
|
2013-08-29 18:04:49 +00:00
|
|
|
if (!TRI->isVirtualRegister(Reg))
|
|
|
|
continue;
|
|
|
|
const LiveInterval &LI = LIS->getInterval(Reg);
|
|
|
|
const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
|
|
|
|
if (!DefVNI)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
|
|
|
|
const SUnit *DefSU = getSUnit(DefMI);
|
|
|
|
if (!DefSU)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
unsigned LiveOutHeight = DefSU->getHeight();
|
|
|
|
unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
|
|
|
|
// Visit all local users of the vreg def.
|
2015-10-29 03:57:17 +00:00
|
|
|
for (const VReg2SUnit &V2SU
|
|
|
|
: make_range(VRegUses.find(Reg), VRegUses.end())) {
|
|
|
|
SUnit *SU = V2SU.SU;
|
|
|
|
if (SU == &ExitSU)
|
2013-08-29 18:04:49 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
// Only consider uses of the phi.
|
2016-02-27 06:40:41 +00:00
|
|
|
LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
|
2013-08-29 18:04:49 +00:00
|
|
|
if (!LRQ.valueIn()->isPHIDef())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Assume that a path spanning two iterations is a cycle, which could
|
|
|
|
// overestimate in strange cases. This allows cyclic latency to be
|
|
|
|
// estimated as the minimum slack of the vreg's depth or height.
|
|
|
|
unsigned CyclicLatency = 0;
|
2015-10-29 03:57:17 +00:00
|
|
|
if (LiveOutDepth > SU->getDepth())
|
|
|
|
CyclicLatency = LiveOutDepth - SU->getDepth();
|
2013-08-29 18:04:49 +00:00
|
|
|
|
2015-10-29 03:57:17 +00:00
|
|
|
unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
|
2013-08-29 18:04:49 +00:00
|
|
|
if (LiveInHeight > LiveOutHeight) {
|
|
|
|
if (LiveInHeight - LiveOutHeight < CyclicLatency)
|
|
|
|
CyclicLatency = LiveInHeight - LiveOutHeight;
|
2016-04-21 01:54:13 +00:00
|
|
|
} else
|
2013-08-29 18:04:49 +00:00
|
|
|
CyclicLatency = 0;
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
|
2015-10-29 03:57:17 +00:00
|
|
|
<< SU->NodeNum << ") = " << CyclicLatency << "c\n");
|
2013-08-29 18:04:49 +00:00
|
|
|
if (CyclicLatency > MaxCyclicLatency)
|
|
|
|
MaxCyclicLatency = CyclicLatency;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
|
|
|
|
return MaxCyclicLatency;
|
|
|
|
}
|
|
|
|
|
2016-04-28 19:17:44 +00:00
|
|
|
/// Release ExitSU predecessors and setup scheduler queues. Re-position
|
|
|
|
/// the Top RP tracker in case the region beginning has changed.
|
|
|
|
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
|
|
|
|
ArrayRef<SUnit*> BotRoots) {
|
|
|
|
ScheduleDAGMI::initQueues(TopRoots, BotRoots);
|
|
|
|
if (ShouldTrackPressure) {
|
|
|
|
assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
|
|
|
|
TopRPTracker.setPos(CurrentTop);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-09-11 00:39:15 +00:00
|
|
|
/// Move an instruction and update register pressure.
|
2013-12-28 21:56:47 +00:00
|
|
|
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
|
2012-09-11 00:39:15 +00:00
|
|
|
// Move the instruction to its new location in the instruction stream.
|
|
|
|
MachineInstr *MI = SU->getInstr();
|
2012-05-10 21:06:10 +00:00
|
|
|
|
2012-09-11 00:39:15 +00:00
|
|
|
if (IsTopNode) {
|
|
|
|
assert(SU->isTopReady() && "node still has unscheduled dependencies");
|
|
|
|
if (&*CurrentTop == MI)
|
|
|
|
CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
|
|
|
|
else {
|
|
|
|
moveInstruction(MI, CurrentTop);
|
|
|
|
TopRPTracker.setPos(MI);
|
2012-03-14 04:00:41 +00:00
|
|
|
}
|
2012-09-11 00:39:15 +00:00
|
|
|
|
2013-09-04 20:59:59 +00:00
|
|
|
if (ShouldTrackPressure) {
|
|
|
|
// Update top scheduled pressure.
|
2016-01-20 00:23:32 +00:00
|
|
|
RegisterOperands RegOpers;
|
|
|
|
RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
|
|
|
|
if (ShouldTrackLaneMasks) {
|
|
|
|
// Adjust liveness and add missing dead+read-undef flags.
|
2016-02-27 06:40:41 +00:00
|
|
|
SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
|
2016-01-20 00:23:32 +00:00
|
|
|
RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
|
|
|
|
} else {
|
|
|
|
// Adjust for missing dead-def flags.
|
|
|
|
RegOpers.detectDeadDefs(*MI, *LIS);
|
|
|
|
}
|
|
|
|
|
|
|
|
TopRPTracker.advance(RegOpers);
|
2013-09-04 20:59:59 +00:00
|
|
|
assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
|
2015-11-06 20:59:02 +00:00
|
|
|
DEBUG(
|
|
|
|
dbgs() << "Top Pressure:\n";
|
|
|
|
dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
|
|
|
|
);
|
|
|
|
|
2013-09-06 17:32:47 +00:00
|
|
|
updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
|
2013-09-04 20:59:59 +00:00
|
|
|
}
|
2016-04-21 01:54:13 +00:00
|
|
|
} else {
|
2012-09-11 00:39:15 +00:00
|
|
|
assert(SU->isBottomReady() && "node still has unscheduled dependencies");
|
|
|
|
MachineBasicBlock::iterator priorII =
|
|
|
|
priorNonDebug(CurrentBottom, CurrentTop);
|
|
|
|
if (&*priorII == MI)
|
|
|
|
CurrentBottom = priorII;
|
2012-01-17 06:55:07 +00:00
|
|
|
else {
|
2012-09-11 00:39:15 +00:00
|
|
|
if (&*CurrentTop == MI) {
|
|
|
|
CurrentTop = nextIfDebug(++CurrentTop, priorII);
|
|
|
|
TopRPTracker.setPos(CurrentTop);
|
2012-03-14 04:00:41 +00:00
|
|
|
}
|
2012-09-11 00:39:15 +00:00
|
|
|
moveInstruction(MI, CurrentBottom);
|
|
|
|
CurrentBottom = MI;
|
2012-01-17 06:55:07 +00:00
|
|
|
}
|
2013-09-04 20:59:59 +00:00
|
|
|
if (ShouldTrackPressure) {
|
2016-01-20 00:23:32 +00:00
|
|
|
RegisterOperands RegOpers;
|
|
|
|
RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
|
|
|
|
if (ShouldTrackLaneMasks) {
|
|
|
|
// Adjust liveness and add missing dead+read-undef flags.
|
2016-02-27 06:40:41 +00:00
|
|
|
SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
|
2016-01-20 00:23:32 +00:00
|
|
|
RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
|
|
|
|
} else {
|
|
|
|
// Adjust for missing dead-def flags.
|
|
|
|
RegOpers.detectDeadDefs(*MI, *LIS);
|
|
|
|
}
|
|
|
|
|
|
|
|
BotRPTracker.recedeSkipDebugValues();
|
2016-01-20 00:23:26 +00:00
|
|
|
SmallVector<RegisterMaskPair, 8> LiveUses;
|
2016-01-20 00:23:32 +00:00
|
|
|
BotRPTracker.recede(RegOpers, &LiveUses);
|
2013-09-04 20:59:59 +00:00
|
|
|
assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
|
2015-11-06 20:59:02 +00:00
|
|
|
DEBUG(
|
|
|
|
dbgs() << "Bottom Pressure:\n";
|
|
|
|
dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
|
|
|
|
);
|
|
|
|
|
2013-09-06 17:32:47 +00:00
|
|
|
updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
|
2013-09-04 20:59:59 +00:00
|
|
|
updatePressureDiffs(LiveUses);
|
|
|
|
}
|
2012-01-17 06:55:07 +00:00
|
|
|
}
|
2012-09-11 00:39:15 +00:00
|
|
|
}
|
2012-04-24 18:04:37 +00:00
|
|
|
|
2012-11-12 19:52:20 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2016-04-15 14:58:38 +00:00
|
|
|
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
|
2012-11-12 19:52:20 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2012-11-12 19:40:10 +00:00
|
|
|
namespace {
|
|
|
|
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  // A memory op together with its decoded base register and offset, sortable
  // so that accesses off the same base end up adjacent.
  struct MemOpInfo {
    SUnit *SU;
    unsigned BaseReg;
    int64_t Offset;
    MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
        : SU(su), BaseReg(reg), Offset(ofs) {}

    // Order by (BaseReg, Offset), with NodeNum as a deterministic tie-break.
    bool operator<(const MemOpInfo&RHS) const {
      return std::tie(BaseReg, Offset, SU->NodeNum) <
             std::tie(RHS.BaseReg, RHS.Offset, RHS.SU->NodeNum);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  // True when this mutation clusters loads; false when it clusters stores.
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
};
|
|
|
|
|
|
|
|
class StoreClusterMutation : public BaseMemOpClusterMutation {
|
|
|
|
public:
|
|
|
|
StoreClusterMutation(const TargetInstrInfo *tii,
|
|
|
|
const TargetRegisterInfo *tri)
|
|
|
|
: BaseMemOpClusterMutation(tii, tri, false) {}
|
|
|
|
};
|
|
|
|
|
|
|
|
class LoadClusterMutation : public BaseMemOpClusterMutation {
|
|
|
|
public:
|
|
|
|
LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
|
|
|
|
: BaseMemOpClusterMutation(tii, tri, true) {}
|
2012-11-12 19:40:10 +00:00
|
|
|
};
|
2015-06-23 09:49:53 +00:00
|
|
|
} // anonymous
|
2012-11-12 19:40:10 +00:00
|
|
|
|
2016-08-19 19:59:18 +00:00
|
|
|
namespace llvm {

/// Factory for the DAG mutation that clusters neighboring loads.
std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI) {
  return make_unique<LoadClusterMutation>(TII, TRI);
}

/// Factory for the DAG mutation that clusters neighboring stores.
std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI) {
  return make_unique<StoreClusterMutation>(TII, TRI);
}

} // namespace llvm
|
|
|
|
|
2016-04-15 14:58:38 +00:00
|
|
|
void BaseMemOpClusterMutation::clusterNeighboringMemOps(
|
|
|
|
ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
|
|
|
|
SmallVector<MemOpInfo, 32> MemOpRecords;
|
|
|
|
for (unsigned Idx = 0, End = MemOps.size(); Idx != End; ++Idx) {
|
|
|
|
SUnit *SU = MemOps[Idx];
|
2012-11-12 19:40:10 +00:00
|
|
|
unsigned BaseReg;
|
2016-03-09 16:00:35 +00:00
|
|
|
int64_t Offset;
|
2016-06-30 00:01:54 +00:00
|
|
|
if (TII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseReg, Offset, TRI))
|
2016-04-15 14:58:38 +00:00
|
|
|
MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
|
2012-11-12 19:40:10 +00:00
|
|
|
}
|
2016-04-15 14:58:38 +00:00
|
|
|
if (MemOpRecords.size() < 2)
|
2012-11-12 19:40:10 +00:00
|
|
|
return;
|
2016-04-15 14:58:38 +00:00
|
|
|
|
|
|
|
std::sort(MemOpRecords.begin(), MemOpRecords.end());
|
2012-11-12 19:40:10 +00:00
|
|
|
unsigned ClusterLength = 1;
|
2016-04-15 14:58:38 +00:00
|
|
|
for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
|
|
|
|
if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
|
2012-11-12 19:40:10 +00:00
|
|
|
ClusterLength = 1;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-04-15 14:58:38 +00:00
|
|
|
SUnit *SUa = MemOpRecords[Idx].SU;
|
|
|
|
SUnit *SUb = MemOpRecords[Idx+1].SU;
|
2016-06-30 00:01:54 +00:00
|
|
|
if (TII->shouldClusterMemOps(*SUa->getInstr(), *SUb->getInstr(),
|
|
|
|
ClusterLength) &&
|
|
|
|
DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
|
2016-04-15 14:58:38 +00:00
|
|
|
DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
|
2012-11-12 19:40:10 +00:00
|
|
|
<< SUb->NodeNum << ")\n");
|
|
|
|
// Copy successor edges from SUa to SUb. Interleaving computation
|
|
|
|
// dependent on SUa can prevent load combining due to register reuse.
|
|
|
|
// Predecessor edges do not need to be copied from SUb to SUa since nearby
|
|
|
|
// loads should have effectively the same inputs.
|
|
|
|
for (SUnit::const_succ_iterator
|
|
|
|
SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
|
|
|
|
if (SI->getSUnit() == SUb)
|
|
|
|
continue;
|
|
|
|
DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
|
|
|
|
DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
|
|
|
|
}
|
|
|
|
++ClusterLength;
|
2016-04-21 01:54:13 +00:00
|
|
|
} else
|
2012-11-12 19:40:10 +00:00
|
|
|
ClusterLength = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Callback from DAG postProcessing to create cluster edges for loads
/// or stores (selected by IsLoad).
///
/// Memory ops are first grouped by their nearest control (chain-like)
/// predecessor so only ops that may alias the same chain are clustered
/// together; each group is then handed to clusterNeighboringMemOps().
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {

  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent MemOps.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    // Skip nodes of the wrong kind for this mutation's direction.
    if ((IsLoad && !SU->getInstr()->mayLoad()) ||
        (!IsLoad && !SU->getInstr()->mayStore()))
      continue;

    // Find the node's first control predecessor, which identifies its chain.
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }

  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringMemOps(StoreChainDependents[Idx], DAG);
}
|
|
|
|
|
2012-11-12 19:52:20 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  // Target hook provider used to query shouldScheduleAdjacent().
  const TargetInstrInfo &TII;
public:
  MacroFusion(const TargetInstrInfo &TII)
      : TII(TII) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;
};
|
2015-06-23 09:49:53 +00:00
|
|
|
} // anonymous
|
2012-11-12 19:52:20 +00:00
|
|
|
|
2016-08-19 19:59:18 +00:00
|
|
|
namespace llvm {

/// Factory for the DAG mutation that clusters fusable instruction pairs.
/// Note: \p TII must be non-null; it is dereferenced unconditionally.
std::unique_ptr<ScheduleDAGMutation>
createMacroFusionDAGMutation(const TargetInstrInfo *TII) {
  return make_unique<MacroFusion>(*TII);
}

} // namespace llvm
|
|
|
|
|
2012-11-12 19:52:20 +00:00
|
|
|
/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

  // For now, assume targets can only fuse with the branch.
  SUnit &ExitSU = DAG->ExitSU;
  MachineInstr *Branch = ExitSU.getInstr();
  if (!Branch)
    return;

  // Scan ExitSU's predecessors for the first one the target wants scheduled
  // adjacent to the branch.
  for (SDep &PredDep : ExitSU.Preds) {
    if (PredDep.isWeak())
      continue;
    SUnit &SU = *PredDep.getSUnit();
    MachineInstr &Pred = *SU.getInstr();
    if (!TII.shouldScheduleAdjacent(Pred, *Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU. There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    // NOTE(review): addEdge appends to ExitSU.Preds while the outer range-for
    // over ExitSU.Preds is live; this looks safe only because we break out of
    // the loop immediately below without touching PredDep again — confirm if
    // modifying this control flow.
    bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    // Adjust latency of data deps between the nodes.
    for (SDep &PredDep : ExitSU.Preds) {
      if (PredDep.getSUnit() == &SU)
        PredDep.setLatency(0);
    }
    for (SDep &SuccDep : SU.Succs) {
      if (SuccDep.getSUnit() == &ExitSU)
        SuccDep.setLatency(0);
    }

    DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
    // Only one pair is fused per region.
    break;
  }
}
|
|
|
|
|
2013-04-24 15:54:43 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// CopyConstrain - DAG post-processing to encourage copy elimination.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  // Slot index of the first non-debug instruction in the current scheduling
  // region; initialized by apply() before constraining any copies.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  // NOTE(review): both parameters are currently unused; presumably kept so the
  // constructor mirrors the createCopyConstrainDAGMutation() factory — confirm
  // before removing.
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  // Add weak edges around a single COPY's live ranges; see the definition for
  // the full algorithm.
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous
|
2013-04-24 15:54:43 +00:00
|
|
|
|
2016-08-19 19:59:18 +00:00
|
|
|
namespace llvm {
|
|
|
|
|
|
|
|
std::unique_ptr<ScheduleDAGMutation>
|
|
|
|
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
|
|
|
|
const TargetRegisterInfo *TRI) {
|
|
|
|
return make_unique<CopyConstrain>(TII, TRI);
|
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace llvm
|
|
|
|
|
2013-04-24 15:54:43 +00:00
|
|
|
/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  // Operand 0 is the copy's def, operand 1 its source.
  const MachineOperand &SrcOp = Copy->getOperand(1);
  unsigned SrcReg = SrcOp.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
    return;

  const MachineOperand &DstOp = Copy->getOperand(0);
  unsigned DstReg = DstOp.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully contrain the copy without cyclic scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from source's other uses to the copy.
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    // The source isn't local to the region; try the roles swapped.
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segement. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    // Only data uses of the local vreg matter here.
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    // Bail out entirely if any edge would create a cycle; partial application
    // of the constraint would be pointless.
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    // Anti edges into GlobalSU mark earlier uses of the global vreg.
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges. Weak (not hard) edges: they only bias the scheduler's
  // priority without forbidding any legal schedule.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}
|
|
|
|
|
|
|
|
/// \brief Callback from DAG postProcessing to create weak edges to encourage
|
|
|
|
/// copy elimination.
|
2016-03-05 15:45:23 +00:00
|
|
|
void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
|
|
|
|
ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
|
2013-12-28 21:56:47 +00:00
|
|
|
assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
|
|
|
|
|
2013-04-24 23:19:56 +00:00
|
|
|
MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
|
|
|
|
if (FirstPos == DAG->end())
|
|
|
|
return;
|
2016-02-27 06:40:41 +00:00
|
|
|
RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
|
2013-04-24 15:54:43 +00:00
|
|
|
RegionEndIdx = DAG->getLIS()->getInstructionIndex(
|
2016-02-27 06:40:41 +00:00
|
|
|
*priorNonDebug(DAG->end(), DAG->begin()));
|
2013-04-24 15:54:43 +00:00
|
|
|
|
|
|
|
for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
|
|
|
|
SUnit *SU = &DAG->SUnits[Idx];
|
|
|
|
if (!SU->getInstr()->isCopy())
|
|
|
|
continue;
|
|
|
|
|
2013-12-28 21:56:47 +00:00
|
|
|
constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
|
2013-04-24 15:54:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-01-17 06:55:03 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2013-12-07 05:59:44 +00:00
|
|
|
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
|
|
|
|
// and possibly other custom schedulers.
|
2013-12-28 21:56:57 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2012-01-17 06:55:03 +00:00
|
|
|
|
2013-12-05 17:56:02 +00:00
|
|
|
static const unsigned InvalidCycle = ~0U;
|
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
SchedBoundary::~SchedBoundary() { delete HazardRec; }
|
2013-04-13 06:07:40 +00:00
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
// Reset all per-region scheduling state so the boundary can be reused for the
// next DAG.
void SchedBoundary::reset() {
  // A new HazardRec is created for each DAG and owned by SchedBoundary.
  // Destroying and reconstructing it is very expensive though. So keep
  // invalid, placeholder HazardRecs.
  if (HazardRec && HazardRec->isEnabled()) {
    delete HazardRec;
    HazardRec = nullptr;
  }
  Available.clear();
  Pending.clear();
  CheckPending = false;
  CurrCycle = 0;
  CurrMOps = 0;
  // UINT_MAX sentinel: any released node's ready cycle will lower this.
  MinReadyCycle = UINT_MAX;
  ExpectedLatency = 0;
  DependentLatency = 0;
  RetiredMOps = 0;
  MaxExecutedResCount = 0;
  ZoneCritResIdx = 0;
  IsResourceLimited = false;
  ReservedCycles.clear();
#ifndef NDEBUG
  // Track the maximum number of stall cycles that could arise either from the
  // latency of a DAG edge or the number of cycles that a processor resource is
  // reserved (SchedBoundary::ReservedCycles).
  MaxObservedStall = 0;
#endif
  // Reserve a zero-count for invalid CritResIdx.
  ExecutedResCounts.resize(1);
  assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}
|
2012-01-17 06:55:03 +00:00
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
void SchedRemainder::
|
2012-11-07 07:05:09 +00:00
|
|
|
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
|
|
|
|
reset();
|
|
|
|
if (!SchedModel->hasInstrSchedModel())
|
|
|
|
return;
|
|
|
|
RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
|
|
|
|
for (std::vector<SUnit>::iterator
|
|
|
|
I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
|
|
|
|
const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
|
2013-06-15 05:39:19 +00:00
|
|
|
RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
|
|
|
|
* SchedModel->getMicroOpFactor();
|
2012-11-07 07:05:09 +00:00
|
|
|
for (TargetSchedModel::ProcResIter
|
|
|
|
PI = SchedModel->getWriteProcResBegin(SC),
|
|
|
|
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
|
|
|
|
unsigned PIdx = PI->ProcResourceIdx;
|
|
|
|
unsigned Factor = SchedModel->getResourceFactor(PIdx);
|
|
|
|
RemainingCounts[PIdx] += (Factor * PI->Cycles);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
// Bind this boundary to a DAG/model/remainder triple for a new region, after
// clearing all prior state via reset().
void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel()) {
    // One counter per processor resource kind; ReservedCycles starts at the
    // InvalidCycle sentinel meaning "never reserved".
    ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
    ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
  }
}
|
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
|
|
|
|
/// these "soft stalls" differently than the hard stall cycles based on CPU
|
|
|
|
/// resources and computed by checkHazard(). A fully in-order model
|
|
|
|
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
|
|
|
|
/// available for scheduling until they are ready. However, a weaker in-order
|
|
|
|
/// model may use this for heuristics. For example, if a processor has in-order
|
|
|
|
/// behavior when reading certain resources, this may come into play.
|
|
|
|
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
|
|
|
|
if (!SU->isUnbuffered)
|
|
|
|
return 0;
|
2013-09-04 21:00:11 +00:00
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
|
|
|
|
if (ReadyCycle > CurrCycle)
|
|
|
|
return ReadyCycle - CurrCycle;
|
|
|
|
return 0;
|
|
|
|
}
|
2013-09-06 17:32:34 +00:00
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
/// Compute the next cycle at which the given processor resource can be
|
|
|
|
/// scheduled.
|
|
|
|
unsigned SchedBoundary::
|
|
|
|
getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
|
|
|
|
unsigned NextUnreserved = ReservedCycles[PIdx];
|
|
|
|
// If this resource has never been used, always return cycle zero.
|
|
|
|
if (NextUnreserved == InvalidCycle)
|
|
|
|
return 0;
|
|
|
|
// For bottom-up scheduling add the cycles needed for the current operation.
|
|
|
|
if (!isTop())
|
|
|
|
NextUnreserved += Cycles;
|
|
|
|
return NextUnreserved;
|
|
|
|
}
|
2013-12-05 17:56:02 +00:00
|
|
|
|
2012-06-29 03:23:24 +00:00
|
|
|
/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitraty target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool SchedBoundary::checkHazard(SUnit *SU) {
  // 1) Defer to the target's hazard recognizer when one is enabled.
  if (HazardRec->isEnabled()
      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
    return true;
  }
  // 2) Would issuing SU's micro-ops overflow the current issue group?
  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  // 3) Check reserved (unbuffered) resources: SU stalls if any resource it
  // writes is not available until after the current cycle.
  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
      if (NRCycle > CurrCycle) {
#ifndef NDEBUG
        MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
#endif
        DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
              << SchedModel->getResourceName(PI->ProcResourceIdx)
              << "=" << NRCycle << "c\n");
        return true;
      }
    }
  }
  return false;
}
|
|
|
|
|
2013-06-15 05:39:19 +00:00
|
|
|
// Find the unscheduled node in ReadySUs with the highest latency.
|
2013-12-07 05:59:44 +00:00
|
|
|
unsigned SchedBoundary::
|
2013-06-15 05:39:19 +00:00
|
|
|
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
|
2014-04-14 00:51:57 +00:00
|
|
|
SUnit *LateSU = nullptr;
|
2013-06-15 05:39:19 +00:00
|
|
|
unsigned RemLatency = 0;
|
|
|
|
for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
|
2012-12-18 20:52:56 +00:00
|
|
|
I != E; ++I) {
|
|
|
|
unsigned L = getUnscheduledLatency(*I);
|
2013-06-15 04:49:44 +00:00
|
|
|
if (L > RemLatency) {
|
2012-12-18 20:52:56 +00:00
|
|
|
RemLatency = L;
|
2013-06-15 05:39:19 +00:00
|
|
|
LateSU = *I;
|
2013-06-15 04:49:44 +00:00
|
|
|
}
|
2012-12-18 20:52:56 +00:00
|
|
|
}
|
2013-06-15 05:39:19 +00:00
|
|
|
if (LateSU) {
|
|
|
|
DEBUG(dbgs() << Available.getName() << " RemLatency SU("
|
|
|
|
<< LateSU->NodeNum << ") " << RemLatency << "c\n");
|
2012-12-18 20:52:56 +00:00
|
|
|
}
|
2013-06-15 05:39:19 +00:00
|
|
|
return RemLatency;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Count resources in this zone and the remaining unscheduled
// instruction. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  // Start with the issue-width pressure: micro-ops still to issue plus those
  // already retired, in micro-op-factor units.
  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
        << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  // PIdx starts at 1: index 0 is the reserved "invalid" resource slot.
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
          << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
          << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}
|
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
|
2014-06-07 01:48:43 +00:00
|
|
|
assert(SU->getInstr() && "Scheduled SUnit must have instr");
|
|
|
|
|
|
|
|
#ifndef NDEBUG
|
2014-06-12 22:36:28 +00:00
|
|
|
// ReadyCycle was been bumped up to the CurrCycle when this node was
|
|
|
|
// scheduled, but CurrCycle may have been eagerly advanced immediately after
|
|
|
|
// scheduling, so may now be greater than ReadyCycle.
|
|
|
|
if (ReadyCycle > CurrCycle)
|
|
|
|
MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
|
2014-06-07 01:48:43 +00:00
|
|
|
#endif
|
|
|
|
|
2012-05-24 22:11:09 +00:00
|
|
|
if (ReadyCycle < MinReadyCycle)
|
|
|
|
MinReadyCycle = ReadyCycle;
|
|
|
|
|
|
|
|
// Check for interlocks first. For the purpose of other heuristics, an
|
|
|
|
// instruction that cannot issue appears as if it's not in the ReadyQueue.
|
2013-06-15 05:39:19 +00:00
|
|
|
bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
|
2016-04-22 19:09:17 +00:00
|
|
|
if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
|
|
|
|
Available.size() >= ReadyListLimit)
|
2012-05-24 22:11:09 +00:00
|
|
|
Pending.push(SU);
|
|
|
|
else
|
|
|
|
Available.push(SU);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  // In a pure in-order model, nothing can become ready before the earliest
  // pending ready cycle, so jump straight to it.
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  } else {
    // Bypass getHazardType calls in case of long latency.
    // The recognizer must be stepped one cycle at a time, in the direction of
    // this boundary (top-down advances, bottom-up recedes).
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  // Pending nodes may have become ready at the new cycle.
  CheckPending = true;
  // Re-evaluate whether this zone is limited by resources or by latency.
  unsigned LFactor = SchedModel->getLatencyFactor();
  IsResourceLimited =
    (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
    > (int)LFactor;

  DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
}
|
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
|
2013-06-15 05:39:19 +00:00
|
|
|
ExecutedResCounts[PIdx] += Count;
|
|
|
|
if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
|
|
|
|
MaxExecutedResCount = ExecutedResCounts[PIdx];
|
2012-05-24 22:11:09 +00:00
|
|
|
}
|
|
|
|
|
2012-11-07 07:05:09 +00:00
|
|
|
/// Add the given processor resource to this scheduled zone.
///
/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
/// during which this resource is consumed.
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
unsigned SchedBoundary::
countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
  // Scale raw cycles by the resource factor so counts are comparable across
  // resource kinds.
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  unsigned Count = Factor * Cycles;
  DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
        << " +" << Cycles << "x" << Factor << "u\n");

  // Update Executed resources counts.
  incExecutedResources(PIdx, Count);
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource. If so, it
  // becomes the critical resource.
  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
    ZoneCritResIdx = PIdx;
    DEBUG(dbgs() << "  *** Critical resource "
          << SchedModel->getResourceName(PIdx) << ": "
          << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
  }
  // For reserved resources, record the highest cycle using the resource.
  unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
  if (NextAvailable > CurrCycle) {
    DEBUG(dbgs() << "  Resource conflict: "
          << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
          << NextAvailable << "\n");
  }
  return NextAvailable;
}
|
|
|
|
|
2012-06-05 21:11:27 +00:00
|
|
|
/// Move the boundary of scheduled code by one SUnit.
|
2013-12-07 05:59:44 +00:00
|
|
|
void SchedBoundary::bumpNode(SUnit *SU) {
|
2012-06-05 21:11:27 +00:00
|
|
|
// Update the reservation table.
|
|
|
|
if (HazardRec->isEnabled()) {
|
|
|
|
if (!isTop() && SU->isCall) {
|
|
|
|
// Calls are scheduled with their preceding instructions. For bottom-up
|
|
|
|
// scheduling, clear the pipeline state before emitting.
|
|
|
|
HazardRec->Reset();
|
|
|
|
}
|
|
|
|
HazardRec->EmitInstruction(SU);
|
|
|
|
}
|
2013-12-05 17:56:02 +00:00
|
|
|
// checkHazard should prevent scheduling multiple instructions per cycle that
|
|
|
|
// exceed the issue width.
|
2013-06-15 05:39:19 +00:00
|
|
|
const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
|
|
|
|
unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
|
2013-12-06 08:58:22 +00:00
|
|
|
assert(
|
|
|
|
(CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
|
2013-12-06 17:19:20 +00:00
|
|
|
"Cannot schedule this instruction's MicroOps in the current cycle.");
|
2013-12-05 17:56:02 +00:00
|
|
|
|
2013-06-15 05:39:19 +00:00
|
|
|
unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
|
|
|
|
DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
|
|
|
|
|
2013-12-05 17:56:02 +00:00
|
|
|
unsigned NextCycle = CurrCycle;
|
2013-06-15 05:39:19 +00:00
|
|
|
switch (SchedModel->getMicroOpBufferSize()) {
|
|
|
|
case 0:
|
|
|
|
assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
if (ReadyCycle > NextCycle) {
|
|
|
|
NextCycle = ReadyCycle;
|
|
|
|
DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n");
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
// We don't currently model the OOO reorder buffer, so consider all
|
2013-12-05 17:55:58 +00:00
|
|
|
// scheduled MOps to be "retired". We do loosely model in-order resource
|
|
|
|
// latency. If this instruction uses an in-order resource, account for any
|
|
|
|
// likely stall cycles.
|
|
|
|
if (SU->isUnbuffered && ReadyCycle > NextCycle)
|
|
|
|
NextCycle = ReadyCycle;
|
2013-06-15 05:39:19 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
RetiredMOps += IncMOps;
|
|
|
|
|
2012-11-07 07:05:09 +00:00
|
|
|
// Update resource counts and critical resource.
|
|
|
|
if (SchedModel->hasInstrSchedModel()) {
|
2013-06-15 05:39:19 +00:00
|
|
|
unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
|
|
|
|
assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
|
|
|
|
Rem->RemIssueCount -= DecRemIssue;
|
|
|
|
if (ZoneCritResIdx) {
|
|
|
|
// Scale scheduled micro-ops for comparing with the critical resource.
|
|
|
|
unsigned ScaledMOps =
|
|
|
|
RetiredMOps * SchedModel->getMicroOpFactor();
|
|
|
|
|
|
|
|
// If scaled micro-ops are now more than the previous critical resource by
|
|
|
|
// a full cycle, then micro-ops issue becomes critical.
|
|
|
|
if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
|
|
|
|
>= (int)SchedModel->getLatencyFactor()) {
|
|
|
|
ZoneCritResIdx = 0;
|
|
|
|
DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
|
|
|
|
<< ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
|
|
|
|
}
|
|
|
|
}
|
2012-11-07 07:05:09 +00:00
|
|
|
for (TargetSchedModel::ProcResIter
|
|
|
|
PI = SchedModel->getWriteProcResBegin(SC),
|
|
|
|
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
|
2013-06-15 05:39:19 +00:00
|
|
|
unsigned RCycle =
|
2013-12-05 17:56:02 +00:00
|
|
|
countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
|
2013-06-15 05:39:19 +00:00
|
|
|
if (RCycle > NextCycle)
|
|
|
|
NextCycle = RCycle;
|
2012-11-07 07:05:09 +00:00
|
|
|
}
|
2013-12-05 17:56:02 +00:00
|
|
|
if (SU->hasReservedResource) {
|
|
|
|
// For reserved resources, record the highest cycle using the resource.
|
|
|
|
// For top-down scheduling, this is the cycle in which we schedule this
|
|
|
|
// instruction plus the number of cycles the operations reserves the
|
|
|
|
// resource. For bottom-up is it simply the instruction's cycle.
|
|
|
|
for (TargetSchedModel::ProcResIter
|
|
|
|
PI = SchedModel->getWriteProcResBegin(SC),
|
|
|
|
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
|
|
|
|
unsigned PIdx = PI->ProcResourceIdx;
|
2013-12-28 21:56:57 +00:00
|
|
|
if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
|
2014-07-02 16:46:08 +00:00
|
|
|
if (isTop()) {
|
|
|
|
ReservedCycles[PIdx] =
|
|
|
|
std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
ReservedCycles[PIdx] = NextCycle;
|
2013-12-28 21:56:57 +00:00
|
|
|
}
|
2013-12-05 17:56:02 +00:00
|
|
|
}
|
|
|
|
}
|
2012-11-07 07:05:09 +00:00
|
|
|
}
|
2013-06-15 05:39:19 +00:00
|
|
|
// Update ExpectedLatency and DependentLatency.
|
2013-06-15 04:49:44 +00:00
|
|
|
unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
|
|
|
|
unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
|
2013-06-15 05:39:19 +00:00
|
|
|
if (SU->getDepth() > TopLatency) {
|
2013-06-15 04:49:44 +00:00
|
|
|
TopLatency = SU->getDepth();
|
2013-06-15 05:39:19 +00:00
|
|
|
DEBUG(dbgs() << " " << Available.getName()
|
|
|
|
<< " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
|
|
|
|
}
|
|
|
|
if (SU->getHeight() > BotLatency) {
|
2013-06-15 04:49:44 +00:00
|
|
|
BotLatency = SU->getHeight();
|
2013-06-15 05:39:19 +00:00
|
|
|
DEBUG(dbgs() << " " << Available.getName()
|
|
|
|
<< " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
|
2012-06-05 21:11:27 +00:00
|
|
|
}
|
2013-06-15 05:39:19 +00:00
|
|
|
// If we stall for any reason, bump the cycle.
|
|
|
|
if (NextCycle > CurrCycle) {
|
|
|
|
bumpCycle(NextCycle);
|
2016-04-21 01:54:13 +00:00
|
|
|
} else {
|
2013-06-15 05:39:19 +00:00
|
|
|
// After updating ZoneCritResIdx and ExpectedLatency, check if we're
|
2014-01-24 17:20:08 +00:00
|
|
|
// resource limited. If a stall occurred, bumpCycle does this.
|
2013-06-15 05:39:19 +00:00
|
|
|
unsigned LFactor = SchedModel->getLatencyFactor();
|
|
|
|
IsResourceLimited =
|
|
|
|
(int)(getCriticalCount() - (getScheduledLatency() * LFactor))
|
|
|
|
> (int)LFactor;
|
|
|
|
}
|
2013-12-05 17:56:02 +00:00
|
|
|
// Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
|
|
|
|
// resets CurrMOps. Loop to handle instructions with more MOps than issue in
|
|
|
|
// one cycle. Since we commonly reach the max MOps here, opportunistically
|
|
|
|
// bump the cycle to avoid uselessly checking everything in the readyQ.
|
|
|
|
CurrMOps += IncMOps;
|
|
|
|
while (CurrMOps >= SchedModel->getIssueWidth()) {
|
|
|
|
DEBUG(dbgs() << " *** Max MOps " << CurrMOps
|
|
|
|
<< " at cycle " << CurrCycle << '\n');
|
2013-12-28 21:56:57 +00:00
|
|
|
bumpCycle(++NextCycle);
|
2013-12-05 17:56:02 +00:00
|
|
|
}
|
2013-06-15 05:39:19 +00:00
|
|
|
DEBUG(dumpScheduledState());
|
2012-06-05 21:11:27 +00:00
|
|
|
}
|
|
|
|
|
2012-05-24 22:11:09 +00:00
|
|
|
/// Release pending ready nodes in to the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  // With no micro-op buffer the zone is strictly in-order: a node whose ready
  // cycle is still in the future cannot be released yet.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    // Direction-dependent ready cycle: top-down zones track TopReadyCycle,
    // bottom-up zones track BotReadyCycle.
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    // Track the earliest ready cycle seen so far across all pending nodes.
    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    // Unbuffered zone: leave not-yet-ready nodes pending.
    if (!IsBuffered && ReadyCycle > CurrCycle)
      continue;

    // Nodes with an issue hazard stay pending as well.
    if (checkHazard(SU))
      continue;

    // Cap the available queue size to bound heuristic work per pick.
    if (Available.size() >= ReadyListLimit)
      break;

    Available.push(SU);
    // Pending.remove swaps in the last element, so step the index (and the
    // cached size) back to revisit the swapped-in node next iteration.
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  CheckPending = false;
}
|
|
|
|
|
|
|
|
/// Remove SU from the ready set for this boundary.
|
2013-12-07 05:59:44 +00:00
|
|
|
void SchedBoundary::removeReady(SUnit *SU) {
|
2012-05-24 22:11:09 +00:00
|
|
|
if (Available.isInQueue(SU))
|
|
|
|
Available.remove(Available.find(SU));
|
|
|
|
else {
|
|
|
|
assert(Pending.isInQueue(SU) && "bad ready count");
|
|
|
|
Pending.remove(Pending.find(SU));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *SchedBoundary::pickOnlyChoice() {
  // Pull any newly-ready nodes out of Pending first so the choice below sees
  // the full ready set.
  if (CheckPending)
    releasePending();

  if (CurrMOps > 0) {
    // Defer any ready instrs that now have a hazard.
    // Available.remove returns the iterator to the swapped-in element, so the
    // loop only advances I when no removal happened.
    for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
      if (checkHazard(*I)) {
        Pending.push(*I);
        I = Available.remove(I);
        continue;
      }
      ++I;
    }
  }
  // Keep bumping the cycle (and re-releasing Pending) until something becomes
  // available; an empty Available queue here means every node is stalled.
  for (unsigned i = 0; Available.empty(); ++i) {
    //  FIXME: Re-enable assert once PR20057 is resolved.
    //  assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
    //         "permanent hazard");
    (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }

  DEBUG(Pending.dump());
  DEBUG(Available.dump());

  // Exactly one candidate: the pick is forced, no heuristics needed.
  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}
|
|
|
|
|
2013-06-15 05:46:47 +00:00
|
|
|
#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
//
// Prints the zone's current cycle, retired/executed micro-op counts, the
// critical resource tally, and whether the zone is resource or latency
// limited. Counts are scaled; divide by the latency factor to get cycles.
void SchedBoundary::dumpScheduledState() {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    // A specific processor resource is critical for this zone.
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  } else {
    // Index 0 means micro-op issue itself is the critical "resource".
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << " Retired: " << RetiredMOps;
  dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? " - Resource" : " - Latency")
         << " limited.\n";
}
#endif
|
2012-11-07 07:05:09 +00:00
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2013-12-28 21:56:57 +00:00
|
|
|
// GenericScheduler - Generic implementation of MachineSchedStrategy.
|
2013-12-07 05:59:44 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2012-11-07 07:05:09 +00:00
|
|
|
|
2013-12-28 21:56:57 +00:00
|
|
|
void GenericSchedulerBase::SchedCandidate::
|
|
|
|
initResourceDelta(const ScheduleDAGMI *DAG,
|
|
|
|
const TargetSchedModel *SchedModel) {
|
|
|
|
if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
|
|
|
|
return;
|
|
|
|
|
|
|
|
const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
|
|
|
|
for (TargetSchedModel::ProcResIter
|
|
|
|
PI = SchedModel->getWriteProcResBegin(SC),
|
|
|
|
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
|
|
|
|
if (PI->ProcResourceIdx == Policy.ReduceResIdx)
|
|
|
|
ResDelta.CritResources += PI->Cycles;
|
|
|
|
if (PI->ProcResourceIdx == Policy.DemandResIdx)
|
|
|
|
ResDelta.DemandedResources += PI->Cycles;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Set the CandPolicy given a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute remaining latency. We need this both to determine whether the
  // overall schedule has become latency-limited and whether the instructions
  // outside this zone are resource or latency limited.
  //
  // The "dependent" latency is updated incrementally during scheduling as the
  // max height/depth of scheduled nodes minus the cycles since it was
  // scheduled:
  //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone
  //
  // The "independent" latency is the max ready queue depth:
  //   ILat = max N.depth for N in Available|Pending
  //
  // RemainingLatency is the greater of independent and dependent latency.
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));

  // Compute the critical resource outside the zone.
  // getOtherResourceCount also reports which resource is critical there.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
    OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  if (SchedModel->hasInstrSchedModel()) {
    // Scaled comparison: counts are pre-multiplied by the latency factor.
    unsigned LFactor = SchedModel->getLatencyFactor();
    OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
  }
  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited) {
    if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
      Policy.ReduceLatency |= true;
      DEBUG(dbgs() << " " << CurrZone.Available.getName()
            << " RemainingLatency " << RemLatency << " + "
            << CurrZone.getCurrCycle() << "c > CritPath "
            << Rem.CriticalPath << "\n");
    }
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  DEBUG(
    if (CurrZone.isResourceLimited()) {
      dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
             << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
             << "\n";
    }
    if (OtherResLimited)
      dbgs() << " RemainingLimit: "
             << SchedModel->getResourceName(OtherCritIdx) << "\n";
    if (!CurrZone.isResourceLimited() && !OtherResLimited)
      dbgs() << " Latency limited both directions.\n");

  // Only set ReduceResIdx if the caller has not already chosen one.
  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}
|
|
|
|
|
|
|
|
#ifndef NDEBUG
|
|
|
|
/// Map a candidate-selection reason to a fixed-width label for debug traces.
///
/// The returned strings are padded so columns line up in traceCandidate
/// output. Falls through to llvm_unreachable for enumerators not listed.
const char *GenericSchedulerBase::getReasonStr(
  GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case Only1:          return "ONLY1     ";
  case PhysRegCopy:    return "PREG-COPY ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  } // Fixed: the original had a stray ';' (empty statement) after this brace.
  llvm_unreachable("Unknown reason!");
}
|
|
|
|
|
|
|
|
// Print one line describing why a candidate won: its node number, the winning
// reason, and the reason-specific detail (pressure set delta, resource name,
// or latency in cycles). Column widths match getReasonStr's padded labels.
void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  // Pick the detail relevant to this reason; unrelated reasons print blanks.
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << " Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << " ";
  // ResIdx 0 is the implicit micro-op resource; only print named resources.
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << " ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << " ";
  dbgs() << '\n';
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/// Return true if this heuristic determines order.
|
|
|
|
static bool tryLess(int TryVal, int CandVal,
|
|
|
|
GenericSchedulerBase::SchedCandidate &TryCand,
|
|
|
|
GenericSchedulerBase::SchedCandidate &Cand,
|
|
|
|
GenericSchedulerBase::CandReason Reason) {
|
|
|
|
if (TryVal < CandVal) {
|
|
|
|
TryCand.Reason = Reason;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (TryVal > CandVal) {
|
|
|
|
if (Cand.Reason > Reason)
|
|
|
|
Cand.Reason = Reason;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool tryGreater(int TryVal, int CandVal,
|
|
|
|
GenericSchedulerBase::SchedCandidate &TryCand,
|
|
|
|
GenericSchedulerBase::SchedCandidate &Cand,
|
|
|
|
GenericSchedulerBase::CandReason Reason) {
|
|
|
|
if (TryVal > CandVal) {
|
|
|
|
TryCand.Reason = Reason;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (TryVal < CandVal) {
|
|
|
|
if (Cand.Reason > Reason)
|
|
|
|
Cand.Reason = Reason;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Latency heuristic, direction-aware. In a top-down zone prefer smaller depth
// (the candidate stalls less) and larger height (it lies on a longer remaining
// path); a bottom-up zone is the mirror image with height and depth swapped.
// Returns true when the comparison decided the order.
static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       SchedBoundary &Zone) {
  if (Zone.isTop()) {
    // Only compare depths when Cand's depth actually exceeds the zone's
    // scheduled latency, i.e. picking it would stretch the schedule.
    if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                  TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
        return true;
    }
    // Otherwise prefer the node on the longer remaining critical path.
    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                   TryCand, Cand, GenericSchedulerBase::TopPathReduce))
      return true;
  } else {
    // Bottom-up: height plays the role depth plays above.
    if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                  TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                   TryCand, Cand, GenericSchedulerBase::BotPathReduce))
      return true;
  }
  return false;
}
|
|
|
|
|
2016-05-27 22:14:26 +00:00
|
|
|
// Emit a one-line debug trace naming the boundary (Top/Bot) and the reason a
// node was picked. Compiles away in NDEBUG builds via the DEBUG macro.
static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
  DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
        << GenericSchedulerBase::getReasonStr(Reason) << '\n');
}
|
|
|
|
|
2016-06-25 00:23:00 +00:00
|
|
|
// Convenience overload: trace a full candidate using its recorded reason and
// the boundary it was evaluated at.
static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
  tracePick(Cand.Reason, Cand.AtTop);
}
|
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
// Per-region setup: bind the DAG, scheduling model and register info, reset
// the remaining-work tracker and both zones, lazily create hazard
// recognizers, and clear any stale candidate from the previous region.
void GenericScheduler::initialize(ScheduleDAGMI *dag) {
  // This strategy requires liveness, so the DAG must be the Live variant.
  assert(dag->hasVRegLiveness() &&
         "(PreRA)GenericScheduler needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  // Only create recognizers once; the zones keep them across regions.
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  // Invalidate cached candidates carried over from a previous region.
  TopCand.SU = nullptr;
  BotCand.SU = nullptr;
}
|
|
|
|
|
|
|
|
/// Initialize the per-region scheduling policy.
///
/// Order matters here: the heuristic defaults are computed first, then the
/// subtarget may override them, and finally command-line flags override both.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getParent()->getParent();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time. As a rough heuristic, only track pressure when the number of
  // schedulable instructions exceeds half the integer register file.
  RegionPolicy.ShouldTrackPressure = true;
  // Walk integer types from i32 down to (but excluding) i1; the last legal
  // type visited — the smallest legal integer type — decides the threshold.
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
        TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and more
  // compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure)
    RegionPolicy.ShouldTrackPressure = false;

  // Check -misched-topdown/bottomup can force or unforce scheduling direction.
  // e.g. -misched-bottomup=false allows scheduling in both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  // getNumOccurrences() > 0 means the flag was explicitly given, so even a
  // "false" value is an intentional override.
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}
|
|
|
|
|
2015-09-18 18:52:20 +00:00
|
|
|
/// Debug-print the region policy flags currently in effect, one line total.
void GenericScheduler::dumpPolicy() {
  // Emit the same text as one logical line, field by field.
  dbgs() << "GenericScheduler RegionPolicy: ";
  dbgs() << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure;
  dbgs() << " OnlyTopDown=" << RegionPolicy.OnlyTopDown;
  dbgs() << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp;
  dbgs() << "\n";
}
|
|
|
|
|
2013-12-07 05:59:44 +00:00
|
|
|
/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bounds on in-flight instructions as:
///
/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
/// InFlightIterations = AcyclicPath / CyclesPerIteration
/// InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  // Only meaningful when a cyclic critical path exists and is shorter than
  // the acyclic one.
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // All counts below are in the scaled fixed-point domain (multiplied by the
  // latency factor) so latency and issue counts are comparable.
  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
    std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
             Rem.RemIssueCount);
  // Scaled acyclic critical path.
  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
  // (rounded up via the +IterCount-1 term).
  unsigned InFlightCount =
    (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
  unsigned BufferLimit =
    SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();

  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;

  DEBUG(dbgs() << "IssueCycles="
        << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
        << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
        << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
        << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
        << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
        if (Rem.IsAcyclicLatencyLimited)
          dbgs() << "  ACYCLIC LATENCY LIMIT\n");
}
|
|
|
|
|
|
|
|
void GenericScheduler::registerRoots() {
|
|
|
|
Rem.CriticalPath = DAG->ExitSU.getDepth();
|
|
|
|
|
|
|
|
// Some roots may not feed into ExitSU. Check all of them in case.
|
|
|
|
for (std::vector<SUnit*>::const_iterator
|
|
|
|
I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
|
|
|
|
if ((*I)->getDepth() > Rem.CriticalPath)
|
|
|
|
Rem.CriticalPath = (*I)->getDepth();
|
|
|
|
}
|
2014-08-07 21:49:44 +00:00
|
|
|
DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
|
|
|
|
if (DumpCriticalPathLength) {
|
|
|
|
errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
|
|
|
|
}
|
2013-12-07 05:59:44 +00:00
|
|
|
|
|
|
|
if (EnableCyclicPath) {
|
|
|
|
Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
|
|
|
|
checkAcyclicLatency();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-30 03:49:48 +00:00
|
|
|
// Register-pressure comparison heuristic. Decides between two candidates by
// (1) sign of the pressure change, (2) magnitude within the same pressure set,
// (3) target-defined pressure-set rank. Returns true when it decided.
static bool tryPressure(const PressureChange &TryP,
                        const PressureChange &CandP,
                        GenericSchedulerBase::SchedCandidate &TryCand,
                        GenericSchedulerBase::SchedCandidate &Cand,
                        GenericSchedulerBase::CandReason Reason,
                        const TargetRegisterInfo *TRI,
                        const MachineFunction &MF) {
  // If one candidate decreases and the other increases, go with it.
  // Invalid candidates have UnitInc==0.
  if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
                 Reason)) {
    return true;
  }
  // Do not compare the magnitude of pressure changes between top and bottom
  // boundary.
  if (Cand.AtTop != TryCand.AtTop)
    return false;

  // If both candidates affect the same set in the same boundary, go with the
  // smallest increase.
  unsigned TryPSet = TryP.getPSetOrMax();
  unsigned CandPSet = CandP.getPSetOrMax();
  if (TryPSet == CandPSet) {
    return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
                   Reason);
  }

  // Different sets: rank them by the target's pressure-set score; an invalid
  // change ranks last (INT_MAX).
  int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
                std::numeric_limits<int>::max();

  int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
                 std::numeric_limits<int>::max();

  // If the candidates are decreasing pressure, reverse priority.
  if (TryP.getUnitInc() < 0)
    std::swap(TryRank, CandRank);
  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}
|
|
|
|
|
2012-11-12 19:40:10 +00:00
|
|
|
static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
|
|
|
|
return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
|
|
|
|
}
|
|
|
|
|
2013-04-13 06:07:40 +00:00
|
|
|
/// Minimize physical register live ranges. Regalloc wants them adjacent to
|
|
|
|
/// their physreg def/use.
|
|
|
|
///
|
|
|
|
/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
|
|
|
|
/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
|
|
|
|
/// with the operation that produces or consumes the physreg. We'll do this when
|
|
|
|
/// regalloc has support for parallel copies.
|
|
|
|
static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
|
|
|
|
const MachineInstr *MI = SU->getInstr();
|
|
|
|
if (!MI->isCopy())
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
unsigned ScheduledOper = isTop ? 1 : 0;
|
|
|
|
unsigned UnscheduledOper = isTop ? 0 : 1;
|
|
|
|
// If we have already scheduled the physreg produce/consumer, immediately
|
|
|
|
// schedule the copy.
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(
|
|
|
|
MI->getOperand(ScheduledOper).getReg()))
|
|
|
|
return 1;
|
|
|
|
// If the physreg is at the boundary, defer it. Otherwise schedule it
|
|
|
|
// immediately to free the dependent. We can hoist the copy later.
|
|
|
|
bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(
|
|
|
|
MI->getOperand(UnscheduledOper).getReg()))
|
|
|
|
return AtBoundary ? -1 : 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-04-22 19:10:15 +00:00
|
|
|
// Fill in a candidate's SU/boundary and, when pressure tracking is on, its
// register-pressure delta. Three tracker variants are used: max-downward for
// top-down, and for bottom-up either the expensive max-upward (verification
// mode) or the cheap PressureDiff-based upward delta.
void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop,
                                     const RegPressureTracker &RPTracker,
                                     RegPressureTracker &TempTracker) {
  Cand.SU = SU;
  Cand.AtTop = AtTop;
  if (DAG->isTrackingPressure()) {
    if (AtTop) {
      TempTracker.getMaxDownwardPressureDelta(
        Cand.SU->getInstr(),
        Cand.RPDelta,
        DAG->getRegionCriticalPSets(),
        DAG->getRegPressure().MaxSetPressure);
    } else {
      if (VerifyScheduling) {
        // Slow path cross-checks the PressureDiff against a full recompute.
        TempTracker.getMaxUpwardPressureDelta(
          Cand.SU->getInstr(),
          &DAG->getPressureDiff(Cand.SU),
          Cand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      } else {
        // Fast path uses the incrementally-maintained PressureDiff.
        RPTracker.getUpwardPressureDelta(
          Cand.SU->getInstr(),
          DAG->getPressureDiff(Cand.SU),
          Cand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  DEBUG(if (Cand.RPDelta.Excess.isValid())
          dbgs() << "  Try  SU(" << Cand.SU->NodeNum << ") "
                 << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet())
                 << ":" << Cand.RPDelta.Excess.getUnitInc() << "\n");
}
|
2012-11-07 07:05:09 +00:00
|
|
|
|
2016-04-22 19:10:15 +00:00
|
|
|
/// Apply a set of heursitics to a new candidate. Heuristics are currently
|
|
|
|
/// hierarchical. This may be more efficient than a graduated cost model because
|
|
|
|
/// we don't need to evaluate all aspects of the model for each node in the
|
|
|
|
/// queue. But it's really done to make the heuristics easier to debug and
|
|
|
|
/// statistically analyze.
|
|
|
|
///
|
|
|
|
/// \param Cand provides the policy and current best candidate.
|
|
|
|
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
|
2016-06-25 00:23:00 +00:00
|
|
|
/// \param Zone describes the scheduled zone that we are extending, or nullptr
|
|
|
|
// if Cand is from a different zone than TryCand.
|
2016-04-22 19:10:15 +00:00
|
|
|
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
|
|
|
|
SchedCandidate &TryCand,
|
2016-06-25 00:23:00 +00:00
|
|
|
SchedBoundary *Zone) {
|
2012-11-07 07:05:09 +00:00
|
|
|
// Initialize the candidate if needed.
|
|
|
|
if (!Cand.isValid()) {
|
|
|
|
TryCand.Reason = NodeOrder;
|
|
|
|
return;
|
|
|
|
}
|
2013-04-13 06:07:40 +00:00
|
|
|
|
2016-06-25 00:23:00 +00:00
|
|
|
if (tryGreater(biasPhysRegCopy(TryCand.SU, TryCand.AtTop),
|
|
|
|
biasPhysRegCopy(Cand.SU, Cand.AtTop),
|
2013-04-13 06:07:40 +00:00
|
|
|
TryCand, Cand, PhysRegCopy))
|
|
|
|
return;
|
|
|
|
|
2015-05-17 23:40:27 +00:00
|
|
|
// Avoid exceeding the target's limit.
|
2013-09-04 21:00:11 +00:00
|
|
|
if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
|
|
|
|
Cand.RPDelta.Excess,
|
2015-12-16 18:31:01 +00:00
|
|
|
TryCand, Cand, RegExcess, TRI,
|
|
|
|
DAG->MF))
|
2012-11-07 07:05:09 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
// Avoid increasing the max critical pressure in the scheduled region.
|
2013-09-04 21:00:11 +00:00
|
|
|
if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
|
|
|
|
Cand.RPDelta.CriticalMax,
|
2015-12-16 18:31:01 +00:00
|
|
|
TryCand, Cand, RegCritical, TRI,
|
|
|
|
DAG->MF))
|
2012-11-07 07:05:09 +00:00
|
|
|
return;
|
|
|
|
|
2016-06-25 00:23:00 +00:00
|
|
|
// We only compare a subset of features when comparing nodes between
|
|
|
|
// Top and Bottom boundary. Some properties are simply incomparable, in many
|
|
|
|
// other instances we should only override the other boundary if something
|
|
|
|
// is a clear good pick on one boundary. Skip heuristics that are more
|
|
|
|
// "tie-breaking" in nature.
|
|
|
|
bool SameBoundary = Zone != nullptr;
|
|
|
|
if (SameBoundary) {
|
|
|
|
// For loops that are acyclic path limited, aggressively schedule for
|
2016-11-04 08:31:14 +00:00
|
|
|
// latency. Within an single cycle, whenever CurrMOps > 0, allow normal
|
|
|
|
// heuristics to take precedence.
|
2016-06-25 00:23:00 +00:00
|
|
|
if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
|
|
|
|
tryLatency(TryCand, Cand, *Zone))
|
|
|
|
return;
|
2013-09-06 17:32:36 +00:00
|
|
|
|
2016-06-25 00:23:00 +00:00
|
|
|
// Prioritize instructions that read unbuffered resources by stall cycles.
|
|
|
|
if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
|
|
|
|
Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
|
|
|
|
return;
|
|
|
|
}
|
2013-12-05 17:55:58 +00:00
|
|
|
|
2012-11-12 19:40:10 +00:00
|
|
|
// Keep clustered nodes together to encourage downstream peephole
|
|
|
|
// optimizations which may reduce resource requirements.
|
|
|
|
//
|
|
|
|
// This is a best effort to set things up for a post-RA pass. Optimizations
|
|
|
|
// like generating loads of multiple registers should ideally be done within
|
|
|
|
// the scheduler pass by combining the loads during DAG postprocessing.
|
2016-06-25 00:23:00 +00:00
|
|
|
const SUnit *CandNextClusterSU =
|
|
|
|
Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
|
|
|
|
const SUnit *TryCandNextClusterSU =
|
|
|
|
TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
|
|
|
|
if (tryGreater(TryCand.SU == TryCandNextClusterSU,
|
|
|
|
Cand.SU == CandNextClusterSU,
|
2012-11-12 19:40:10 +00:00
|
|
|
TryCand, Cand, Cluster))
|
|
|
|
return;
|
2013-04-24 15:54:43 +00:00
|
|
|
|
2016-06-25 00:23:00 +00:00
|
|
|
if (SameBoundary) {
|
|
|
|
// Weak edges are for clustering and other constraints.
|
|
|
|
if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
|
|
|
|
getWeakLeft(Cand.SU, Cand.AtTop),
|
|
|
|
TryCand, Cand, Weak))
|
|
|
|
return;
|
2012-11-12 19:40:10 +00:00
|
|
|
}
|
2016-06-25 00:23:00 +00:00
|
|
|
|
2013-06-17 21:45:13 +00:00
|
|
|
// Avoid increasing the max pressure of the entire region.
|
2013-09-04 21:00:11 +00:00
|
|
|
if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
|
|
|
|
Cand.RPDelta.CurrentMax,
|
2015-12-16 18:31:01 +00:00
|
|
|
TryCand, Cand, RegMax, TRI,
|
|
|
|
DAG->MF))
|
2013-06-17 21:45:13 +00:00
|
|
|
return;
|
|
|
|
|
2016-06-25 00:23:00 +00:00
|
|
|
if (SameBoundary) {
|
|
|
|
// Avoid critical resource consumption and balance the schedule.
|
|
|
|
TryCand.initResourceDelta(DAG, SchedModel);
|
|
|
|
if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
|
|
|
|
TryCand, Cand, ResourceReduce))
|
|
|
|
return;
|
|
|
|
if (tryGreater(TryCand.ResDelta.DemandedResources,
|
|
|
|
Cand.ResDelta.DemandedResources,
|
|
|
|
TryCand, Cand, ResourceDemand))
|
|
|
|
return;
|
2012-11-07 07:05:09 +00:00
|
|
|
|
2016-06-25 00:23:00 +00:00
|
|
|
// Avoid serializing long latency dependence chains.
|
|
|
|
// For acyclic path limited loops, latency was already checked above.
|
|
|
|
if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
|
|
|
|
!Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
|
|
|
|
return;
|
2012-11-07 07:05:09 +00:00
|
|
|
|
2016-06-25 00:23:00 +00:00
|
|
|
// Fall through to original instruction order.
|
|
|
|
if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
|
|
|
|
|| (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
|
|
|
|
TryCand.Reason = NodeOrder;
|
|
|
|
}
|
2012-11-07 07:05:09 +00:00
|
|
|
}
|
2012-05-10 21:06:19 +00:00
|
|
|
}
|
|
|
|
|
2013-09-06 17:32:44 +00:00
|
|
|
/// Pick the best candidate from the queue.
|
2012-05-10 21:06:16 +00:00
|
|
|
///
|
|
|
|
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
|
|
|
|
/// DAG building. To adjust for the current scheduling location we need to
|
|
|
|
/// maintain the number of vreg uses remaining to be top-scheduled.
|
2013-09-19 23:10:59 +00:00
|
|
|
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
|
2016-06-25 00:23:00 +00:00
|
|
|
const CandPolicy &ZonePolicy,
|
2013-12-05 17:55:47 +00:00
|
|
|
const RegPressureTracker &RPTracker,
|
|
|
|
SchedCandidate &Cand) {
|
2012-05-10 21:06:16 +00:00
|
|
|
// getMaxPressureDelta temporarily modifies the tracker.
|
|
|
|
RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
|
|
|
|
|
2016-06-23 21:27:38 +00:00
|
|
|
ReadyQueue &Q = Zone.Available;
|
2012-05-24 22:11:03 +00:00
|
|
|
for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
|
2012-05-10 21:06:16 +00:00
|
|
|
|
2016-06-25 00:23:00 +00:00
|
|
|
SchedCandidate TryCand(ZonePolicy);
|
2016-04-22 19:10:15 +00:00
|
|
|
initCandidate(TryCand, *I, Zone.isTop(), RPTracker, TempTracker);
|
2016-06-25 00:23:00 +00:00
|
|
|
// Pass SchedBoundary only when comparing nodes from the same boundary.
|
|
|
|
SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
|
|
|
|
tryCandidate(Cand, TryCand, ZoneArg);
|
2012-11-07 07:05:09 +00:00
|
|
|
if (TryCand.Reason != NoCand) {
|
|
|
|
// Initialize resource delta if needed in case future heuristics query it.
|
|
|
|
if (TryCand.ResDelta == SchedResourceDelta())
|
|
|
|
TryCand.initResourceDelta(DAG, SchedModel);
|
|
|
|
Cand.setBest(TryCand);
|
2013-04-05 00:31:29 +00:00
|
|
|
DEBUG(traceCandidate(Cand));
|
2012-05-10 21:06:16 +00:00
|
|
|
}
|
|
|
|
}
|
2012-11-07 07:05:09 +00:00
|
|
|
}
|
|
|
|
|
2012-05-17 18:35:10 +00:00
|
|
|
/// Pick the best candidate node from either the top or bottom queue.
///
/// Picks the zone with a forced choice first; otherwise compares the cached
/// best candidate of each zone, re-evaluating a cached candidate only when it
/// has been invalidated (scheduled, or its policy changed).
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    tracePick(Only1, false);
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    tracePick(Only1, true);
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    // Cached candidate is stale; re-pick from the bottom queue.
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    // Under -verify-misched, check that the cached pick matches a fresh pick.
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    // Cached candidate is stale; re-pick from the top queue.
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
      assert(TCand.SU == TopCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  assert(BotCand.isValid());
  assert(TopCand.isValid());
  SchedCandidate Cand = BotCand;
  // Clear TopCand's reason so tryCandidate reports whether it beat BotCand.
  TopCand.Reason = NoCand;
  tryCandidate(Cand, TopCand, nullptr);
  if (TopCand.Reason != NoCand) {
    Cand.setBest(TopCand);
    DEBUG(traceCandidate(Cand));
  }

  IsTopNode = Cand.AtTop;
  tracePick(Cand);
  return Cand.SU;
}
|
|
|
|
|
|
|
|
/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  // An empty region means everything is scheduled; the ready queues must be
  // drained too.
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  // Loop until an unscheduled node is found; queues may still hold nodes that
  // were scheduled from the opposite boundary.
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  // A node may sit in both ready queues; drop it from each one it occupies.
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}
|
|
|
|
|
2013-09-19 23:10:59 +00:00
|
|
|
/// Move already-scheduled physreg COPY instructions adjacent to SU.
///
/// Keeps a copy that feeds (or is fed by) SU through a single physical
/// register dependence next to SU so a post-RA pass can combine them.
void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SDep &Dep : Deps) {
    // Only data dependences through a physical register are interesting.
    if (Dep.getKind() != SDep::Data || !TRI->isPhysicalRegister(Dep.getReg()))
      continue;
    SUnit *DepSU = Dep.getSUnit();
    // Skip copies that feed more than one instruction.
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy())
      continue;
    DEBUG(dbgs() << "  Rescheduling physreg copy ";
          Dep.getSUnit()->dump(DAG));
    DAG->moveInstruction(Copy, InsertPos);
  }
}
|
|
|
|
|
2012-05-24 22:11:09 +00:00
|
|
|
/// Update the scheduler's state after scheduling a node. This is the same node
|
2013-12-28 21:56:57 +00:00
|
|
|
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
|
|
|
|
/// update it's state based on the current cycle before MachineSchedStrategy
|
|
|
|
/// does.
|
2013-04-13 06:07:40 +00:00
|
|
|
///
|
|
|
|
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
|
|
|
|
/// them here. See comments in biasPhysRegCopy.
|
2013-09-19 23:10:59 +00:00
|
|
|
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
|
2012-06-05 21:11:27 +00:00
|
|
|
if (IsTopNode) {
|
2013-12-07 05:59:44 +00:00
|
|
|
SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
|
2012-06-29 03:23:22 +00:00
|
|
|
Top.bumpNode(SU);
|
2013-04-13 06:07:40 +00:00
|
|
|
if (SU->hasPhysRegUses)
|
|
|
|
reschedulePhysRegCopies(SU, true);
|
2016-04-21 01:54:13 +00:00
|
|
|
} else {
|
2013-12-07 05:59:44 +00:00
|
|
|
SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
|
2012-06-29 03:23:22 +00:00
|
|
|
Bot.bumpNode(SU);
|
2013-04-13 06:07:40 +00:00
|
|
|
if (SU->hasPhysRegDefs)
|
|
|
|
reschedulePhysRegCopies(SU, false);
|
2012-05-10 21:06:16 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-14 04:00:41 +00:00
|
|
|
/// Create the standard converging machine scheduler. This will be used as the
|
|
|
|
/// default scheduler if the target does not set a default.
|
2013-12-28 21:56:57 +00:00
|
|
|
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
|
2014-04-21 20:32:32 +00:00
|
|
|
ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
|
2012-11-12 19:40:10 +00:00
|
|
|
// Register DAG post-processors.
|
2013-04-24 15:54:43 +00:00
|
|
|
//
|
|
|
|
// FIXME: extend the mutation API to allow earlier mutations to instantiate
|
|
|
|
// data and pass it to later mutations. Have a single mutation that gathers
|
|
|
|
// the interesting nodes in one pass.
|
2016-08-19 19:59:18 +00:00
|
|
|
DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
|
2016-04-15 14:58:38 +00:00
|
|
|
if (EnableMemOpCluster) {
|
|
|
|
if (DAG->TII->enableClusterLoads())
|
2016-08-19 19:59:18 +00:00
|
|
|
DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
|
2016-04-15 14:58:38 +00:00
|
|
|
if (DAG->TII->enableClusterStores())
|
2016-08-19 19:59:18 +00:00
|
|
|
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
|
2016-04-15 14:58:38 +00:00
|
|
|
}
|
2012-11-12 19:52:20 +00:00
|
|
|
if (EnableMacroFusion)
|
2016-11-11 01:34:21 +00:00
|
|
|
DAG->addMutation(createMacroFusionDAGMutation(DAG->TII));
|
2012-11-12 19:40:10 +00:00
|
|
|
return DAG;
|
2012-01-17 06:55:03 +00:00
|
|
|
}
|
2013-12-28 21:56:57 +00:00
|
|
|
|
2012-03-14 04:00:41 +00:00
|
|
|
// Register the default pre-RA scheduler under the "converge" name so it can be
// selected with -misched=converge.
static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createGenericSchedLive);
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-06-04 07:06:18 +00:00
|
|
|
/// Initialize per-region state for post-RA scheduling (top-down only).
void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  // Rem must be initialized before the zone that references it.
  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
      DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
        Itin, DAG);
  }
}
|
2013-12-28 21:56:57 +00:00
|
|
|
|
|
|
|
|
|
|
|
void PostGenericScheduler::registerRoots() {
|
|
|
|
Rem.CriticalPath = DAG->ExitSU.getDepth();
|
|
|
|
|
|
|
|
// Some roots may not feed into ExitSU. Check all of them in case.
|
|
|
|
for (SmallVectorImpl<SUnit*>::const_iterator
|
|
|
|
I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
|
|
|
|
if ((*I)->getDepth() > Rem.CriticalPath)
|
|
|
|
Rem.CriticalPath = (*I)->getDepth();
|
|
|
|
}
|
2014-08-07 21:49:44 +00:00
|
|
|
DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
|
|
|
|
if (DumpCriticalPathLength) {
|
|
|
|
errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
|
|
|
|
}
|
2013-12-28 21:56:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// The heuristics are tried in priority order; the first one that
/// distinguishes the candidates sets TryCand.Reason and wins.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
    TryCand.Reason = NodeOrder;
}
|
|
|
|
|
|
|
|
/// Scan the top-down ready queue and leave the best pick in Cand.
void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  // Post-RA scheduling is top-down only, so every candidate is AtTop.
  for (SUnit *SU : Top.Available) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = SU;
    TryCand.AtTop = true;
    TryCand.initResourceDelta(DAG, SchedModel);
    tryCandidate(Cand, TryCand);
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}
|
|
|
|
|
|
|
|
/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  // Empty region: everything has been scheduled; queues must be empty.
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  // Retry until an unscheduled node is found.
  do {
    SU = Top.pickOnlyChoice();
    if (SU) {
      tracePick(Only1, true);
    } else {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone and
      // the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  // Post-RA scheduling is strictly top-down.
  IsTopNode = true;
  Top.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}
|
|
|
|
|
|
|
|
/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  // Advance the node's ready cycle before bumping the zone's state.
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}
|
|
|
|
|
|
|
|
/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
|
|
|
|
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
|
2016-11-09 09:59:27 +00:00
|
|
|
return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C),
|
|
|
|
/*RemoveKillFlags=*/true);
|
2013-12-28 21:56:57 +00:00
|
|
|
}
|
2012-01-17 06:55:03 +00:00
|
|
|
|
2012-10-15 18:02:27 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ILP Scheduler. Currently for experimental analysis of heuristics.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
  // DFS metrics for the current DAG; set by ILPScheduler::initialize.
  const SchedDFSResult *DFSResult;
  // Bit per subtree: set once the subtree has started scheduling.
  const BitVector *ScheduledTrees;
  // True: prefer high-ILP nodes; false: prefer low-ILP nodes.
  bool MaximizeILP;

  ILPOrder(bool MaxILP)
    : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}

  /// \brief Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
          < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    // Same subtree (or tie on tree heuristics): fall back to the ILP metric.
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
|
|
|
|
|
|
|
|
/// \brief Schedule based on the ILP metric.
///
/// Bottom-up only. Maintains ReadyQ as a heap ordered by ILPOrder; the heap
/// is rebuilt whenever the DFS results it sorts by may have changed.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG;
  ILPOrder Cmp;

  // Ready nodes kept as a std::*_heap over Cmp.
  std::vector<SUnit*> ReadyQ;
public:
  ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    // Compute DFS metrics and point the comparator at them.
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    // This strategy is bottom-up only.
    IsTopNode = false;
    DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
          << " ILP: " << DAG->getDFSResult()->getILP(SU)
          << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
          << DAG->getDFSResult()->getSubtreeLevel(
            DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
          << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// \brief Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    // ScheduledTrees changed, so the heap order is stale; rebuild it.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
/// Factory for the -misched=ilpmax strategy: schedule bottom-up, preferring
/// nodes with the highest ILP metric.
static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(/*MaximizeILP=*/true));
}
|
|
|
|
/// Factory for the -misched=ilpmin strategy: schedule bottom-up, preferring
/// nodes with the lowest ILP metric.
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(/*MaximizeILP=*/false));
}
|
|
|
|
// Command-line registrations for the experimental ILP strategies
// (-misched=ilpmax / -misched=ilpmin).
static MachineSchedRegistry ILPMaxRegistry(
  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
|
|
|
|
|
2012-01-14 02:17:06 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Machine Instruction Shuffler for Correctness Testing
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2012-01-13 06:30:30 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
namespace {
|
2012-03-14 04:00:41 +00:00
|
|
|
/// Apply a less-than relation on the node order, which corresponds to the
|
|
|
|
/// instruction order prior to scheduling. IsReverse implements greater-than.
|
|
|
|
template<bool IsReverse>
|
|
|
|
struct SUnitOrder {
|
2012-01-17 06:55:07 +00:00
|
|
|
bool operator()(SUnit *A, SUnit *B) const {
|
2012-03-14 04:00:41 +00:00
|
|
|
if (IsReverse)
|
|
|
|
return A->NodeNum > B->NodeNum;
|
|
|
|
else
|
|
|
|
return A->NodeNum < B->NodeNum;
|
2012-01-17 06:55:07 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2012-01-13 06:30:30 +00:00
|
|
|
/// Reorder instructions as much as possible.
///
/// Test-only strategy (NDEBUG-guarded): schedules instructions in an order
/// deliberately far from program order to stress the scheduler's correctness.
class InstructionShuffler : public MachineSchedStrategy {
  // Flip direction after each pick when true.
  bool IsAlternating;
  // Current scheduling direction.
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      // Pop until a node that has not already been scheduled is found.
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};
|
|
|
|
} // namespace
|
|
|
|
|
2012-03-08 01:41:12 +00:00
|
|
|
/// Factory for the -misched=shuffle test strategy.
static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  // Alternate directions unless the user forced a single direction.
  const bool Alternate = !ForceTopDown && !ForceBottomUp;
  const bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
    C, make_unique<InstructionShuffler>(Alternate, TopDown));
}
|
2012-03-14 04:00:41 +00:00
|
|
|
// Command-line registration for the shuffle test strategy (-misched=shuffle).
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
|
2012-01-13 06:30:30 +00:00
|
|
|
#endif // !NDEBUG
|
2013-01-25 07:45:29 +00:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
2013-12-28 21:56:47 +00:00
|
|
|
// GraphWriter support for ScheduleDAGMILive.
|
2013-01-25 07:45:29 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#ifndef NDEBUG
|
|
|
|
namespace llvm {
|
|
|
|
|
|
|
|
// Reuse the generic ScheduleDAG graph traits for ScheduleDAGMI so GraphWriter
// can traverse it.
template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
|
|
|
|
|
|
|
|
template<>
|
|
|
|
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
|
|
|
|
|
|
|
|
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
|
|
|
|
|
|
|
|
static std::string getGraphName(const ScheduleDAG *G) {
|
|
|
|
return G->MF.getName();
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool renderGraphFromBottomUp() {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool isNodeHidden(const SUnit *Node) {
|
2015-09-17 21:09:59 +00:00
|
|
|
if (ViewMISchedCutoff == 0)
|
|
|
|
return false;
|
|
|
|
return (Node->Preds.size() > ViewMISchedCutoff
|
|
|
|
|| Node->Succs.size() > ViewMISchedCutoff);
|
2013-01-25 07:45:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// If you want to override the dot attributes printed for a particular
|
|
|
|
/// edge, override this method.
|
|
|
|
static std::string getEdgeAttributes(const SUnit *Node,
|
|
|
|
SUnitIterator EI,
|
|
|
|
const ScheduleDAG *Graph) {
|
|
|
|
if (EI.isArtificialDep())
|
|
|
|
return "color=cyan,style=dashed";
|
|
|
|
if (EI.isCtrlDep())
|
|
|
|
return "color=blue,style=dashed";
|
|
|
|
return "";
|
|
|
|
}
|
|
|
|
|
|
|
|
static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
|
2014-06-26 22:52:05 +00:00
|
|
|
std::string Str;
|
|
|
|
raw_string_ostream SS(Str);
|
2013-12-28 21:56:47 +00:00
|
|
|
const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
|
|
|
|
const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
|
2014-04-14 00:51:57 +00:00
|
|
|
static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
|
2013-09-06 17:32:42 +00:00
|
|
|
SS << "SU:" << SU->NodeNum;
|
|
|
|
if (DFS)
|
|
|
|
SS << " I:" << DFS->getNumInstrs(SU);
|
2013-01-25 07:45:29 +00:00
|
|
|
return SS.str();
|
|
|
|
}
|
|
|
|
static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
|
|
|
|
return G->getGraphNodeLabel(SU);
|
|
|
|
}
|
|
|
|
|
2013-12-28 21:56:47 +00:00
|
|
|
static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
|
2013-01-25 07:45:29 +00:00
|
|
|
std::string Str("shape=Mrecord");
|
2013-12-28 21:56:47 +00:00
|
|
|
const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
|
|
|
|
const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
|
2014-04-14 00:51:57 +00:00
|
|
|
static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
|
2013-01-25 07:45:29 +00:00
|
|
|
if (DFS) {
|
|
|
|
Str += ",style=filled,fillcolor=\"#";
|
|
|
|
Str += DOT::getColorString(DFS->getSubtreeID(N));
|
|
|
|
Str += '"';
|
|
|
|
}
|
|
|
|
return Str;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
} // namespace llvm
|
|
|
|
#endif // NDEBUG
|
|
|
|
|
|
|
|
/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
/// Name and Title are forwarded to llvm::ViewGraph. Only functional in
/// debug (!NDEBUG) builds; release builds emit a diagnostic instead.
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  // GraphWriter support is compiled out of release builds.
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif  // NDEBUG
}
|
|
|
|
|
|
|
|
/// Out-of-line implementation with no arguments is handy for gdb.
|
|
|
|
void ScheduleDAGMI::viewGraph() {
|
|
|
|
viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
|
|
|
|
}
|