mirror of https://github.com/RPCSX/llvm.git (synced 2024-11-27 13:40:30 +00:00)
Properly model the latency of register defs which are 1) function returns or 2) live-outs.

Previously the post-RA schedulers completely ignored these dependencies, since returns, branches, etc. are all scheduling barriers. This patch models the latencies between instructions being scheduled and the barriers. It also handles calls by marking their register uses.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@117193 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent dd9dd6f857
commit ec6906ba47
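Before the diff, a reader's aid: the following is a minimal, self-contained C++ sketch of why this patch matters, not LLVM code; Node, heights, and the toy DAG are invented for illustration. Once the block-exit node depends on live-out defs with their true latencies, a height-based scheduler sees a long-latency def as more urgent than a short one and issues it earlier, hiding the latency behind the rest of the block instead of stalling at the branch.

    // Toy DAG: compute each node's "height" (longest latency path to exit).
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Node {
      const char *Name;
      int Latency;            // cycles until the result is available
      std::vector<int> Preds; // indices of nodes this one depends on
    };

    // Longest path from each node to the exit, weighted by def latencies.
    // Nodes are assumed topologically ordered, exit last.
    static std::vector<int> heights(const std::vector<Node> &DAG, int ExitIdx) {
      std::vector<int> H(DAG.size(), 0);
      for (int i = ExitIdx; i >= 0; --i)
        for (int p : DAG[i].Preds)
          H[p] = std::max(H[p], H[i] + DAG[p].Latency);
      return H;
    }

    int main() {
      // mul (4 cycles) and add (1 cycle) both feed the exit (live-outs).
      // With latency edges into the exit, mul's height exceeds add's, so a
      // height-based scheduler issues mul first and hides its latency.
      std::vector<Node> DAG = {
          {"mul", 4, {}}, {"add", 1, {}}, {"exit", 0, {0, 1}}};
      std::vector<int> H = heights(DAG, 2);
      for (size_t i = 0; i + 1 < DAG.size(); ++i)
        std::printf("%s: height %d\n", DAG[i].Name, H[i]);
      return 0;
    }

Without the exit edges, both heights would be zero and the scheduler would have no reason to prefer the long-latency def.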
@@ -141,6 +141,39 @@ void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
   }
 }
 
+/// AddSchedBarrierDeps - Add dependencies from instructions in the current
+/// list of instructions being scheduled to the scheduling barrier by adding
+/// the exit SU to the register defs and use list. This is because we want to
+/// make sure instructions which define registers that are either used by
+/// the terminator or are live-out are properly scheduled. This is
+/// especially important when the definition latency of the return value(s)
+/// is too high to be hidden by the branch or when the live-out registers
+/// are used by instructions in the fallthrough block.
+void ScheduleDAGInstrs::AddSchedBarrierDeps() {
+  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
+  ExitSU.setInstr(ExitMI);
+  bool AllDepKnown = ExitMI &&
+    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
+  if (ExitMI && AllDepKnown) {
+    // If it's a call or a barrier, add dependencies on the defs and uses of
+    // the instruction.
+    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
+      const MachineOperand &MO = ExitMI->getOperand(i);
+      if (!MO.isReg() || MO.isDef()) continue;
+      unsigned Reg = MO.getReg();
+      if (Reg == 0) continue;
+
+      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
+      Uses[Reg].push_back(&ExitSU);
+    }
+  } else {
+    // For others, e.g. fallthrough, conditional branch, assume the exit
+    // uses all the registers.
+    for (int i = 0, e = TRI->getNumRegs(); i != e; ++i)
+      Uses[i].push_back(&ExitSU);
+  }
+}
+
 void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
   // We'll be allocating one SUnit for each instruction, plus one for
   // the region exit node.
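The new function above has a two-case shape worth spelling out: when the exit instruction is a call or barrier its operands are known, so only those registers get an edge to the exit node; otherwise (fallthrough, conditional branch) every physical register is conservatively treated as read at the exit. A hedged sketch of that shape, outside LLVM, with toy stand-in types (ToySUnit, addBarrierDeps, and the register count are all hypothetical):

    #include <vector>

    struct ToySUnit {}; // stand-in for a scheduling unit

    // Record that the exit node uses register Reg, mirroring
    // Uses[Reg].push_back(&ExitSU) in the patch.
    static void addExitUse(std::vector<std::vector<ToySUnit *>> &Uses,
                           unsigned Reg, ToySUnit &ExitSU) {
      Uses[Reg].push_back(&ExitSU);
    }

    static void addBarrierDeps(const std::vector<unsigned> *ExitRegUses,
                               std::vector<std::vector<ToySUnit *>> &Uses,
                               ToySUnit &ExitSU) {
      if (ExitRegUses) {
        // Exit instruction known (call/barrier): only its uses matter.
        for (unsigned Reg : *ExitRegUses)
          addExitUse(Uses, Reg, ExitSU);
      } else {
        // Unknown consumer: assume the exit reads every register.
        for (unsigned Reg = 0; Reg != Uses.size(); ++Reg)
          addExitUse(Uses, Reg, ExitSU);
      }
    }

    int main() {
      ToySUnit ExitSU;
      std::vector<std::vector<ToySUnit *>> Uses(8); // 8 toy registers
      std::vector<unsigned> ReturnUses = {0};       // e.g. a return reading R0
      addBarrierDeps(&ReturnUses, Uses, ExitSU);    // precise: only R0
      addBarrierDeps(nullptr, Uses, ExitSU);        // conservative: R0..R7
      return 0;
    }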
@@ -175,6 +208,10 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
   // without emitting the info from the previous call.
   DbgValueVec.clear();
 
+  // Model data dependencies between instructions being scheduled and the
+  // ExitSU.
+  AddSchedBarrierDeps();
+
   // Walk the list of instructions, from bottom moving up.
   for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
        MII != MIE; --MII) {
@@ -228,6 +265,8 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
       unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
       for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
         SUnit *DefSU = DefList[i];
+        if (DefSU == &ExitSU)
+          continue;
         if (DefSU != SU &&
             (Kind != SDep::Output || !MO.isDead() ||
              !DefSU->getInstr()->registerDefIsDead(Reg)))
@@ -237,6 +276,8 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
         std::vector<SUnit *> &DefList = Defs[*Alias];
         for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
           SUnit *DefSU = DefList[i];
+          if (DefSU == &ExitSU)
+            continue;
           if (DefSU != SU &&
               (Kind != SDep::Output || !MO.isDead() ||
                !DefSU->getInstr()->registerDefIsDead(*Alias)))
@@ -258,12 +299,14 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
           // TODO: Perhaps we should get rid of
           // SpecialAddressLatency and just move this into
           // adjustSchedDependency for the targets that care about it.
-          if (SpecialAddressLatency != 0 && !UnitLatencies) {
+          if (SpecialAddressLatency != 0 && !UnitLatencies &&
+              UseSU != &ExitSU) {
             MachineInstr *UseMI = UseSU->getInstr();
             const TargetInstrDesc &UseTID = UseMI->getDesc();
             int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
-            assert(RegUseIndex >= 0 && "UseMI doesn's use register!");
-            if ((UseTID.mayLoad() || UseTID.mayStore()) &&
+            if (RegUseIndex >= 0 &&
+                (UseTID.mayLoad() || UseTID.mayStore()) &&
                 (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                 UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
               LDataLatency += SpecialAddressLatency;
@@ -446,6 +489,14 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
           // Treat all other stores conservatively.
           goto new_alias_chain;
         }
+
+        if (!ExitSU.isPred(SU))
+          // Push stores up a bit to avoid them getting in between cmp
+          // and branches.
+          ExitSU.addPred(SDep(SU, SDep::Order, 0,
+                              /*Reg=*/0, /*isNormalMemory=*/false,
+                              /*isMustAlias=*/false,
+                              /*isArtificial=*/true));
       } else if (TID.mayLoad()) {
         bool MayAlias = true;
         TrueMemOrderLatency = 0;
@@ -541,21 +592,30 @@ void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
     MachineInstr *UseMI = Use->getInstr();
     // For all uses of the register, calculate the maximum latency
     int Latency = -1;
-    for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
-      const MachineOperand &MO = UseMI->getOperand(i);
-      if (!MO.isReg() || !MO.isUse())
-        continue;
-      unsigned MOReg = MO.getReg();
-      if (MOReg != Reg)
-        continue;
+    if (UseMI) {
+      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
+        const MachineOperand &MO = UseMI->getOperand(i);
+        if (!MO.isReg() || !MO.isUse())
+          continue;
+        unsigned MOReg = MO.getReg();
+        if (MOReg != Reg)
+          continue;
 
-      int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx, UseMI, i);
-      Latency = std::max(Latency, UseCycle);
-
-      // If we found a latency, then replace the existing dependence latency.
-      if (Latency >= 0)
-        dep.setLatency(Latency);
+        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
+                                              UseMI, i);
+        Latency = std::max(Latency, UseCycle);
+      }
+    } else {
+      // If UseMI is null, it must be a scheduling barrier.
+      if (!InstrItins || InstrItins->isEmpty())
+        return;
+      unsigned DefClass = DefMI->getDesc().getSchedClass();
+      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
+    }
+
+    // If we found a latency, then replace the existing dependence latency.
+    if (Latency >= 0)
+      dep.setLatency(Latency);
   }
 }
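The hunk above adds a fallback to ComputeOperandLatency: when the use is the ExitSU there is no use instruction to query, so the def operand's ready cycle from the itinerary becomes the dependence latency. A small sketch of that fallback under assumed toy types (ToyItineraries and barrierLatency are invented for illustration, mirroring the "Latency >= 0" guard in the patch):

    #include <optional>

    struct ToyItineraries {
      // Cycle at which operand DefIdx of scheduling class DefClass is
      // produced; empty when the target provides no itinerary data.
      std::optional<int> operandCycle(unsigned DefClass, unsigned DefIdx) const {
        (void)DefClass; (void)DefIdx;
        return 3; // pretend the result is ready 3 cycles after issue
      }
    };

    // Latency to charge on a def->barrier edge, or -1 if unknown, in which
    // case the existing default latency on the edge is kept.
    static int barrierLatency(const ToyItineraries *Itins,
                              unsigned DefClass, unsigned DefIdx) {
      if (!Itins)
        return -1; // no itineraries: keep the default latency
      if (auto Cycle = Itins->operandCycle(DefClass, DefIdx))
        return *Cycle;
      return -1;
    }

    int main() {
      ToyItineraries Itins;
      // Def operand 0 of toy scheduling class 0: 3 cycles to the barrier.
      return barrierLatency(&Itins, /*DefClass=*/0, /*DefIdx=*/0) == 3 ? 0 : 1;
    }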
@@ -164,6 +164,15 @@ namespace llvm {
     /// input.
     virtual void BuildSchedGraph(AliasAnalysis *AA);
 
+    /// AddSchedBarrierDeps - Add dependencies from instructions in the current
+    /// list of instructions being scheduled to the scheduling barrier. We want
+    /// to make sure instructions which define registers that are either used
+    /// by the terminator or are live-out are properly scheduled. This is
+    /// especially important when the definition latency of the return value(s)
+    /// is too high to be hidden by the branch or when the live-out registers
+    /// are used by instructions in the fallthrough block.
+    void AddSchedBarrierDeps();
+
     /// ComputeLatency - Compute node latency.
     ///
     virtual void ComputeLatency(SUnit *SU);