2008-01-01 01:03:04 +00:00
|
|
|
//===-- TargetInstrInfoImpl.cpp - Target Instruction Information ----------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements the TargetInstrInfoImpl class, it just provides default
|
|
|
|
// implementations of various methods.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "llvm/Target/TargetInstrInfo.h"
|
2010-06-18 23:09:54 +00:00
|
|
|
#include "llvm/Target/TargetLowering.h"
|
2009-10-09 23:27:56 +00:00
|
|
|
#include "llvm/Target/TargetMachine.h"
|
|
|
|
#include "llvm/Target/TargetRegisterInfo.h"
|
2008-08-14 22:49:33 +00:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2008-12-03 18:43:12 +00:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2008-01-01 01:03:04 +00:00
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2008-06-16 07:33:11 +00:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2009-09-25 20:36:54 +00:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
2009-10-09 23:27:56 +00:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2010-12-08 20:04:29 +00:00
|
|
|
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
|
2008-12-03 18:43:12 +00:00
|
|
|
#include "llvm/CodeGen/PseudoSourceValue.h"
|
2011-12-15 22:58:58 +00:00
|
|
|
#include "llvm/MC/MCInstrItineraries.h"
|
2011-01-21 05:51:33 +00:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2010-07-13 00:23:30 +00:00
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-10 23:26:12 +00:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2008-01-01 01:03:04 +00:00
|
|
|
using namespace llvm;
|
|
|
|
|
2011-01-21 05:51:33 +00:00
|
|
|
// Global escape hatch: targets that honor usePreRAHazardRecognizer() can be
// told to skip hazard detection in the preRA scheduler entirely.
static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));
|
|
|
|
|
2010-06-22 01:18:16 +00:00
|
|
|
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
|
|
|
|
/// after it, replacing it with an unconditional branch to NewDest.
|
2010-06-18 23:09:54 +00:00
|
|
|
void
|
|
|
|
TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
|
|
|
|
MachineBasicBlock *NewDest) const {
|
|
|
|
MachineBasicBlock *MBB = Tail->getParent();
|
|
|
|
|
|
|
|
// Remove all the old successors of MBB from the CFG.
|
|
|
|
while (!MBB->succ_empty())
|
|
|
|
MBB->removeSuccessor(MBB->succ_begin());
|
|
|
|
|
|
|
|
// Remove all the dead instructions from the end of MBB.
|
|
|
|
MBB->erase(Tail, MBB->end());
|
|
|
|
|
|
|
|
// If MBB isn't immediately before MBB, insert a branch to it.
|
|
|
|
if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
|
|
|
|
InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
|
|
|
|
Tail->getDebugLoc());
|
|
|
|
MBB->addSuccessor(NewDest);
|
|
|
|
}
|
|
|
|
|
2008-01-01 01:03:04 +00:00
|
|
|
// commuteInstruction - The default implementation of this method just exchanges
|
2009-07-10 23:26:12 +00:00
|
|
|
// the two operands returned by findCommutedOpIndices.
|
2008-06-16 07:33:11 +00:00
|
|
|
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
|
|
|
|
bool NewMI) const {
|
2011-06-28 19:10:37 +00:00
|
|
|
const MCInstrDesc &MCID = MI->getDesc();
|
|
|
|
bool HasDef = MCID.getNumDefs();
|
2009-07-10 23:26:12 +00:00
|
|
|
if (HasDef && !MI->getOperand(0).isReg())
|
|
|
|
// No idea how to commute this instruction. Target should implement its own.
|
|
|
|
return 0;
|
|
|
|
unsigned Idx1, Idx2;
|
|
|
|
if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
|
|
|
|
std::string msg;
|
|
|
|
raw_string_ostream Msg(msg);
|
|
|
|
Msg << "Don't know how to commute: " << *MI;
|
2010-04-07 22:58:41 +00:00
|
|
|
report_fatal_error(Msg.str());
|
2009-07-10 23:26:12 +00:00
|
|
|
}
|
2009-07-01 08:29:08 +00:00
|
|
|
|
|
|
|
assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
|
2008-01-01 01:03:04 +00:00
|
|
|
"This only knows how to commute register operands so far");
|
2011-08-22 23:04:56 +00:00
|
|
|
unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
|
2009-07-01 08:29:08 +00:00
|
|
|
unsigned Reg1 = MI->getOperand(Idx1).getReg();
|
|
|
|
unsigned Reg2 = MI->getOperand(Idx2).getReg();
|
2012-03-28 17:02:22 +00:00
|
|
|
unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
|
|
|
|
unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
|
|
|
|
unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
|
2009-07-01 08:29:08 +00:00
|
|
|
bool Reg1IsKill = MI->getOperand(Idx1).isKill();
|
|
|
|
bool Reg2IsKill = MI->getOperand(Idx2).isKill();
|
2011-08-22 23:04:56 +00:00
|
|
|
// If destination is tied to either of the commuted source register, then
|
|
|
|
// it must be updated.
|
|
|
|
if (HasDef && Reg0 == Reg1 &&
|
|
|
|
MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
|
2008-02-13 02:46:49 +00:00
|
|
|
Reg2IsKill = false;
|
2011-08-22 23:04:56 +00:00
|
|
|
Reg0 = Reg2;
|
2012-03-28 17:02:22 +00:00
|
|
|
SubReg0 = SubReg2;
|
2011-08-22 23:04:56 +00:00
|
|
|
} else if (HasDef && Reg0 == Reg2 &&
|
|
|
|
MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
|
|
|
|
Reg1IsKill = false;
|
|
|
|
Reg0 = Reg1;
|
2012-03-28 17:02:22 +00:00
|
|
|
SubReg0 = SubReg1;
|
2008-06-16 07:33:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (NewMI) {
|
|
|
|
// Create a new instruction.
|
2009-07-01 08:29:08 +00:00
|
|
|
bool Reg0IsDead = HasDef ? MI->getOperand(0).isDead() : false;
|
2008-07-07 23:14:23 +00:00
|
|
|
MachineFunction &MF = *MI->getParent()->getParent();
|
2009-07-01 08:29:08 +00:00
|
|
|
if (HasDef)
|
|
|
|
return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
|
2012-03-28 17:02:22 +00:00
|
|
|
.addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead), SubReg0)
|
|
|
|
.addReg(Reg2, getKillRegState(Reg2IsKill), SubReg2)
|
|
|
|
.addReg(Reg1, getKillRegState(Reg1IsKill), SubReg1);
|
2009-07-01 08:29:08 +00:00
|
|
|
else
|
|
|
|
return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
|
2012-03-28 17:02:22 +00:00
|
|
|
.addReg(Reg2, getKillRegState(Reg2IsKill), SubReg2)
|
|
|
|
.addReg(Reg1, getKillRegState(Reg1IsKill), SubReg1);
|
2008-02-13 02:46:49 +00:00
|
|
|
}
|
2008-06-16 07:33:11 +00:00
|
|
|
|
2012-03-28 17:02:22 +00:00
|
|
|
if (HasDef) {
|
2011-08-22 23:04:56 +00:00
|
|
|
MI->getOperand(0).setReg(Reg0);
|
2012-03-28 17:02:22 +00:00
|
|
|
MI->getOperand(0).setSubReg(SubReg0);
|
|
|
|
}
|
2009-07-01 08:29:08 +00:00
|
|
|
MI->getOperand(Idx2).setReg(Reg1);
|
|
|
|
MI->getOperand(Idx1).setReg(Reg2);
|
2012-03-28 17:02:22 +00:00
|
|
|
MI->getOperand(Idx2).setSubReg(SubReg1);
|
|
|
|
MI->getOperand(Idx1).setSubReg(SubReg2);
|
2009-07-01 08:29:08 +00:00
|
|
|
MI->getOperand(Idx2).setIsKill(Reg1IsKill);
|
|
|
|
MI->getOperand(Idx1).setIsKill(Reg2IsKill);
|
2008-01-01 01:03:04 +00:00
|
|
|
return MI;
|
|
|
|
}
|
|
|
|
|
2009-07-10 19:15:51 +00:00
|
|
|
/// findCommutedOpIndices - If specified MI is commutable, return the two
|
|
|
|
/// operand indices that would swap value. Return true if the instruction
|
|
|
|
/// is not in a form which this routine understands.
|
|
|
|
bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
|
|
|
|
unsigned &SrcOpIdx1,
|
|
|
|
unsigned &SrcOpIdx2) const {
|
2011-12-14 02:11:42 +00:00
|
|
|
assert(!MI->isBundle() &&
|
2011-12-07 07:15:52 +00:00
|
|
|
"TargetInstrInfoImpl::findCommutedOpIndices() can't handle bundles");
|
|
|
|
|
2011-06-28 19:10:37 +00:00
|
|
|
const MCInstrDesc &MCID = MI->getDesc();
|
|
|
|
if (!MCID.isCommutable())
|
2009-07-01 08:29:08 +00:00
|
|
|
return false;
|
2009-07-10 19:15:51 +00:00
|
|
|
// This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
|
|
|
|
// is not true, then the target must implement this.
|
2011-06-28 19:10:37 +00:00
|
|
|
SrcOpIdx1 = MCID.getNumDefs();
|
2009-07-10 19:15:51 +00:00
|
|
|
SrcOpIdx2 = SrcOpIdx1 + 1;
|
|
|
|
if (!MI->getOperand(SrcOpIdx1).isReg() ||
|
|
|
|
!MI->getOperand(SrcOpIdx2).isReg())
|
|
|
|
// No idea.
|
|
|
|
return false;
|
|
|
|
return true;
|
2008-02-15 18:21:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-12-09 06:41:08 +00:00
|
|
|
bool
|
|
|
|
TargetInstrInfoImpl::isUnpredicatedTerminator(const MachineInstr *MI) const {
|
|
|
|
if (!MI->isTerminator()) return false;
|
|
|
|
|
|
|
|
// Conditional branch is a special case.
|
|
|
|
if (MI->isBranch() && !MI->isBarrier())
|
|
|
|
return true;
|
|
|
|
if (!MI->isPredicable())
|
|
|
|
return true;
|
|
|
|
return !isPredicated(MI);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-01-01 01:03:04 +00:00
|
|
|
bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
|
2008-08-14 22:49:33 +00:00
|
|
|
const SmallVectorImpl<MachineOperand> &Pred) const {
|
2008-01-01 01:03:04 +00:00
|
|
|
bool MadeChange = false;
|
2011-12-07 07:15:52 +00:00
|
|
|
|
2011-12-14 02:11:42 +00:00
|
|
|
assert(!MI->isBundle() &&
|
2011-12-07 07:15:52 +00:00
|
|
|
"TargetInstrInfoImpl::PredicateInstruction() can't handle bundles");
|
|
|
|
|
2011-06-28 19:10:37 +00:00
|
|
|
const MCInstrDesc &MCID = MI->getDesc();
|
2011-12-07 07:15:52 +00:00
|
|
|
if (!MI->isPredicable())
|
2008-01-07 07:27:27 +00:00
|
|
|
return false;
|
2010-12-08 20:04:29 +00:00
|
|
|
|
2008-01-07 07:27:27 +00:00
|
|
|
for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
|
2011-06-28 19:10:37 +00:00
|
|
|
if (MCID.OpInfo[i].isPredicate()) {
|
2008-01-07 07:27:27 +00:00
|
|
|
MachineOperand &MO = MI->getOperand(i);
|
2008-10-03 15:45:36 +00:00
|
|
|
if (MO.isReg()) {
|
2008-01-07 07:27:27 +00:00
|
|
|
MO.setReg(Pred[j].getReg());
|
|
|
|
MadeChange = true;
|
2008-10-03 15:45:36 +00:00
|
|
|
} else if (MO.isImm()) {
|
2008-01-07 07:27:27 +00:00
|
|
|
MO.setImm(Pred[j].getImm());
|
|
|
|
MadeChange = true;
|
2008-10-03 15:45:36 +00:00
|
|
|
} else if (MO.isMBB()) {
|
2008-01-07 07:27:27 +00:00
|
|
|
MO.setMBB(Pred[j].getMBB());
|
|
|
|
MadeChange = true;
|
2008-01-01 01:03:04 +00:00
|
|
|
}
|
2008-01-07 07:27:27 +00:00
|
|
|
++j;
|
2008-01-01 01:03:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return MadeChange;
|
|
|
|
}
|
2008-03-31 20:40:39 +00:00
|
|
|
|
2011-08-08 20:53:24 +00:00
|
|
|
bool TargetInstrInfoImpl::hasLoadFromStackSlot(const MachineInstr *MI,
|
|
|
|
const MachineMemOperand *&MMO,
|
|
|
|
int &FrameIndex) const {
|
|
|
|
for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
|
|
|
|
oe = MI->memoperands_end();
|
|
|
|
o != oe;
|
|
|
|
++o) {
|
|
|
|
if ((*o)->isLoad() && (*o)->getValue())
|
|
|
|
if (const FixedStackPseudoSourceValue *Value =
|
|
|
|
dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
|
|
|
|
FrameIndex = Value->getFrameIndex();
|
|
|
|
MMO = *o;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool TargetInstrInfoImpl::hasStoreToStackSlot(const MachineInstr *MI,
|
|
|
|
const MachineMemOperand *&MMO,
|
|
|
|
int &FrameIndex) const {
|
|
|
|
for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
|
|
|
|
oe = MI->memoperands_end();
|
|
|
|
o != oe;
|
|
|
|
++o) {
|
|
|
|
if ((*o)->isStore() && (*o)->getValue())
|
|
|
|
if (const FixedStackPseudoSourceValue *Value =
|
|
|
|
dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
|
|
|
|
FrameIndex = Value->getFrameIndex();
|
|
|
|
MMO = *o;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2008-03-31 20:40:39 +00:00
|
|
|
void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
|
|
|
|
MachineBasicBlock::iterator I,
|
|
|
|
unsigned DestReg,
|
2009-07-16 09:20:10 +00:00
|
|
|
unsigned SubIdx,
|
2009-11-14 02:55:43 +00:00
|
|
|
const MachineInstr *Orig,
|
2010-06-02 22:47:25 +00:00
|
|
|
const TargetRegisterInfo &TRI) const {
|
2008-07-07 23:14:23 +00:00
|
|
|
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
|
2010-06-02 22:47:25 +00:00
|
|
|
MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
|
2008-03-31 20:40:39 +00:00
|
|
|
MBB.insert(I, MI);
|
|
|
|
}
|
|
|
|
|
2011-01-20 08:34:58 +00:00
|
|
|
bool
|
|
|
|
TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
|
|
|
|
const MachineInstr *MI1,
|
|
|
|
const MachineRegisterInfo *MRI) const {
|
2010-03-03 01:44:33 +00:00
|
|
|
return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
|
|
|
|
}
|
|
|
|
|
2010-01-06 23:47:07 +00:00
|
|
|
MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
                                             MachineFunction &MF) const {
  // Default duplication is a straight clone; the caller must not ask us to
  // duplicate something marked not-duplicable.
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}
|
|
|
|
|
2010-07-09 20:43:13 +00:00
|
|
|
// If the COPY instruction in MI can be folded to a stack operation, return
|
|
|
|
// the register class to use.
|
|
|
|
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
|
|
|
|
unsigned FoldIdx) {
|
|
|
|
assert(MI->isCopy() && "MI must be a COPY instruction");
|
|
|
|
if (MI->getNumOperands() != 2)
|
|
|
|
return 0;
|
|
|
|
assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
|
|
|
|
|
|
|
|
const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
|
|
|
|
const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);
|
|
|
|
|
|
|
|
if (FoldOp.getSubReg() || LiveOp.getSubReg())
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
unsigned FoldReg = FoldOp.getReg();
|
|
|
|
unsigned LiveReg = LiveOp.getReg();
|
|
|
|
|
|
|
|
assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
|
|
|
|
"Cannot fold physregs");
|
|
|
|
|
|
|
|
const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
|
|
|
|
const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
|
|
|
|
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
|
|
|
|
return RC->contains(LiveOp.getReg()) ? RC : 0;
|
|
|
|
|
2011-06-02 05:43:46 +00:00
|
|
|
if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
|
2010-07-09 20:43:13 +00:00
|
|
|
return RC;
|
|
|
|
|
|
|
|
// FIXME: Allow folding when register classes are memory compatible.
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool TargetInstrInfoImpl::
|
|
|
|
canFoldMemoryOperand(const MachineInstr *MI,
|
|
|
|
const SmallVectorImpl<unsigned> &Ops) const {
|
|
|
|
return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
|
|
|
|
}
|
|
|
|
|
2008-12-03 18:43:12 +00:00
|
|
|
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
|
|
|
|
/// slot into the specified machine instruction for the specified operand(s).
|
|
|
|
/// If this is possible, a new instruction is returned with the specified
|
|
|
|
/// operand folded, otherwise NULL is returned. The client is responsible for
|
|
|
|
/// removing the old instruction and adding the new one in the instruction
|
|
|
|
/// stream.
|
|
|
|
MachineInstr*
|
2010-07-09 17:29:08 +00:00
|
|
|
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
|
2008-12-03 18:43:12 +00:00
|
|
|
const SmallVectorImpl<unsigned> &Ops,
|
2010-07-09 20:43:13 +00:00
|
|
|
int FI) const {
|
2008-12-03 18:43:12 +00:00
|
|
|
unsigned Flags = 0;
|
|
|
|
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
|
|
|
|
if (MI->getOperand(Ops[i]).isDef())
|
|
|
|
Flags |= MachineMemOperand::MOStore;
|
|
|
|
else
|
|
|
|
Flags |= MachineMemOperand::MOLoad;
|
|
|
|
|
2010-07-09 20:43:13 +00:00
|
|
|
MachineBasicBlock *MBB = MI->getParent();
|
|
|
|
assert(MBB && "foldMemoryOperand needs an inserted instruction");
|
|
|
|
MachineFunction &MF = *MBB->getParent();
|
2010-07-09 17:29:08 +00:00
|
|
|
|
2008-12-03 18:43:12 +00:00
|
|
|
// Ask the target to do the actual folding.
|
2010-07-13 00:23:30 +00:00
|
|
|
if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
|
|
|
|
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
|
|
|
|
assert((!(Flags & MachineMemOperand::MOStore) ||
|
2011-12-07 07:15:52 +00:00
|
|
|
NewMI->mayStore()) &&
|
2010-07-13 00:23:30 +00:00
|
|
|
"Folded a def to a non-store!");
|
|
|
|
assert((!(Flags & MachineMemOperand::MOLoad) ||
|
2011-12-07 07:15:52 +00:00
|
|
|
NewMI->mayLoad()) &&
|
2010-07-13 00:23:30 +00:00
|
|
|
"Folded a use to a non-load!");
|
|
|
|
const MachineFrameInfo &MFI = *MF.getFrameInfo();
|
|
|
|
assert(MFI.getObjectOffset(FI) != -1);
|
|
|
|
MachineMemOperand *MMO =
|
2011-11-15 07:51:13 +00:00
|
|
|
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
|
2010-09-21 04:46:39 +00:00
|
|
|
Flags, MFI.getObjectSize(FI),
|
2010-07-13 00:23:30 +00:00
|
|
|
MFI.getObjectAlignment(FI));
|
|
|
|
NewMI->addMemOperand(MF, MMO);
|
2010-07-09 20:43:13 +00:00
|
|
|
|
2010-07-13 00:23:30 +00:00
|
|
|
// FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
|
|
|
|
return MBB->insert(MI, NewMI);
|
|
|
|
}
|
2010-07-09 20:43:13 +00:00
|
|
|
|
2010-07-13 00:23:30 +00:00
|
|
|
// Straight COPY may fold as load/store.
|
|
|
|
if (!MI->isCopy() || Ops.size() != 1)
|
|
|
|
return 0;
|
2010-07-09 20:43:13 +00:00
|
|
|
|
2010-07-13 00:23:30 +00:00
|
|
|
const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
|
|
|
|
if (!RC)
|
|
|
|
return 0;
|
2010-07-09 20:43:13 +00:00
|
|
|
|
2010-07-13 00:23:30 +00:00
|
|
|
const MachineOperand &MO = MI->getOperand(1-Ops[0]);
|
|
|
|
MachineBasicBlock::iterator Pos = MI;
|
|
|
|
const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
|
2010-07-09 20:43:13 +00:00
|
|
|
|
2010-07-13 00:23:30 +00:00
|
|
|
if (Flags == MachineMemOperand::MOStore)
|
|
|
|
storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
|
|
|
|
else
|
|
|
|
loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
|
|
|
|
return --Pos;
|
2008-12-03 18:43:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// foldMemoryOperand - Same as the previous version except it allows folding
|
|
|
|
/// of any load and store from / to any address, not just from a specific
|
|
|
|
/// stack slot.
|
|
|
|
MachineInstr*
|
2010-07-09 17:29:08 +00:00
|
|
|
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
|
2008-12-03 18:43:12 +00:00
|
|
|
const SmallVectorImpl<unsigned> &Ops,
|
|
|
|
MachineInstr* LoadMI) const {
|
2011-12-07 07:15:52 +00:00
|
|
|
assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
|
2008-12-03 18:43:12 +00:00
|
|
|
#ifndef NDEBUG
|
|
|
|
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
|
|
|
|
assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
|
|
|
|
#endif
|
2010-07-09 17:29:08 +00:00
|
|
|
MachineBasicBlock &MBB = *MI->getParent();
|
|
|
|
MachineFunction &MF = *MBB.getParent();
|
2008-12-03 18:43:12 +00:00
|
|
|
|
|
|
|
// Ask the target to do the actual folding.
|
|
|
|
MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
|
|
|
|
if (!NewMI) return 0;
|
|
|
|
|
2010-07-09 17:29:08 +00:00
|
|
|
NewMI = MBB.insert(MI, NewMI);
|
|
|
|
|
2008-12-03 18:43:12 +00:00
|
|
|
// Copy the memoperands from the load to the folded instruction.
|
2009-09-25 20:36:54 +00:00
|
|
|
NewMI->setMemRefs(LoadMI->memoperands_begin(),
|
|
|
|
LoadMI->memoperands_end());
|
2008-12-03 18:43:12 +00:00
|
|
|
|
|
|
|
return NewMI;
|
|
|
|
}
|
2009-10-09 23:27:56 +00:00
|
|
|
|
2010-06-12 00:11:53 +00:00
|
|
|
bool TargetInstrInfo::
|
|
|
|
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
|
|
|
|
AliasAnalysis *AA) const {
|
2009-10-09 23:27:56 +00:00
|
|
|
const MachineFunction &MF = *MI->getParent()->getParent();
|
|
|
|
const MachineRegisterInfo &MRI = MF.getRegInfo();
|
|
|
|
const TargetMachine &TM = MF.getTarget();
|
|
|
|
const TargetInstrInfo &TII = *TM.getInstrInfo();
|
|
|
|
|
2011-09-01 18:27:51 +00:00
|
|
|
// Remat clients assume operand 0 is the defined register.
|
|
|
|
if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
|
|
|
|
return false;
|
|
|
|
unsigned DefReg = MI->getOperand(0).getReg();
|
|
|
|
|
2011-09-01 17:18:50 +00:00
|
|
|
// A sub-register definition can only be rematerialized if the instruction
|
|
|
|
// doesn't read the other parts of the register. Otherwise it is really a
|
|
|
|
// read-modify-write operation on the full virtual register which cannot be
|
|
|
|
// moved safely.
|
2011-09-01 18:27:51 +00:00
|
|
|
if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
|
|
|
|
MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
|
2011-09-01 17:18:50 +00:00
|
|
|
return false;
|
|
|
|
|
2009-10-09 23:27:56 +00:00
|
|
|
// A load from a fixed stack slot can be rematerialized. This may be
|
|
|
|
// redundant with subsequent checks, but it's target-independent,
|
|
|
|
// simple, and a common case.
|
|
|
|
int FrameIdx = 0;
|
|
|
|
if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
|
|
|
|
MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// Avoid instructions obviously unsafe for remat.
|
2011-12-07 07:15:52 +00:00
|
|
|
if (MI->isNotDuplicable() || MI->mayStore() ||
|
2011-01-07 23:50:32 +00:00
|
|
|
MI->hasUnmodeledSideEffects())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Don't remat inline asm. We have no idea how expensive it is
|
|
|
|
// even if it's side effect free.
|
|
|
|
if (MI->isInlineAsm())
|
2009-10-09 23:27:56 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Avoid instructions which load from potentially varying memory.
|
2011-12-07 07:15:52 +00:00
|
|
|
if (MI->mayLoad() && !MI->isInvariantLoad(AA))
|
2009-10-09 23:27:56 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// If any of the registers accessed are non-constant, conservatively assume
|
|
|
|
// the instruction is not rematerializable.
|
|
|
|
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
|
|
|
|
const MachineOperand &MO = MI->getOperand(i);
|
|
|
|
if (!MO.isReg()) continue;
|
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
if (Reg == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Check for a well-behaved physical register.
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
|
|
|
|
if (MO.isUse()) {
|
|
|
|
// If the physreg has no defs anywhere, it's just an ambient register
|
|
|
|
// and we can freely move its uses. Alternatively, if it's allocatable,
|
|
|
|
// it could get allocated to something with a def during allocation.
|
2012-01-16 22:34:08 +00:00
|
|
|
if (!MRI.isConstantPhysReg(Reg, MF))
|
2009-10-09 23:27:56 +00:00
|
|
|
return false;
|
|
|
|
} else {
|
|
|
|
// A physreg def. We can't remat it.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2011-09-01 18:27:51 +00:00
|
|
|
// Only allow one virtual-register def. There may be multiple defs of the
|
|
|
|
// same virtual register, though.
|
|
|
|
if (MO.isDef() && Reg != DefReg)
|
2009-10-09 23:27:56 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Don't allow any virtual-register uses. Rematting an instruction with
|
|
|
|
// virtual register uses would length the live ranges of the uses, which
|
|
|
|
// is not necessarily a good idea, certainly not "trivial".
|
|
|
|
if (MO.isUse())
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Everything checked out.
|
|
|
|
return true;
|
|
|
|
}
|
2010-06-14 21:06:53 +00:00
|
|
|
|
2010-06-18 23:09:54 +00:00
|
|
|
/// isSchedulingBoundary - Test if the given instruction should be
|
|
|
|
/// considered a scheduling boundary. This primarily includes labels
|
|
|
|
/// and terminators.
|
|
|
|
bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
|
|
|
|
const MachineBasicBlock *MBB,
|
|
|
|
const MachineFunction &MF) const{
|
|
|
|
// Terminators and labels can't be scheduled around.
|
2011-12-07 07:15:52 +00:00
|
|
|
if (MI->isTerminator() || MI->isLabel())
|
2010-06-18 23:09:54 +00:00
|
|
|
return true;
|
|
|
|
|
|
|
|
// Don't attempt to schedule around any instruction that defines
|
|
|
|
// a stack-oriented pointer, as it's unlikely to be profitable. This
|
|
|
|
// saves compile time, because it doesn't require every single
|
|
|
|
// stack slot reference to depend on the instruction that does the
|
|
|
|
// modification.
|
|
|
|
const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
|
|
|
|
if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-01-21 05:51:33 +00:00
|
|
|
// Provide a global flag for disabling the PreRA hazard recognizer that targets
|
|
|
|
// may choose to honor.
|
|
|
|
bool TargetInstrInfoImpl::usePreRAHazardRecognizer() const {
|
|
|
|
return !DisableHazardRecognizer;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Default implementation of CreateTargetRAHazardRecognizer.
|
Various bits of framework needed for precise machine-level selection
DAG scheduling during isel. Most new functionality is currently
guarded by -enable-sched-cycles and -enable-sched-hazard.
Added InstrItineraryData::IssueWidth field, currently derived from
ARM itineraries, but could be initialized differently on other targets.
Added ScheduleHazardRecognizer::MaxLookAhead to indicate whether it is
active, and if so how many cycles of state it holds.
Added SchedulingPriorityQueue::HasReadyFilter to allowing gating entry
into the scheduler's available queue.
ScoreboardHazardRecognizer now accesses the ScheduleDAG in order to
get information about it's SUnits, provides RecedeCycle for bottom-up
scheduling, correctly computes scoreboard depth, tracks IssueCount, and
considers potential stall cycles when checking for hazards.
ScheduleDAGRRList now models machine cycles and hazards (under
flags). It tracks MinAvailableCycle, drives the hazard recognizer and
priority queue's ready filter, manages a new PendingQueue, properly
accounts for stall cycles, etc.
llvm-svn: 122541
2010-12-24 05:03:26 +00:00
|
|
|
ScheduleHazardRecognizer *TargetInstrInfoImpl::
|
|
|
|
CreateTargetHazardRecognizer(const TargetMachine *TM,
|
|
|
|
const ScheduleDAG *DAG) const {
|
|
|
|
// Dummy hazard recognizer allows all instructions to issue.
|
|
|
|
return new ScheduleHazardRecognizer();
|
|
|
|
}
|
|
|
|
|
2010-06-14 21:06:53 +00:00
|
|
|
// Default implementation of CreateTargetPostRAHazardRecognizer.
|
|
|
|
ScheduleHazardRecognizer *TargetInstrInfoImpl::
|
Various bits of framework needed for precise machine-level selection
DAG scheduling during isel. Most new functionality is currently
guarded by -enable-sched-cycles and -enable-sched-hazard.
Added InstrItineraryData::IssueWidth field, currently derived from
ARM itineraries, but could be initialized differently on other targets.
Added ScheduleHazardRecognizer::MaxLookAhead to indicate whether it is
active, and if so how many cycles of state it holds.
Added SchedulingPriorityQueue::HasReadyFilter to allowing gating entry
into the scheduler's available queue.
ScoreboardHazardRecognizer now accesses the ScheduleDAG in order to
get information about it's SUnits, provides RecedeCycle for bottom-up
scheduling, correctly computes scoreboard depth, tracks IssueCount, and
considers potential stall cycles when checking for hazards.
ScheduleDAGRRList now models machine cycles and hazards (under
flags). It tracks MinAvailableCycle, drives the hazard recognizer and
priority queue's ready filter, manages a new PendingQueue, properly
accounts for stall cycles, etc.
llvm-svn: 122541
2010-12-24 05:03:26 +00:00
|
|
|
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
|
|
|
|
const ScheduleDAG *DAG) const {
|
|
|
|
return (ScheduleHazardRecognizer *)
|
|
|
|
new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
|
2010-06-14 21:06:53 +00:00
|
|
|
}
|
2011-12-15 22:58:58 +00:00
|
|
|
|
|
|
|
int
|
2011-12-19 20:06:03 +00:00
|
|
|
TargetInstrInfoImpl::getOperandLatency(const InstrItineraryData *ItinData,
|
|
|
|
SDNode *DefNode, unsigned DefIdx,
|
|
|
|
SDNode *UseNode, unsigned UseIdx) const {
|
2011-12-15 22:58:58 +00:00
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!DefNode->isMachineOpcode())
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
|
|
|
|
if (!UseNode->isMachineOpcode())
|
|
|
|
return ItinData->getOperandCycle(DefClass, DefIdx);
|
|
|
|
unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
|
|
|
|
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
|
|
|
|
}
|
|
|
|
|
2011-12-19 20:06:03 +00:00
|
|
|
int TargetInstrInfoImpl::getInstrLatency(const InstrItineraryData *ItinData,
|
|
|
|
SDNode *N) const {
|
2011-12-15 22:58:58 +00:00
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!N->isMachineOpcode())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
|
|
|
|
}
|
|
|
|
|