CodeGen: Use MachineInstr& in TargetInstrInfo, NFC

This is mostly a mechanical change to make the TargetInstrInfo API take
MachineInstr& (instead of MachineInstr* or MachineBasicBlock::iterator)
when the argument is expected to be a valid MachineInstr.  This is a
general API improvement.
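
For illustration, the shape of the change for one hook (the signatures
are taken from the TargetInstrInfo.h diff below; the call sites are
representative sketches, not lines from this patch):

    // Before: a pointer parameter suggests null might be meaningful, even
    // though every caller must pass a valid instruction.
    virtual bool isAsCheapAsAMove(const MachineInstr *MI) const;
    bool Cheap = TII->isAsCheapAsAMove(&MI);

    // After: the reference encodes the "must be valid" precondition in the
    // type, and call sites drop the address-of operator.
    virtual bool isAsCheapAsAMove(const MachineInstr &MI) const;
    bool Cheap = TII->isAsCheapAsAMove(MI);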

Although it would be possible to do this one function at a time, that
would demand a quadratic amount of churn since many of these functions
call each other.  Instead I've done everything as a block and just
updated what was necessary.

The fixes are mostly mechanical: adding and removing `*` and `&`
operators.  The only non-mechanical change is to split
ARMBaseInstrInfo::getOperandLatencyImpl out from
ARMBaseInstrInfo::getOperandLatency.  Previously, getOperandLatency took
a `MachineInstr*` which it updated to point at the instruction bundle
leader; now it calls getOperandLatencyImpl with either the same
`MachineInstr&` or the bundle leader.

As a side effect, this removes a bunch of MachineInstr* to
MachineBasicBlock::iterator implicit conversions, a necessary step
toward fixing PR26753.
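
The hazard those conversions hide looks like this (an illustrative
helper, not code from this patch):

    // Before, `MBB->insert(MI, NewMI)` compiled via the implicit
    // MachineInstr* -> MachineBasicBlock::iterator conversion, blurring
    // the line between an instruction and a position in a block.  With
    // TargetInstrInfo passing MachineInstr&, the few places that really
    // need an iterator spell the conversion out:
    static void insertBefore(MachineInstr *MI, MachineInstr *NewMI) {
      MachineBasicBlock *MBB = MI->getParent();
      MBB->insert(MI->getIterator(), NewMI);
    }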

Note: I updated WebAssembly, Lanai, and AVR (despite their being off by
default) since it turned out to be easy.  I couldn't run tests
for AVR since llc doesn't link with it turned on.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@274189 91177308-0d34-0410-b5e6-96231b3b80d8
Duncan P. N. Exon Smith 2016-06-30 00:01:54 +00:00
parent cd92e27aaf
commit 567409db69
92 changed files with 3285 additions and 3330 deletions

include/llvm/Target/TargetInstrInfo.h

@ -78,10 +78,10 @@ public:
/// This means the only allowed uses are constants and unallocatable physical
/// registers so that the instruction's result is independent of the place
/// in the function.
bool isTriviallyReMaterializable(const MachineInstr *MI,
bool isTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA = nullptr) const {
return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF ||
(MI->getDesc().isRematerializable() &&
return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
(MI.getDesc().isRematerializable() &&
(isReallyTriviallyReMaterializable(MI, AA) ||
isReallyTriviallyReMaterializableGeneric(MI, AA)));
}
@ -94,7 +94,7 @@ protected:
/// than producing a value, or if it requires any address registers that are
/// not always available.
/// Requirements must be checked as stated in isTriviallyReMaterializable().
virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const {
return false;
}
@ -114,8 +114,7 @@ protected:
/// Do not call this method for a non-commutable instruction.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
virtual MachineInstr *commuteInstructionImpl(MachineInstr *MI,
bool NewMI,
virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const;
@ -139,7 +138,7 @@ private:
/// set and the target hook isReallyTriviallyReMaterializable returns false,
/// this function does target-independent tests to determine if the
/// instruction is really trivially rematerializable.
bool isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI,
AliasAnalysis *AA) const;
public:
@ -157,7 +156,7 @@ public:
/// as part of a call sequence. By default, only call frame setup/destroy
/// instructions adjust the stack, but targets may want to override this
/// to enable more fine-grained adjustment, or adjust by a different value.
virtual int getSPAdjust(const MachineInstr *MI) const;
virtual int getSPAdjust(const MachineInstr &MI) const;
/// Return true if the instruction is a "coalescable" extension instruction.
/// That is, it's like a copy where it's legal for the source to overlap the
@ -175,14 +174,14 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
/// Check for post-frame ptr elimination stack locations as well.
/// This uses a heuristic so it isn't reliable for correctness.
virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
@ -193,7 +192,7 @@ public:
/// If not, return false. Unlike isLoadFromStackSlot, this returns true for
/// any instruction that loads from the stack. This is just a hint, as some
/// cases may be missed.
virtual bool hasLoadFromStackSlot(const MachineInstr *MI,
virtual bool hasLoadFromStackSlot(const MachineInstr &MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const;
@ -202,14 +201,14 @@ public:
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
/// Check for post-frame ptr elimination stack locations as well.
/// This uses a heuristic, so it isn't reliable for correctness.
virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
@ -220,14 +219,14 @@ public:
/// If not, return false. Unlike isStoreToStackSlot,
/// this returns true for any instruction that stores to the
/// stack. This is just a hint, as some cases may be missed.
virtual bool hasStoreToStackSlot(const MachineInstr *MI,
virtual bool hasStoreToStackSlot(const MachineInstr &MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const;
/// Return true if the specified machine instruction
/// is a copy of one stack slot to another and has no other effect.
/// Provide the identity of the two frame indices.
virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
int &SrcFrameIndex) const {
return false;
}
@ -253,8 +252,8 @@ public:
///
/// Targets for different archs need to override this, and different
/// micro-architectures can also be finely tuned inside.
virtual bool isAsCheapAsAMove(const MachineInstr *MI) const {
return MI->isAsCheapAsAMove();
virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
return MI.isAsCheapAsAMove();
}
/// Return true if the instruction should be sunk by MachineSink.
@ -275,9 +274,8 @@ public:
/// DestReg:SubIdx. Any existing subreg index is preserved or composed with
/// SubIdx.
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
MachineBasicBlock::iterator MI, unsigned DestReg,
unsigned SubIdx, const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const;
/// Create a duplicate of the Orig instruction in MF. This is like
@ -285,7 +283,7 @@ public:
/// that are required to be unique.
///
/// The instruction must be duplicable as indicated by isNotDuplicable().
virtual MachineInstr *duplicate(MachineInstr *Orig,
virtual MachineInstr *duplicate(MachineInstr &Orig,
MachineFunction &MF) const;
/// This method must be implemented by targets that
@ -298,9 +296,9 @@ public:
/// This method returns a null pointer if the transformation cannot be
/// performed; otherwise it returns the last new instruction.
///
virtual MachineInstr *
convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
MachineInstr &MI,
LiveVariables *LV) const {
return nullptr;
}
@ -327,8 +325,7 @@ public:
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
MachineInstr *
commuteInstruction(MachineInstr *MI,
bool NewMI = false,
commuteInstruction(MachineInstr &MI, bool NewMI = false,
unsigned OpIdx1 = CommuteAnyOperandIndex,
unsigned OpIdx2 = CommuteAnyOperandIndex) const;
@ -349,7 +346,7 @@ public:
/// findCommutedOpIndices(MI, Op1, Op2);
/// can be interpreted as a query asking to find an operand that would be
/// commutable with operand #1.
virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
/// A pair composed of a register and a sub-register index.
@ -436,8 +433,8 @@ public:
/// are deemed identical except for defs. If this function is called when the
/// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
/// aggressive checks.
virtual bool produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1,
virtual bool produceSameValue(const MachineInstr &MI0,
const MachineInstr &MI1,
const MachineRegisterInfo *MRI = nullptr) const;
/// Analyze the branching code at the end of MBB, returning
@ -713,11 +710,11 @@ public:
/// @param FalseOp Operand number of the value selected when Cond is false.
/// @param Optimizable Returned as true if MI is optimizable.
/// @returns False on success.
virtual bool analyzeSelect(const MachineInstr *MI,
virtual bool analyzeSelect(const MachineInstr &MI,
SmallVectorImpl<MachineOperand> &Cond,
unsigned &TrueOp, unsigned &FalseOp,
bool &Optimizable) const {
assert(MI && MI->getDesc().isSelect() && "MI must be a select instruction");
assert(MI.getDesc().isSelect() && "MI must be a select instruction");
return true;
}
@ -736,7 +733,7 @@ public:
/// MI. Has to be updated with any newly created MI or deleted ones.
/// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
/// @returns Optimized instruction or NULL.
virtual MachineInstr *optimizeSelect(MachineInstr *MI,
virtual MachineInstr *optimizeSelect(MachineInstr &MI,
SmallPtrSetImpl<MachineInstr *> &NewMIs,
bool PreferFalse = false) const {
// This function must be implemented if Optimizable is ever set.
@ -789,9 +786,7 @@ public:
/// into real instructions. The target can edit MI in place, or it can insert
/// new instructions and erase MI. The function should return true if
/// anything was changed.
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
return false;
}
virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
/// Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
@ -799,15 +794,14 @@ public:
/// operand folded, otherwise NULL is returned.
/// The new instruction is inserted before MI, and the client is responsible
/// for removing the old instruction.
MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
ArrayRef<unsigned> Ops, int FrameIndex,
MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
int FrameIndex,
LiveIntervals *LIS = nullptr) const;
/// Same as the previous version except it allows folding of any load and
/// store from / to any address, not just from a specific stack slot.
MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
ArrayRef<unsigned> Ops,
MachineInstr *LoadMI,
MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineInstr &LoadMI,
LiveIntervals *LIS = nullptr) const;
/// Return true when there is potentially a faster code sequence
@ -885,10 +879,11 @@ protected:
/// take care of adding a MachineMemOperand to the newly created instruction.
/// The instruction and any auxiliary instructions necessary will be inserted
/// at InsertPt.
virtual MachineInstr *foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS = nullptr) const {
virtual MachineInstr *
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS = nullptr) const {
return nullptr;
}
@ -898,8 +893,8 @@ protected:
/// The instruction and any auxiliary instructions necessary will be inserted
/// at InsertPt.
virtual MachineInstr *foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI,
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
LiveIntervals *LIS = nullptr) const {
return nullptr;
}
@ -951,9 +946,10 @@ public:
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr*> &NewMIs) const{
virtual bool
unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr *> &NewMIs) const {
return false;
}
@ -999,7 +995,7 @@ public:
/// Get the base register and byte offset of an instruction that reads/writes
/// memory.
virtual bool getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
int64_t &Offset,
const TargetRegisterInfo *TRI) const {
return false;
@ -1009,16 +1005,16 @@ public:
virtual bool enableClusterStores() const { return false; }
virtual bool shouldClusterMemOps(MachineInstr *FirstLdSt,
MachineInstr *SecondLdSt,
virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt,
MachineInstr &SecondLdSt,
unsigned NumLoads) const {
return false;
}
/// Can this target fuse the given instructions if they are scheduled
/// adjacent?
virtual bool shouldScheduleAdjacent(MachineInstr* First,
MachineInstr *Second) const {
virtual bool shouldScheduleAdjacent(MachineInstr &First,
MachineInstr &Second) const {
return false;
}
@ -1083,7 +1079,7 @@ public:
/// Test if the given instruction should be considered a scheduling boundary.
/// This primarily includes labels and terminators.
virtual bool isSchedulingBoundary(const MachineInstr *MI,
virtual bool isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const;
@ -1125,22 +1121,20 @@ public:
/// in SrcReg and SrcReg2 if it has two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
virtual bool analyzeCompare(const MachineInstr *MI,
unsigned &SrcReg, unsigned &SrcReg2,
int &Mask, int &Value) const {
virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask, int &Value) const {
return false;
}
/// See if the comparison instruction can be converted
/// into something more efficient. E.g., on ARM most instructions can set the
/// flags register, obviating the need for a separate CMP.
virtual bool optimizeCompareInstr(MachineInstr *CmpInstr,
unsigned SrcReg, unsigned SrcReg2,
int Mask, int Value,
virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int Mask, int Value,
const MachineRegisterInfo *MRI) const {
return false;
}
virtual bool optimizeCondBranch(MachineInstr *MI) const { return false; }
virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
/// Try to remove the load by folding it to a register operand at the use.
/// We fold the load instructions if and only if the
@ -1149,10 +1143,10 @@ public:
/// defined by the load we are trying to fold. DefMI returns the machine
/// instruction that defines FoldAsLoadDefReg, and the function returns
/// the machine instruction generated due to folding.
virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
return nullptr;
}
@ -1162,7 +1156,7 @@ public:
/// then the caller may assume that DefMI has been erased from its parent
/// block. The caller may assume that it will not be erased by this
/// function otherwise.
virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
unsigned Reg, MachineRegisterInfo *MRI) const {
return false;
}
@ -1172,7 +1166,7 @@ public:
/// IssueWidth is the number of microops that can be dispatched each
/// cycle. An instruction with zero microops takes no dispatch resources.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
const MachineInstr *MI) const;
const MachineInstr &MI) const;
/// Return true for pseudo instructions that don't consume any
/// machine resources in their current form. These are common cases that the
@ -1195,22 +1189,31 @@ public:
/// by a target. Use computeOperandLatency to get the best estimate of
/// latency.
virtual int getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI,
const MachineInstr &DefMI, unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const;
/// Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known.
/// Compute and return the latency of the given data dependent def and use
/// when the operand indices are already known. UseMI may be \c nullptr for
/// an unknown use.
///
/// FindMin may be set to get the minimum vs. expected latency. Minimum
/// latency is used for scheduling groups, while expected latency is for
/// instruction cost and critical path.
///
/// Depending on the subtarget's itinerary properties, this may or may not
/// need to call getOperandLatency(). For most subtargets, we don't need
/// DefIdx or UseIdx to compute min latency.
unsigned computeOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx)
const;
const MachineInstr &DefMI, unsigned DefIdx,
const MachineInstr *UseMI,
unsigned UseIdx) const;
/// Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
const MachineInstr &MI,
unsigned *PredCost = nullptr) const;
virtual unsigned getPredicationCost(const MachineInstr &MI) const;
@ -1218,12 +1221,12 @@ public:
virtual int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const;
/// Return the default expected latency for a def based on it's opcode.
/// Return the default expected latency for a def based on its opcode.
unsigned defaultDefLatency(const MCSchedModel &SchedModel,
const MachineInstr *DefMI) const;
const MachineInstr &DefMI) const;
int computeDefOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI) const;
const MachineInstr &DefMI) const;
/// Return true if this opcode has high latency to its result.
virtual bool isHighLatencyDef(int opc) const { return false; }
@ -1233,23 +1236,23 @@ public:
/// it 'high'. This is used by optimization passes such as machine LICM to
/// determine whether it makes sense to hoist an instruction out even in a
/// high register pressure situation.
virtual
bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx) const {
virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr &DefMI, unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const {
return false;
}
/// Compute operand latency of a def of 'Reg'. Return true
/// if the target considered it 'low'.
virtual
bool hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI, unsigned DefIdx) const;
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr &DefMI,
unsigned DefIdx) const;
/// Perform target-specific instruction verification.
virtual
bool verifyInstruction(const MachineInstr *MI, StringRef &ErrInfo) const {
virtual bool verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const {
return true;
}
@ -1273,7 +1276,7 @@ public:
/// execution domain.
///
virtual std::pair<uint16_t, uint16_t>
getExecutionDomain(const MachineInstr *MI) const {
getExecutionDomain(const MachineInstr &MI) const {
return std::make_pair(0, 0);
}
@ -1281,7 +1284,7 @@ public:
///
/// The bit (1 << Domain) must be set in the mask returned from
/// getExecutionDomain(MI).
virtual void setExecutionDomain(MachineInstr *MI, unsigned Domain) const {}
virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
/// Returns the preferred minimum clearance
/// before an instruction with an unwanted partial register update.
@ -1323,7 +1326,7 @@ public:
/// allows the target to insert a dependency breaking instruction.
///
virtual unsigned
getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {
// The default implementation returns 0 for no partial register dependency.
return 0;
@ -1343,7 +1346,7 @@ public:
/// This hook works similarly to getPartialRegUpdateClearance, except that it
/// does not take an operand index. Instead sets \p OpNum to the index of the
/// unused register.
virtual unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
const TargetRegisterInfo *TRI) const {
// The default implementation returns 0 for no undef register dependency.
return 0;
@ -1366,9 +1369,8 @@ public:
/// An <imp-kill> operand should be added to MI if an instruction was
/// inserted. This ties the instructions together in the post-ra scheduler.
///
virtual void
breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {}
virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {}
/// Create machine specific model for scheduling.
virtual DFAPacketizer *
@ -1381,11 +1383,11 @@ public:
// memory addresses. This function returns true if two MIs access different
// memory addresses and false otherwise.
virtual bool
areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const {
assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
assert((MIa.mayLoad() || MIa.mayStore()) &&
"MIa must load from or modify a memory location");
assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
assert((MIb.mayLoad() || MIb.mayStore()) &&
"MIb must load from or modify a memory location");
return false;
}
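
Downstream targets that override these hooks need the same mechanical
update.  A sketch of what an updated override looks like (hypothetical
XYZ target and XYZ::LD_FI opcode, shown only to illustrate the new
signature):

    unsigned XYZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
      // With a reference there is no null check; the contract is in the type.
      if (MI.getOpcode() == XYZ::LD_FI && MI.getOperand(1).isFI()) {
        FrameIndex = MI.getOperand(1).getIndex();
        return MI.getOperand(0).getReg();
      }
      return 0;
    }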

lib/CodeGen/AsmPrinter/AsmPrinter.cpp

@ -636,20 +636,20 @@ static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
// We assume a single instruction only has a spill or reload, not
// both.
const MachineMemOperand *MMO;
if (TII->isLoadFromStackSlotPostFE(&MI, FI)) {
if (TII->isLoadFromStackSlotPostFE(MI, FI)) {
if (FrameInfo->isSpillSlotObjectIndex(FI)) {
MMO = *MI.memoperands_begin();
CommentOS << MMO->getSize() << "-byte Reload\n";
}
} else if (TII->hasLoadFromStackSlot(&MI, MMO, FI)) {
} else if (TII->hasLoadFromStackSlot(MI, MMO, FI)) {
if (FrameInfo->isSpillSlotObjectIndex(FI))
CommentOS << MMO->getSize() << "-byte Folded Reload\n";
} else if (TII->isStoreToStackSlotPostFE(&MI, FI)) {
} else if (TII->isStoreToStackSlotPostFE(MI, FI)) {
if (FrameInfo->isSpillSlotObjectIndex(FI)) {
MMO = *MI.memoperands_begin();
CommentOS << MMO->getSize() << "-byte Spill\n";
}
} else if (TII->hasStoreToStackSlot(&MI, MMO, FI)) {
} else if (TII->hasStoreToStackSlot(MI, MMO, FI)) {
if (FrameInfo->isSpillSlotObjectIndex(FI))
CommentOS << MMO->getSize() << "-byte Folded Spill\n";
}

lib/CodeGen/CalcSpillWeights.cpp

@ -121,7 +121,7 @@ static bool isRematerializable(const LiveInterval &LI,
}
}
if (!TII.isTriviallyReMaterializable(MI, LIS.getAliasAnalysis()))
if (!TII.isTriviallyReMaterializable(*MI, LIS.getAliasAnalysis()))
return false;
}
return true;

lib/CodeGen/ExecutionDepsFix.cpp

@ -320,7 +320,7 @@ void ExeDepsFix::collapse(DomainValue *dv, unsigned domain) {
// Collapse all the instructions.
while (!dv->Instrs.empty())
TII->setExecutionDomain(dv->Instrs.pop_back_val(), domain);
TII->setExecutionDomain(*dv->Instrs.pop_back_val(), domain);
dv->setSingleDomain(domain);
// If there are multiple users, give them new, unique DomainValues.
@ -460,7 +460,7 @@ void ExeDepsFix::visitInstr(MachineInstr *MI) {
return;
// Update instructions with explicit execution domains.
std::pair<uint16_t, uint16_t> DomP = TII->getExecutionDomain(MI);
std::pair<uint16_t, uint16_t> DomP = TII->getExecutionDomain(*MI);
if (DomP.first) {
if (DomP.second)
visitSoftInstr(MI, DomP.second);
@ -508,7 +508,7 @@ void ExeDepsFix::processDefs(MachineInstr *MI, bool Kill) {
// Break dependence on undef uses. Do this before updating LiveRegs below.
unsigned OpNum;
unsigned Pref = TII->getUndefRegClearance(MI, OpNum, TRI);
unsigned Pref = TII->getUndefRegClearance(*MI, OpNum, TRI);
if (Pref) {
if (shouldBreakDependence(MI, OpNum, Pref))
UndefReads.push_back(std::make_pair(MI, OpNum));
@ -531,9 +531,9 @@ void ExeDepsFix::processDefs(MachineInstr *MI, bool Kill) {
// Check clearance before partial register updates.
// Call breakDependence before setting LiveRegs[rx].Def.
unsigned Pref = TII->getPartialRegUpdateClearance(MI, i, TRI);
unsigned Pref = TII->getPartialRegUpdateClearance(*MI, i, TRI);
if (Pref && shouldBreakDependence(MI, i, Pref))
TII->breakPartialRegDependency(MI, i, TRI);
TII->breakPartialRegDependency(*MI, i, TRI);
// How many instructions since rx was last written?
LiveRegs[rx].Def = CurInstr;
@ -571,7 +571,7 @@ void ExeDepsFix::processUndefReads(MachineBasicBlock *MBB) {
if (UndefMI == &I) {
if (!LiveRegSet.contains(UndefMI->getOperand(OpIdx).getReg()))
TII->breakPartialRegDependency(UndefMI, OpIdx, TRI);
TII->breakPartialRegDependency(*UndefMI, OpIdx, TRI);
UndefReads.pop_back();
if (UndefReads.empty())
@ -645,7 +645,7 @@ void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
// If the collapsed operands force a single domain, propagate the collapse.
if (isPowerOf2_32(available)) {
unsigned domain = countTrailingZeros(available);
TII->setExecutionDomain(mi, domain);
TII->setExecutionDomain(*mi, domain);
visitHardInstr(mi, domain);
return;
}

lib/CodeGen/ExpandPostRAPseudos.cpp

@ -192,12 +192,12 @@ bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) {
mbbi != mbbe; ++mbbi) {
for (MachineBasicBlock::iterator mi = mbbi->begin(), me = mbbi->end();
mi != me;) {
MachineInstr *MI = mi;
MachineInstr &MI = *mi;
// Advance iterator here because MI may be erased.
++mi;
// Only expand pseudos.
if (!MI->isPseudo())
if (!MI.isPseudo())
continue;
// Give targets a chance to expand even standard pseudos.
@ -207,12 +207,12 @@ bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) {
}
// Expand standard pseudos.
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
case TargetOpcode::SUBREG_TO_REG:
MadeChange |= LowerSubregToReg(MI);
MadeChange |= LowerSubregToReg(&MI);
break;
case TargetOpcode::COPY:
MadeChange |= LowerCopy(MI);
MadeChange |= LowerCopy(&MI);
break;
case TargetOpcode::DBG_VALUE:
continue;

lib/CodeGen/ImplicitNullChecks.cpp

@ -421,14 +421,14 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
for (auto MII = NotNullSucc->begin(), MIE = NotNullSucc->end(); MII != MIE;
++MII) {
MachineInstr *MI = &*MII;
MachineInstr &MI = *MII;
unsigned BaseReg;
int64_t Offset;
MachineInstr *Dependency = nullptr;
if (TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
if (MI->mayLoad() && !MI->isPredicable() && BaseReg == PointerReg &&
Offset < PageSize && MI->getDesc().getNumDefs() <= 1 &&
HD.isSafeToHoist(MI, Dependency)) {
if (MI.mayLoad() && !MI.isPredicable() && BaseReg == PointerReg &&
Offset < PageSize && MI.getDesc().getNumDefs() <= 1 &&
HD.isSafeToHoist(&MI, Dependency)) {
auto DependencyOperandIsOk = [&](MachineOperand &MO) {
assert(!(MO.isReg() && MO.isUse()) &&
@ -463,13 +463,13 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
all_of(Dependency->operands(), DependencyOperandIsOk);
if (DependencyOperandsAreOk) {
NullCheckList.emplace_back(MI, MBP.ConditionDef, &MBB, NotNullSucc,
NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
NullSucc, Dependency);
return true;
}
}
HD.rememberInstruction(MI);
HD.rememberInstruction(&MI);
if (HD.isClobbered())
return false;
}

lib/CodeGen/InlineSpiller.cpp

@ -236,13 +236,13 @@ Spiller *createInlineSpiller(MachineFunctionPass &pass,
/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
if (!MI->isFullCopy())
static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
if (!MI.isFullCopy())
return 0;
if (MI->getOperand(0).getReg() == Reg)
return MI->getOperand(1).getReg();
if (MI->getOperand(1).getReg() == Reg)
return MI->getOperand(0).getReg();
if (MI.getOperand(0).getReg() == Reg)
return MI.getOperand(1).getReg();
if (MI.getOperand(1).getReg() == Reg)
return MI.getOperand(0).getReg();
return 0;
}
@ -268,7 +268,7 @@ bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
for (MachineRegisterInfo::reg_instr_nodbg_iterator
RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
E = MRI.reg_instr_nodbg_end(); RI != E; ) {
MachineInstr *MI = &*(RI++);
MachineInstr &MI = *RI++;
// Allow copies to/from Reg.
if (isFullCopyOf(MI, Reg))
@ -284,9 +284,9 @@ bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
continue;
// Allow a single additional instruction.
if (UseMI && MI != UseMI)
if (UseMI && &MI != UseMI)
return false;
UseMI = MI;
UseMI = &MI;
}
return true;
}
@ -307,14 +307,14 @@ void InlineSpiller::collectRegsToSpill() {
for (MachineRegisterInfo::reg_instr_iterator
RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
MachineInstr *MI = &*(RI++);
MachineInstr &MI = *RI++;
unsigned SnipReg = isFullCopyOf(MI, Reg);
if (!isSibling(SnipReg))
continue;
LiveInterval &SnipLI = LIS.getInterval(SnipReg);
if (!isSnippet(SnipLI))
continue;
SnippetCopies.insert(MI);
SnippetCopies.insert(&MI);
if (isRegToSpill(SnipReg))
continue;
RegsToSpill.push_back(SnipReg);
@ -426,10 +426,10 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
for (MachineRegisterInfo::use_instr_nodbg_iterator
UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
UI != E; ) {
MachineInstr *MI = &*(UI++);
if (!MI->isCopy() && !MI->mayStore())
MachineInstr &MI = *UI++;
if (!MI.isCopy() && !MI.mayStore())
continue;
SlotIndex Idx = LIS.getInstructionIndex(*MI);
SlotIndex Idx = LIS.getInstructionIndex(MI);
if (LI->getVNInfoAt(Idx) != VNI)
continue;
@ -448,12 +448,12 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
// Erase spills.
int FI;
if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
// eliminateDeadDefs won't normally remove stores, so switch opcode.
MI->setDesc(TII.get(TargetOpcode::KILL));
DeadDefs.push_back(MI);
MI.setDesc(TII.get(TargetOpcode::KILL));
DeadDefs.push_back(&MI);
++NumSpillsRemoved;
if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
if (HSpiller.rmFromMergeableSpills(&MI, StackSlot))
--NumSpills;
}
}
@ -656,10 +656,10 @@ void InlineSpiller::reMaterializeAll() {
/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
int FI = 0;
unsigned InstrReg = TII.isLoadFromStackSlot(MI, FI);
unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
bool IsLoad = InstrReg;
if (!IsLoad)
InstrReg = TII.isStoreToStackSlot(MI, FI);
InstrReg = TII.isStoreToStackSlot(*MI, FI);
// We have a stack access. Is it the right register and slot?
if (InstrReg != Reg || FI != StackSlot)
@ -765,8 +765,8 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
MachineInstrSpan MIS(MI);
MachineInstr *FoldMI =
LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI, &LIS)
: TII.foldMemoryOperand(MI, FoldOps, StackSlot, &LIS);
LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
: TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS);
if (!FoldMI)
return false;
@ -793,7 +793,7 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
}
int FI;
if (TII.isStoreToStackSlot(MI, FI) && HSpiller.rmFromMergeableSpills(MI, FI))
if (TII.isStoreToStackSlot(*MI, FI) && HSpiller.rmFromMergeableSpills(MI, FI))
--NumSpills;
LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
MI->eraseFromParent();
@ -913,7 +913,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
Idx = VNI->def;
// Check for a sibling copy.
unsigned SibReg = isFullCopyOf(MI, Reg);
unsigned SibReg = isFullCopyOf(*MI, Reg);
if (SibReg && isSibling(SibReg)) {
// This may actually be a copy between snippets.
if (isRegToSpill(SibReg)) {

lib/CodeGen/LiveRangeEdit.cpp

@ -53,7 +53,7 @@ bool LiveRangeEdit::checkRematerializable(VNInfo *VNI,
AliasAnalysis *aa) {
assert(DefMI && "Missing instruction");
ScannedRemattable = true;
if (!TII.isTriviallyReMaterializable(DefMI, aa))
if (!TII.isTriviallyReMaterializable(*DefMI, aa))
return false;
Remattable.insert(VNI);
return true;
@ -130,7 +130,7 @@ bool LiveRangeEdit::canRematerializeAt(Remat &RM, VNInfo *OrigVNI,
DefIdx = LIS.getInstructionIndex(*RM.OrigMI);
// If only cheap remats were requested, bail out early.
if (cheapAsAMove && !TII.isAsCheapAsAMove(RM.OrigMI))
if (cheapAsAMove && !TII.isAsCheapAsAMove(*RM.OrigMI))
return false;
// Verify that all used registers are available with the same values.
@ -147,7 +147,7 @@ SlotIndex LiveRangeEdit::rematerializeAt(MachineBasicBlock &MBB,
const TargetRegisterInfo &tri,
bool Late) {
assert(RM.OrigMI && "Invalid remat");
TII.reMaterialize(MBB, MI, DestReg, 0, RM.OrigMI, tri);
TII.reMaterialize(MBB, MI, DestReg, 0, *RM.OrigMI, tri);
// DestReg of the cloned instruction cannot be Dead. Set isDead of DestReg
// to false anyway in case the isDead flag of RM.OrigMI's dest register
// is true.
@ -205,7 +205,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)
return false;
MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI, &LIS);
MachineInstr *FoldMI = TII.foldMemoryOperand(*UseMI, Ops, *DefMI, &LIS);
if (!FoldMI)
return false;
DEBUG(dbgs() << " folded: " << *FoldMI);

lib/CodeGen/MachineCSE.cpp

@ -389,7 +389,7 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
// Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
// an immediate predecessor. We don't want to increase register pressure and
// end up causing other computation to be spilled.
if (TII->isAsCheapAsAMove(MI)) {
if (TII->isAsCheapAsAMove(*MI)) {
MachineBasicBlock *CSBB = CSMI->getParent();
MachineBasicBlock *BB = MI->getParent();
if (CSBB != BB && !CSBB->isSuccessor(BB))
@ -478,8 +478,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
// Commute commutable instructions.
bool Commuted = false;
if (!FoundCSE && MI->isCommutable()) {
MachineInstr *NewMI = TII->commuteInstruction(MI);
if (NewMI) {
if (MachineInstr *NewMI = TII->commuteInstruction(*MI)) {
Commuted = true;
FoundCSE = VNT.count(NewMI);
if (NewMI != MI) {
@ -488,7 +487,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
Changed = true;
} else if (!FoundCSE)
// MI was changed but it didn't help, commute it back!
(void)TII->commuteInstruction(MI);
(void)TII->commuteInstruction(*MI);
}
}

lib/CodeGen/MachineLICM.cpp

@ -428,7 +428,7 @@ void MachineLICM::ProcessMI(MachineInstr *MI,
if (Def && !RuledOut) {
int FI = INT_MIN;
if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
(TII->isLoadFromStackSlot(MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
(TII->isLoadFromStackSlot(*MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
Candidates.push_back(CandidateInfo(MI, Def, FI));
}
}
@ -982,7 +982,7 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
if (MOReg != Reg)
continue;
if (TII->hasHighOperandLatency(SchedModel, MRI, &MI, DefIdx, &UseMI, i))
if (TII->hasHighOperandLatency(SchedModel, MRI, MI, DefIdx, UseMI, i))
return true;
}
@ -996,7 +996,7 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
/// Return true if the instruction is marked "cheap" or the operand latency
/// between its def and a use is one or less.
bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
if (TII->isAsCheapAsAMove(&MI) || MI.isCopyLike())
if (TII->isAsCheapAsAMove(MI) || MI.isCopyLike())
return true;
bool isCheap = false;
@ -1010,7 +1010,7 @@ bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
if (TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
if (!TII->hasLowDefLatency(SchedModel, &MI, i))
if (!TII->hasLowDefLatency(SchedModel, MI, i))
return false;
isCheap = true;
}
@ -1086,7 +1086,7 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
// Rematerializable instructions should always be hoisted since the register
// allocator can just pull them down again when needed.
if (TII->isTriviallyReMaterializable(&MI, AA))
if (TII->isTriviallyReMaterializable(MI, AA))
return true;
// FIXME: If there are long latency loop-invariant instructions inside the
@ -1139,8 +1139,7 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
// High register pressure situation, only hoist if the instruction is going
// to be remat'ed.
if (!TII->isTriviallyReMaterializable(&MI, AA) &&
!MI.isInvariantLoad(AA)) {
if (!TII->isTriviallyReMaterializable(MI, AA) && !MI.isInvariantLoad(AA)) {
DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI);
return false;
}
@ -1177,10 +1176,9 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
unsigned Reg = MRI->createVirtualRegister(RC);
SmallVector<MachineInstr *, 2> NewMIs;
bool Success =
TII->unfoldMemoryOperand(MF, MI, Reg,
/*UnfoldLoad=*/true, /*UnfoldStore=*/false,
NewMIs);
bool Success = TII->unfoldMemoryOperand(MF, *MI, Reg,
/*UnfoldLoad=*/true,
/*UnfoldStore=*/false, NewMIs);
(void)Success;
assert(Success &&
"unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
@ -1221,7 +1219,7 @@ const MachineInstr*
MachineLICM::LookForDuplicate(const MachineInstr *MI,
std::vector<const MachineInstr*> &PrevMIs) {
for (const MachineInstr *PrevMI : PrevMIs)
if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : nullptr)))
if (TII->produceSameValue(*MI, *PrevMI, (PreRegAlloc ? MRI : nullptr)))
return PrevMI;
return nullptr;

lib/CodeGen/MachineScheduler.cpp

@ -406,7 +406,7 @@ static bool isSchedBoundary(MachineBasicBlock::iterator MI,
MachineBasicBlock *MBB,
MachineFunction *MF,
const TargetInstrInfo *TII) {
return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}
/// Main driver for both MachineScheduler and PostMachineScheduler.
@ -1402,7 +1402,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
SUnit *SU = MemOps[Idx];
unsigned BaseReg;
int64_t Offset;
if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
if (TII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseReg, Offset, TRI))
MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
}
if (MemOpRecords.size() < 2)
@ -1418,8 +1418,9 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
SUnit *SUa = MemOpRecords[Idx].SU;
SUnit *SUb = MemOpRecords[Idx+1].SU;
if (TII->shouldClusterMemOps(SUa->getInstr(), SUb->getInstr(), ClusterLength)
&& DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
if (TII->shouldClusterMemOps(*SUa->getInstr(), *SUb->getInstr(),
ClusterLength) &&
DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
<< SUb->NodeNum << ")\n");
// Copy successor edges from SUa to SUb. Interleaving computation
@ -1529,7 +1530,7 @@ void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
if (!HasDataDep(TRI, *Branch, *Pred))
continue;
if (!TII.shouldScheduleAdjacent(Pred, Branch))
if (!TII.shouldScheduleAdjacent(*Pred, *Branch))
continue;
// Create a single weak edge from SU to ExitSU. The only effect is to cause

lib/CodeGen/MachineSink.cpp

@ -366,7 +366,7 @@ bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
if (!CEBCandidates.insert(std::make_pair(From, To)).second)
return true;
if (!MI->isCopy() && !TII->isAsCheapAsAMove(MI))
if (!MI->isCopy() && !TII->isAsCheapAsAMove(*MI))
return true;
// MI is cheap, we probably don't want to break the critical edge for it.
@ -700,7 +700,7 @@ static bool SinkingPreventsImplicitNullCheck(MachineInstr *MI,
unsigned BaseReg;
int64_t Offset;
if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
if (!TII->getMemOpBaseRegImmOfs(*MI, BaseReg, Offset, TRI))
return false;
if (!(MI->mayLoad() && !MI->isPredicable()))

lib/CodeGen/MachineVerifier.cpp

@ -880,7 +880,7 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
}
StringRef ErrorInfo;
if (!TII->verifyInstruction(MI, ErrorInfo))
if (!TII->verifyInstruction(*MI, ErrorInfo))
report(ErrorInfo.data(), MI);
}

lib/CodeGen/PeepholeOptimizer.cpp

@ -564,13 +564,13 @@ bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
// physical register, we can try to optimize it.
unsigned SrcReg, SrcReg2;
int CmpMask, CmpValue;
if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
if (!TII->analyzeCompare(*MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
(SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
return false;
// Attempt to optimize the comparison instruction.
if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
if (TII->optimizeCompareInstr(*MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
++NumCmps;
return true;
}
@ -585,11 +585,11 @@ bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI,
unsigned FalseOp = 0;
bool Optimizable = false;
SmallVector<MachineOperand, 4> Cond;
if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable))
if (TII->analyzeSelect(*MI, Cond, TrueOp, FalseOp, Optimizable))
return false;
if (!Optimizable)
return false;
if (!TII->optimizeSelect(MI, LocalMIs))
if (!TII->optimizeSelect(*MI, LocalMIs))
return false;
MI->eraseFromParent();
++NumSelects;
@ -599,7 +599,7 @@ bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI,
/// \brief Check if a simpler conditional branch can be
/// generated.
bool PeepholeOptimizer::optimizeCondBranch(MachineInstr *MI) {
return TII->optimizeCondBranch(MI);
return TII->optimizeCondBranch(*MI);
}
/// \brief Try to find the next source that shares the same register file
@ -1351,7 +1351,7 @@ bool PeepholeOptimizer::foldImmediate(
continue;
DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
assert(II != ImmDefMIs.end() && "couldn't find immediate definition");
if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
if (TII->FoldImmediate(*MI, *II->second, Reg, MRI)) {
++NumImmFold;
return true;
}
@ -1636,10 +1636,8 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
// we need it for markUsesInDebugValueAsUndef().
unsigned FoldedReg = FoldAsLoadDefReg;
MachineInstr *DefMI = nullptr;
MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
FoldAsLoadDefReg,
DefMI);
if (FoldMI) {
if (MachineInstr *FoldMI =
TII->optimizeLoadInstr(*MI, MRI, FoldAsLoadDefReg, DefMI)) {
// Update LocalMIs since we replaced MI with FoldMI and deleted
// DefMI.
DEBUG(dbgs() << "Replacing: " << *MI);

lib/CodeGen/PostRASchedulerList.cpp

@ -340,7 +340,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
// Calls are not scheduling boundaries before register allocation, but
// post-ra we don't gain anything by scheduling across calls since we
// don't need to worry about register pressure.
if (MI->isCall() || TII->isSchedulingBoundary(MI, &MBB, Fn)) {
if (MI->isCall() || TII->isSchedulingBoundary(*MI, &MBB, Fn)) {
Scheduler.enterRegion(&MBB, I, Current, CurrentCount - Count);
Scheduler.setEndIndex(CurrentCount);
Scheduler.schedule();

lib/CodeGen/PrologEpilogInserter.cpp

@ -1056,7 +1056,7 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
if (I->getOpcode() == FrameSetupOpcode ||
I->getOpcode() == FrameDestroyOpcode) {
InsideCallSequence = (I->getOpcode() == FrameSetupOpcode);
SPAdj += TII.getSPAdjust(I);
SPAdj += TII.getSPAdjust(*I);
I = TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
continue;
@ -1135,7 +1135,7 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
// if I itself referred to a frame index, we shouldn't count its own
// adjustment.
if (MI && InsideCallSequence)
SPAdj += TII.getSPAdjust(MI);
SPAdj += TII.getSPAdjust(*MI);
if (DoIncr && I != BB->end()) ++I;

lib/CodeGen/RegisterCoalescer.cpp

@ -684,7 +684,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
// operands then all possible variants (i.e. op#1<->op#2, op#1<->op#3,
// op#2<->op#3) of commute transformation should be considered/tried here.
unsigned NewDstIdx = TargetInstrInfo::CommuteAnyOperandIndex;
if (!TII->findCommutedOpIndices(DefMI, UseOpIdx, NewDstIdx))
if (!TII->findCommutedOpIndices(*DefMI, UseOpIdx, NewDstIdx))
return false;
MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
@ -718,7 +718,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
// transformation. Start by commuting the instruction.
MachineBasicBlock *MBB = DefMI->getParent();
MachineInstr *NewMI =
TII->commuteInstruction(DefMI, false, UseOpIdx, NewDstIdx);
TII->commuteInstruction(*DefMI, false, UseOpIdx, NewDstIdx);
if (!NewMI)
return false;
if (TargetRegisterInfo::isVirtualRegister(IntA.reg) &&
@ -901,9 +901,9 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
IsDefCopy = true;
return false;
}
if (!TII->isAsCheapAsAMove(DefMI))
if (!TII->isAsCheapAsAMove(*DefMI))
return false;
if (!TII->isTriviallyReMaterializable(DefMI, AA))
if (!TII->isTriviallyReMaterializable(*DefMI, AA))
return false;
if (!definesFullReg(*DefMI, SrcReg))
return false;
@ -953,7 +953,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
MachineBasicBlock *MBB = CopyMI->getParent();
MachineBasicBlock::iterator MII =
std::next(MachineBasicBlock::iterator(CopyMI));
TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, DefMI, *TRI);
TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, *DefMI, *TRI);
MachineInstr *NewMI = std::prev(MII);
NewMI->setDebugLoc(DL);

lib/CodeGen/ScheduleDAGInstrs.cpp

@ -569,7 +569,7 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
"Dependency checked between two loads");
// Let the target decide if memory accesses cannot possibly overlap.
if (TII->areMemAccessesTriviallyDisjoint(MIa, MIb, AA))
if (TII->areMemAccessesTriviallyDisjoint(*MIa, *MIb, AA))
return false;
// To this point analysis is generic. From here on we do need AA.

lib/CodeGen/StackSlotColoring.cpp

@ -386,8 +386,7 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
break;
int FirstSS, SecondSS;
if (TII->isStackSlotCopy(I, FirstSS, SecondSS) &&
FirstSS == SecondSS &&
if (TII->isStackSlotCopy(*I, FirstSS, SecondSS) && FirstSS == SecondSS &&
FirstSS != -1) {
++NumDead;
changed = true;
@ -400,8 +399,10 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
unsigned LoadReg = 0;
unsigned StoreReg = 0;
if (!(LoadReg = TII->isLoadFromStackSlot(I, FirstSS))) continue;
if (!(StoreReg = TII->isStoreToStackSlot(NextMI, SecondSS))) continue;
if (!(LoadReg = TII->isLoadFromStackSlot(*I, FirstSS)))
continue;
if (!(StoreReg = TII->isStoreToStackSlot(*NextMI, SecondSS)))
continue;
if (FirstSS != SecondSS || LoadReg != StoreReg || FirstSS == -1) continue;
++NumDead;

lib/CodeGen/TailDuplicator.cpp

@ -341,7 +341,7 @@ void TailDuplicator::duplicateInstruction(
MachineFunction &MF,
DenseMap<unsigned, RegSubRegPair> &LocalVRMap,
const DenseSet<unsigned> &UsedByPhi) {
MachineInstr *NewMI = TII->duplicate(MI, MF);
MachineInstr *NewMI = TII->duplicate(*MI, MF);
if (PreRegAlloc) {
for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = NewMI->getOperand(i);

lib/CodeGen/TargetInstrInfo.cpp

@ -119,13 +119,12 @@ TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
MBB->addSuccessor(NewDest);
}
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
bool NewMI,
unsigned Idx1,
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
bool NewMI, unsigned Idx1,
unsigned Idx2) const {
const MCInstrDesc &MCID = MI->getDesc();
const MCInstrDesc &MCID = MI.getDesc();
bool HasDef = MCID.getNumDefs();
if (HasDef && !MI->getOperand(0).isReg())
if (HasDef && !MI.getOperand(0).isReg())
// No idea how to commute this instruction. Target should implement its own.
return nullptr;
@ -134,60 +133,62 @@ MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
"TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
"This only knows how to commute register operands so far");
unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
unsigned Reg1 = MI->getOperand(Idx1).getReg();
unsigned Reg2 = MI->getOperand(Idx2).getReg();
unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
bool Reg1IsKill = MI->getOperand(Idx1).isKill();
bool Reg2IsKill = MI->getOperand(Idx2).isKill();
bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
unsigned Reg1 = MI.getOperand(Idx1).getReg();
unsigned Reg2 = MI.getOperand(Idx2).getReg();
unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
bool Reg1IsKill = MI.getOperand(Idx1).isKill();
bool Reg2IsKill = MI.getOperand(Idx2).isKill();
bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
// If the destination is tied to either of the commuted source registers, then
// it must be updated.
if (HasDef && Reg0 == Reg1 &&
MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
Reg2IsKill = false;
Reg0 = Reg2;
SubReg0 = SubReg2;
} else if (HasDef && Reg0 == Reg2 &&
MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
Reg1IsKill = false;
Reg0 = Reg1;
SubReg0 = SubReg1;
}
MachineInstr *CommutedMI = nullptr;
if (NewMI) {
// Create a new instruction.
MachineFunction &MF = *MI->getParent()->getParent();
MI = MF.CloneMachineInstr(MI);
MachineFunction &MF = *MI.getParent()->getParent();
CommutedMI = MF.CloneMachineInstr(&MI);
} else {
CommutedMI = &MI;
}
if (HasDef) {
MI->getOperand(0).setReg(Reg0);
MI->getOperand(0).setSubReg(SubReg0);
CommutedMI->getOperand(0).setReg(Reg0);
CommutedMI->getOperand(0).setSubReg(SubReg0);
}
MI->getOperand(Idx2).setReg(Reg1);
MI->getOperand(Idx1).setReg(Reg2);
MI->getOperand(Idx2).setSubReg(SubReg1);
MI->getOperand(Idx1).setSubReg(SubReg2);
MI->getOperand(Idx2).setIsKill(Reg1IsKill);
MI->getOperand(Idx1).setIsKill(Reg2IsKill);
MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
return MI;
CommutedMI->getOperand(Idx2).setReg(Reg1);
CommutedMI->getOperand(Idx1).setReg(Reg2);
CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
return CommutedMI;
}
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
bool NewMI,
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
// If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
@ -195,7 +196,7 @@ MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
// called below.
if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
!findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
assert(MI->isCommutable() &&
assert(MI.isCommutable() &&
"Precondition violation: MI must be commutable.");
return nullptr;
}
@ -233,13 +234,13 @@ bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
return true;
}
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
assert(!MI->isBundle() &&
assert(!MI.isBundle() &&
"TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
const MCInstrDesc &MCID = MI->getDesc();
const MCInstrDesc &MCID = MI.getDesc();
if (!MCID.isCommutable())
return false;
@ -251,8 +252,7 @@ bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
CommutableOpIdx1, CommutableOpIdx2))
return false;
if (!MI->getOperand(SrcOpIdx1).isReg() ||
!MI->getOperand(SrcOpIdx2).isReg())
if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
// No idea.
return false;
return true;
@ -299,13 +299,12 @@ bool TargetInstrInfo::PredicateInstruction(
return MadeChange;
}
bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const {
for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
oe = MI->memoperands_end();
o != oe;
++o) {
for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
oe = MI.memoperands_end();
o != oe; ++o) {
if ((*o)->isLoad()) {
if (const FixedStackPseudoSourceValue *Value =
dyn_cast_or_null<FixedStackPseudoSourceValue>(
@ -319,13 +318,12 @@ bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
return false;
}
bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr &MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const {
for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
oe = MI->memoperands_end();
o != oe;
++o) {
for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
oe = MI.memoperands_end();
o != oe; ++o) {
if ((*o)->isStore()) {
if (const FixedStackPseudoSourceValue *Value =
dyn_cast_or_null<FixedStackPseudoSourceValue>(
@ -372,40 +370,37 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg,
unsigned SubIdx,
const MachineInstr *Orig,
unsigned DestReg, unsigned SubIdx,
const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
MBB.insert(I, MI);
}
bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1,
const MachineRegisterInfo *MRI) const {
return MI0->isIdenticalTo(*MI1, MachineInstr::IgnoreVRegDefs);
bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
const MachineInstr &MI1,
const MachineRegisterInfo *MRI) const {
return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
MachineInstr *TargetInstrInfo::duplicate(MachineInstr &Orig,
MachineFunction &MF) const {
assert(!Orig->isNotDuplicable() &&
"Instruction cannot be duplicated");
return MF.CloneMachineInstr(Orig);
assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
return MF.CloneMachineInstr(&Orig);
}
// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
unsigned FoldIdx) {
assert(MI->isCopy() && "MI must be a COPY instruction");
if (MI->getNumOperands() != 2)
assert(MI.isCopy() && "MI must be a COPY instruction");
if (MI.getNumOperands() != 2)
return nullptr;
assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);
const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
if (FoldOp.getSubReg() || LiveOp.getSubReg())
return nullptr;
@ -416,7 +411,7 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
"Cannot fold physregs");
const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
@ -433,17 +428,17 @@ void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
llvm_unreachable("Not a MachO target");
}
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
ArrayRef<unsigned> Ops, int FrameIndex,
const TargetInstrInfo &TII) {
unsigned StartIdx = 0;
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
case TargetOpcode::STACKMAP:
StartIdx = 2; // Skip ID, nShadowBytes.
break;
case TargetOpcode::PATCHPOINT: {
// For PatchPoint, the call args are not foldable.
PatchPointOpers opers(MI);
PatchPointOpers opers(&MI);
StartIdx = opers.getVarIdx();
break;
}
@ -459,15 +454,15 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
}
MachineInstr *NewMI =
MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
// No need to fold the return value, the metadata, or the function arguments.
for (unsigned i = 0; i < StartIdx; ++i)
MIB.addOperand(MI->getOperand(i));
MIB.addOperand(MI.getOperand(i));
for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
MachineOperand &MO = MI->getOperand(i);
for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
MachineOperand &MO = MI.getOperand(i);
if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
unsigned SpillSize;
unsigned SpillOffset;
@ -495,25 +490,24 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
ArrayRef<unsigned> Ops,
int FI,
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
ArrayRef<unsigned> Ops, int FI,
LiveIntervals *LIS) const {
unsigned Flags = 0;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (MI->getOperand(Ops[i]).isDef())
if (MI.getOperand(Ops[i]).isDef())
Flags |= MachineMemOperand::MOStore;
else
Flags |= MachineMemOperand::MOLoad;
MachineBasicBlock *MBB = MI->getParent();
MachineBasicBlock *MBB = MI.getParent();
assert(MBB && "foldMemoryOperand needs an inserted instruction");
MachineFunction &MF = *MBB->getParent();
MachineInstr *NewMI = nullptr;
if (MI->getOpcode() == TargetOpcode::STACKMAP ||
MI->getOpcode() == TargetOpcode::PATCHPOINT) {
if (MI.getOpcode() == TargetOpcode::STACKMAP ||
MI.getOpcode() == TargetOpcode::PATCHPOINT) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
if (NewMI)
@ -524,7 +518,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
}
if (NewMI) {
NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
assert((!(Flags & MachineMemOperand::MOStore) ||
NewMI->mayStore()) &&
@ -543,14 +537,14 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
}
// Straight COPY may fold as load/store.
if (!MI->isCopy() || Ops.size() != 1)
if (!MI.isCopy() || Ops.size() != 1)
return nullptr;
const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
if (!RC)
return nullptr;
const MachineOperand &MO = MI->getOperand(1-Ops[0]);
const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
MachineBasicBlock::iterator Pos = MI;
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
@ -777,24 +771,24 @@ void TargetInstrInfo::genAlternativeCodeSequence(
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
ArrayRef<unsigned> Ops,
MachineInstr *LoadMI,
MachineInstr &LoadMI,
LiveIntervals *LIS) const {
assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
assert(MI.getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
MachineBasicBlock &MBB = *MI->getParent();
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
// Ask the target to do the actual folding.
MachineInstr *NewMI = nullptr;
int FrameIndex = 0;
if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
MI.getOpcode() == TargetOpcode::PATCHPOINT) &&
isLoadFromStackSlot(LoadMI, FrameIndex)) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
@ -808,39 +802,37 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
if (!NewMI) return nullptr;
// Copy the memoperands from the load to the folded instruction.
if (MI->memoperands_empty()) {
NewMI->setMemRefs(LoadMI->memoperands_begin(),
LoadMI->memoperands_end());
if (MI.memoperands_empty()) {
NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
}
else {
// Handle the rare case of folding multiple loads.
NewMI->setMemRefs(MI->memoperands_begin(),
MI->memoperands_end());
for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
E = LoadMI->memoperands_end(); I != E; ++I) {
NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
E = LoadMI.memoperands_end();
I != E; ++I) {
NewMI->addMemOperand(MF, *I);
}
}
return NewMI;
}
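
A hedged sketch of the caller contract for the foldMemoryOperand overloads above (the helper name and single-operand assumption are illustrative):

// Sketch: fold operand OpIdx of MI into a load/store on FrameIndex.
// The hook inserts the folded instruction; erasing the now-dead original
// is the caller's job, per the comment above.
static bool foldOperandToStackSlot(const TargetInstrInfo &TII,
                                   MachineInstr &MI, unsigned OpIdx,
                                   int FrameIndex) {
  MachineInstr *FoldedMI =
      TII.foldMemoryOperand(MI, {OpIdx}, FrameIndex, /*LIS=*/nullptr);
  if (!FoldedMI)
    return false;
  MI.eraseFromParent();
  return true;
}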
bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
AliasAnalysis *AA) const {
const MachineFunction &MF = *MI->getParent()->getParent();
bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
const MachineInstr &MI, AliasAnalysis *AA) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
// Remat clients assume operand 0 is the defined register.
if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
return false;
unsigned DefReg = MI->getOperand(0).getReg();
unsigned DefReg = MI.getOperand(0).getReg();
// A sub-register definition can only be rematerialized if the instruction
// doesn't read the other parts of the register. Otherwise it is really a
// read-modify-write operation on the full virtual register which cannot be
// moved safely.
if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
return false;
// A load from a fixed stack slot can be rematerialized. This may be
@ -852,23 +844,22 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
return true;
// Avoid instructions obviously unsafe for remat.
if (MI->isNotDuplicable() || MI->mayStore() ||
MI->hasUnmodeledSideEffects())
if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
return false;
// Don't remat inline asm. We have no idea how expensive it is
// even if it's side effect free.
if (MI->isInlineAsm())
if (MI.isInlineAsm())
return false;
// Avoid instructions which load from potentially varying memory.
if (MI->mayLoad() && !MI->isInvariantLoad(AA))
if (MI.mayLoad() && !MI.isInvariantLoad(AA))
return false;
// If any of the registers accessed are non-constant, conservatively assume
// the instruction is not rematerializable.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0)
@ -905,8 +896,8 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
return true;
}
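
The checks above add up to a conservative predicate; a sketch of the client-side question they answer (using the public isTriviallyReMaterializable entry point, which dispatches into this generic path when the target has no opinion):

// Sketch: prefer re-materializing a def over spilling it, when provably safe.
static bool preferRematOverSpill(const TargetInstrInfo &TII,
                                 const MachineInstr &Def, AliasAnalysis *AA) {
  // True for e.g. an immediate move or a load from a fixed, immutable stack
  // slot; false for stores, inline asm, and loads from varying memory.
  return TII.isTriviallyReMaterializable(Def, AA);
}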
int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
const MachineFunction *MF = MI->getParent()->getParent();
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
const MachineFunction *MF = MI.getParent()->getParent();
const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
bool StackGrowsDown =
TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
@ -914,15 +905,15 @@ int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
if (MI->getOpcode() != FrameSetupOpcode &&
MI->getOpcode() != FrameDestroyOpcode)
if (MI.getOpcode() != FrameSetupOpcode &&
MI.getOpcode() != FrameDestroyOpcode)
return 0;
int SPAdj = MI->getOperand(0).getImm();
int SPAdj = MI.getOperand(0).getImm();
SPAdj = TFI->alignSPAdjust(SPAdj);
if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
(StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
(StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
SPAdj = -SPAdj;
return SPAdj;
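
A worked example of the sign convention above, as a standalone sketch (the 16 is illustrative):

// Mirrors getSPAdjust: on a stack-grows-down target, a 16-byte call frame
// setup yields +16 and the matching destroy yields -16.
static int exampleSPAdjust(bool StackGrowsDown, bool IsFrameSetup) {
  int SPAdj = 16; // the instruction's first operand, after alignment
  if ((!StackGrowsDown && IsFrameSetup) || (StackGrowsDown && !IsFrameSetup))
    SPAdj = -SPAdj;
  return SPAdj;
}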
@ -931,11 +922,11 @@ int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const {
// Terminators and labels can't be scheduled around.
if (MI->isTerminator() || MI->isPosition())
if (MI.isTerminator() || MI.isPosition())
return true;
// Don't attempt to schedule around any instruction that defines
@ -945,7 +936,7 @@ bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
// modification.
const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
return MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}
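
A sketch of how a scheduler might consume this hook with the reference signature (helper name illustrative):

// Sketch: advance to the end of a scheduling region, stopping at the first
// boundary (label, terminator, or SP-modifying instruction, per the above).
static MachineBasicBlock::iterator
findRegionEnd(const TargetInstrInfo &TII, MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I, const MachineFunction &MF) {
  while (I != MBB.end() && !TII.isSchedulingBoundary(*I, &MBB, MF))
    ++I;
  return I;
}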
// Provide a global flag for disabling the PreRA hazard recognizer that targets
@ -1014,13 +1005,12 @@ int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//
unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
const MachineInstr *MI) const {
unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
const MachineInstr &MI) const {
if (!ItinData || ItinData->isEmpty())
return 1;
unsigned Class = MI->getDesc().getSchedClass();
unsigned Class = MI.getDesc().getSchedClass();
int UOps = ItinData->Itineraries[Class].NumMicroOps;
if (UOps >= 0)
return UOps;
@ -1032,12 +1022,12 @@ TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
const MachineInstr *DefMI) const {
if (DefMI->isTransient())
const MachineInstr &DefMI) const {
if (DefMI.isTransient())
return 0;
if (DefMI->mayLoad())
if (DefMI.mayLoad())
return SchedModel.LoadLatency;
if (isHighLatencyDef(DefMI->getOpcode()))
if (isHighLatencyDef(DefMI.getOpcode()))
return SchedModel.HighLatency;
return 1;
}
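
A worked reading of the buckets above (the cycle counts in the comments are illustrative; real values come from the subtarget's MCSchedModel):

// Sketch: what defaultDefLatency returns for a few kinds of defs.
static unsigned exampleDefLatency(const TargetInstrInfo &TII,
                                  const MCSchedModel &SM,
                                  const MachineInstr &DefMI) {
  // COPY or IMPLICIT_DEF (transient)   -> 0
  // a load                             -> SM.LoadLatency (e.g. 4)
  // a high-latency def (e.g. a divide) -> SM.HighLatency (e.g. 10+)
  // anything else                      -> 1
  return TII.defaultDefLatency(SM, DefMI);
}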
@ -1046,46 +1036,45 @@ unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
return 0;
}
unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost) const {
unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr &MI,
unsigned *PredCost) const {
// Default to one cycle for no itinerary. However, an "empty" itinerary may
// still have a MinLatency property, which getStageLatency checks.
if (!ItinData)
return MI->mayLoad() ? 2 : 1;
return MI.mayLoad() ? 2 : 1;
return ItinData->getStageLatency(MI->getDesc().getSchedClass());
return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}
bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI,
const MachineInstr &DefMI,
unsigned DefIdx) const {
const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
if (!ItinData || ItinData->isEmpty())
return false;
unsigned DefClass = DefMI->getDesc().getSchedClass();
unsigned DefClass = DefMI.getDesc().getSchedClass();
int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
return (DefCycle != -1 && DefCycle <= 1);
}
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx) const {
unsigned DefClass = DefMI->getDesc().getSchedClass();
unsigned UseClass = UseMI->getDesc().getSchedClass();
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr &DefMI,
unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const {
unsigned DefClass = DefMI.getDesc().getSchedClass();
unsigned UseClass = UseMI.getDesc().getSchedClass();
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
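
Note the asymmetry this change settles on: DefMI and UseMI are references here because both must be valid, while computeOperandLatency below keeps a MachineInstr* for a possibly unknown use. A hedged sketch of the unknown-use path:

// Sketch: latency of a def whose consumer is unknown; UseMI stays a pointer
// in computeOperandLatency precisely so nullptr can express this case.
static unsigned latencyToUnknownUse(const TargetInstrInfo &TII,
                                    const InstrItineraryData *Itins,
                                    const MachineInstr &Def, unsigned DefIdx) {
  return TII.computeOperandLatency(Itins, Def, DefIdx,
                                   /*UseMI=*/nullptr, /*UseIdx=*/0);
}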
/// If we can determine the operand latency from the def only, without itinerary
/// lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
const InstrItineraryData *ItinData,
const MachineInstr *DefMI) const {
const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
// Let the target hook getInstrLatency handle missing itineraries.
if (!ItinData)
@ -1098,21 +1087,9 @@ int TargetInstrInfo::computeDefOperandLatency(
return -1;
}
/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI may
/// be NULL for an unknown use.
///
/// FindMin may be set to get the minimum vs. expected latency. Minimum
/// latency is used for scheduling groups, while expected latency is for
/// instruction cost and critical path.
///
/// Depending on the subtarget's itinerary properties, this may or may not need
/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
/// UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx) const {
unsigned TargetInstrInfo::computeOperandLatency(
const InstrItineraryData *ItinData, const MachineInstr &DefMI,
unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const {
int DefLatency = computeDefOperandLatency(ItinData, DefMI);
if (DefLatency >= 0)
@ -1122,9 +1099,9 @@ computeOperandLatency(const InstrItineraryData *ItinData,
int OperLatency = 0;
if (UseMI)
OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, *UseMI, UseIdx);
else {
unsigned DefClass = DefMI->getDesc().getSchedClass();
unsigned DefClass = DefMI.getDesc().getSchedClass();
OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
}
if (OperLatency >= 0)

lib/CodeGen/TargetSchedModel.cpp View File

@ -77,7 +77,7 @@ unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
const MCSchedClassDesc *SC) const {
if (hasInstrItineraries()) {
int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, MI);
return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *MI);
}
if (hasInstrSchedModel()) {
if (!SC)
@ -156,13 +156,13 @@ unsigned TargetSchedModel::computeOperandLatency(
const MachineInstr *UseMI, unsigned UseOperIdx) const {
if (!hasInstrSchedModel() && !hasInstrItineraries())
return TII->defaultDefLatency(SchedModel, DefMI);
return TII->defaultDefLatency(SchedModel, *DefMI);
if (hasInstrItineraries()) {
int OperLatency = 0;
if (UseMI) {
OperLatency = TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx,
UseMI, UseOperIdx);
OperLatency = TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,
*UseMI, UseOperIdx);
}
else {
unsigned DefClass = DefMI->getDesc().getSchedClass();
@ -172,15 +172,15 @@ unsigned TargetSchedModel::computeOperandLatency(
return OperLatency;
// No operand latency was found.
unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);
unsigned InstrLatency = TII->getInstrLatency(&InstrItins, *DefMI);
// Expected latency is the max of the stage latency and itinerary props.
// Rather than directly querying InstrItins stage latency, we call a TII
// hook to allow subtargets to specialize latency. This hook is only
// applicable to the InstrItins model. InstrSchedModel should model all
// special cases without TII hooks.
InstrLatency = std::max(InstrLatency,
TII->defaultDefLatency(SchedModel, DefMI));
InstrLatency =
std::max(InstrLatency, TII->defaultDefLatency(SchedModel, *DefMI));
return InstrLatency;
}
// hasInstrSchedModel()
@ -219,7 +219,7 @@ unsigned TargetSchedModel::computeOperandLatency(
// FIXME: Automatically giving all implicit defs defaultDefLatency is
// undesirable. We should only do it for defs that are known to the MC
// desc like flags. Truly implicit defs should get 1 cycle latency.
return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, DefMI);
return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, *DefMI);
}
unsigned
@ -254,14 +254,14 @@ TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
// Allow subtargets to compute Bundle latencies outside the machine model.
if (hasInstrItineraries() || MI->isBundle() ||
(!hasInstrSchedModel() && !UseDefaultDefLatency))
return TII->getInstrLatency(&InstrItins, MI);
return TII->getInstrLatency(&InstrItins, *MI);
if (hasInstrSchedModel()) {
const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
if (SCDesc->isValid())
return computeInstrLatency(*SCDesc);
}
return TII->defaultDefLatency(SchedModel, MI);
return TII->defaultDefLatency(SchedModel, *MI);
}
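
This file marks the boundary of the change: TargetSchedModel still passes MachineInstr* around and dereferences exactly at the TII call sites. A minimal sketch of that bridging pattern:

#include <cassert>

// Sketch: a pointer-holding caller crossing into the reference-based API.
static unsigned latencyAtBoundary(const TargetInstrInfo &TII,
                                  const InstrItineraryData *Itins,
                                  const MachineInstr *MI) {
  assert(MI && "TII hooks now require a valid instruction");
  return TII.getInstrLatency(Itins, *MI); // explicit dereference
}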
unsigned TargetSchedModel::

lib/CodeGen/TwoAddressInstructionPass.cpp View File

@ -647,7 +647,7 @@ bool TwoAddressInstructionPass::commuteInstruction(MachineInstr *MI,
unsigned Dist) {
unsigned RegC = MI->getOperand(RegCIdx).getReg();
DEBUG(dbgs() << "2addr: COMMUTING : " << *MI);
MachineInstr *NewMI = TII->commuteInstruction(MI, false, RegBIdx, RegCIdx);
MachineInstr *NewMI = TII->commuteInstruction(*MI, false, RegBIdx, RegCIdx);
if (NewMI == nullptr) {
DEBUG(dbgs() << "2addr: COMMUTING FAILED!\n");
@ -695,7 +695,7 @@ TwoAddressInstructionPass::convertInstTo3Addr(MachineBasicBlock::iterator &mi,
unsigned Dist) {
// FIXME: Why does convertToThreeAddress() need an iterator reference?
MachineFunction::iterator MFI = MBB->getIterator();
MachineInstr *NewMI = TII->convertToThreeAddress(MFI, mi, LV);
MachineInstr *NewMI = TII->convertToThreeAddress(MFI, *mi, LV);
assert(MBB->getIterator() == MFI &&
"convertToThreeAddress changed iterator reference");
if (!NewMI)
@ -861,7 +861,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
if (!MI->isSafeToMove(AA, SeenStore))
return false;
if (TII->getInstrLatency(InstrItins, MI) > 1)
if (TII->getInstrLatency(InstrItins, *MI) > 1)
// FIXME: Needs more sophisticated heuristics.
return false;
@ -993,7 +993,7 @@ bool TwoAddressInstructionPass::isDefTooClose(unsigned Reg, unsigned Dist,
return true; // Below MI
unsigned DefDist = DDI->second;
assert(Dist > DefDist && "Visited def already?");
if (TII->getInstrLatency(InstrItins, &DefMI) > (Dist - DefDist))
if (TII->getInstrLatency(InstrItins, DefMI) > (Dist - DefDist))
return true;
}
return false;
@ -1174,7 +1174,7 @@ bool TwoAddressInstructionPass::tryInstructionCommute(MachineInstr *MI,
// other commutable operands and does not change the values of passed
// variables.
if (OtherOpIdx == BaseOpIdx ||
!TII->findCommutedOpIndices(MI, BaseOpIdx, OtherOpIdx))
!TII->findCommutedOpIndices(*MI, BaseOpIdx, OtherOpIdx))
continue;
unsigned OtherOpReg = MI->getOperand(OtherOpIdx).getReg();
@ -1307,9 +1307,9 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI, *MF));
unsigned Reg = MRI->createVirtualRegister(RC);
SmallVector<MachineInstr *, 2> NewMIs;
if (!TII->unfoldMemoryOperand(*MF, &MI, Reg,
/*UnfoldLoad=*/true,/*UnfoldStore=*/false,
NewMIs)) {
if (!TII->unfoldMemoryOperand(*MF, MI, Reg,
/*UnfoldLoad=*/true,
/*UnfoldStore=*/false, NewMIs)) {
DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
return false;
}

lib/Target/AArch64/AArch64BranchRelaxation.cpp View File

@ -177,7 +177,7 @@ void AArch64BranchRelaxation::scanFunction() {
void AArch64BranchRelaxation::computeBlockSize(const MachineBasicBlock &MBB) {
unsigned Size = 0;
for (const MachineInstr &MI : MBB)
Size += TII->GetInstSizeInBytes(&MI);
Size += TII->GetInstSizeInBytes(MI);
BlockInfo[MBB.getNumber()].Size = Size;
}
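
The same reference-based hook composes over whole functions; a sketch (GetInstSizeInBytes is the AArch64-specific hook below, not a generic TII method):

// Sketch: sum the maximum encoded size of every instruction in MF.
static unsigned functionSizeInBytes(const AArch64InstrInfo &TII,
                                    const MachineFunction &MF) {
  unsigned Size = 0;
  for (const MachineBasicBlock &MBB : MF)
    for (const MachineInstr &MI : MBB)
      Size += TII.GetInstSizeInBytes(MI);
  return Size;
}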
@ -195,7 +195,7 @@ unsigned AArch64BranchRelaxation::getInstrOffset(MachineInstr *MI) const {
// Sum instructions before MI in MBB.
for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
assert(I != MBB->end() && "Didn't find MI in its own basic block?");
Offset += TII->GetInstSizeInBytes(I);
Offset += TII->GetInstSizeInBytes(*I);
}
return Offset;
}
@ -420,7 +420,7 @@ bool AArch64BranchRelaxation::fixupConditionalBranch(MachineInstr *MI) {
MachineBasicBlock *NewBB = splitBlockBeforeInstr(MI);
// No need for the branch to the next block. We're adding an unconditional
// branch to the destination.
int delta = TII->GetInstSizeInBytes(&MBB->back());
int delta = TII->GetInstSizeInBytes(MBB->back());
BlockInfo[MBB->getNumber()].Size -= delta;
MBB->back().eraseFromParent();
// BlockInfo[SplitBB].Offset is wrong temporarily, fixed below
@ -446,12 +446,12 @@ bool AArch64BranchRelaxation::fixupConditionalBranch(MachineInstr *MI) {
if (MI->getOpcode() == AArch64::Bcc)
invertBccCondition(MIB);
MIB.addMBB(NextBB);
BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
BuildMI(MBB, DebugLoc(), TII->get(AArch64::B)).addMBB(DestBB);
BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
// Remove the old conditional branch. It may or may not still be in MBB.
BlockInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
BlockInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(*MI);
MI->eraseFromParent();
// Finally, keep the block offsets up to date.

lib/Target/AArch64/AArch64InstrInfo.cpp View File

@ -35,15 +35,15 @@ AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may occupy. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const MachineBasicBlock &MBB = *MI->getParent();
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr &MI) const {
const MachineBasicBlock &MBB = *MI.getParent();
const MachineFunction *MF = MBB.getParent();
const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
if (MI->getOpcode() == AArch64::INLINEASM)
return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
if (MI.getOpcode() == AArch64::INLINEASM)
return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
const MCInstrDesc &Desc = MI->getDesc();
const MCInstrDesc &Desc = MI.getDesc();
switch (Desc.getOpcode()) {
default:
// Anything not explicitly designated otherwise is a normal 4-byte insn.
@ -536,8 +536,8 @@ void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
}
/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr *MI, unsigned BitSize) {
uint64_t Imm = MI->getOperand(1).getImm();
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
uint64_t Imm = MI.getOperand(1).getImm();
uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
uint64_t Encoding;
return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
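
A self-contained worked example of the truncation idiom above: the double shift keeps only the low BitSize bits of Imm, zero-extended (values illustrative):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Imm = 0xFFFFFFFF000000FFULL; // e.g. from a MOVi32imm operand
  unsigned BitSize = 32;
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  assert(UImm == 0xFFULL && "the high 32 bits are discarded");
  return 0;
}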
@ -545,13 +545,13 @@ static bool canBeExpandedToORR(const MachineInstr *MI, unsigned BitSize) {
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
if (!Subtarget.hasCustomCheapAsMoveHandling())
return MI->isAsCheapAsAMove();
return MI.isAsCheapAsAMove();
unsigned Imm;
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
default:
return false;
@ -561,14 +561,14 @@ bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
case AArch64::SUBWri:
case AArch64::SUBXri:
return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 ||
MI->getOperand(3).getImm() == 0);
MI.getOperand(3).getImm() == 0);
// add/sub on register with shift
case AArch64::ADDWrs:
case AArch64::ADDXrs:
case AArch64::SUBWrs:
case AArch64::SUBXrs:
Imm = MI->getOperand(3).getImm();
Imm = MI.getOperand(3).getImm();
return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
AArch64_AM::getArithShiftValue(Imm) < 4);
@ -609,7 +609,7 @@ bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
case AArch64::ORNXrs:
case AArch64::ORRWrs:
case AArch64::ORRXrs:
Imm = MI->getOperand(3).getImm();
Imm = MI.getOperand(3).getImm();
return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
AArch64_AM::getShiftValue(Imm) < 4 &&
AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL);
@ -645,20 +645,18 @@ bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
}
}
bool
AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
MachineInstr *MIb,
AliasAnalysis *AA) const {
bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
const TargetRegisterInfo *TRI = &getRegisterInfo();
unsigned BaseRegA = 0, BaseRegB = 0;
int64_t OffsetA = 0, OffsetB = 0;
unsigned WidthA = 0, WidthB = 0;
assert(MIa && MIa->mayLoadOrStore() && "MIa must be a load or store.");
assert(MIb && MIb->mayLoadOrStore() && "MIb must be a load or store.");
assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
return false;
// Retrieve the base register, offset from the base register and width. Width
@ -682,10 +680,10 @@ AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const {
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
default:
break;
case AArch64::SUBSWrr:
@ -701,8 +699,8 @@ bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
case AArch64::ADDSXrs:
case AArch64::ADDSXrx:
// Replace SUBSWrr with SUBWrr if NZCV is not used.
SrcReg = MI->getOperand(1).getReg();
SrcReg2 = MI->getOperand(2).getReg();
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = MI.getOperand(2).getReg();
CmpMask = ~0;
CmpValue = 0;
return true;
@ -710,17 +708,17 @@ bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
case AArch64::ADDSWri:
case AArch64::SUBSXri:
case AArch64::ADDSXri:
SrcReg = MI->getOperand(1).getReg();
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = ~0;
// FIXME: In order to convert CmpValue to 0 or 1
CmpValue = (MI->getOperand(2).getImm() != 0);
CmpValue = MI.getOperand(2).getImm() != 0;
return true;
case AArch64::ANDSWri:
case AArch64::ANDSXri:
// ANDS does not use the same encoding scheme as the others xxxS
// instructions.
SrcReg = MI->getOperand(1).getReg();
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = ~0;
// FIXME: The return value type of decodeLogicalImmediate is uint64_t,
@ -728,17 +726,17 @@ bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
// the high 32 bits of uint64_t will be lost.
// In fact it causes a bug in spec2006-483.xalancbmk
// CmpValue is only used to compare with zero in OptimizeCompareInstr
CmpValue = (AArch64_AM::decodeLogicalImmediate(
MI->getOperand(2).getImm(),
MI->getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0);
CmpValue = AArch64_AM::decodeLogicalImmediate(
MI.getOperand(2).getImm(),
MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
return true;
}
return false;
}
static bool UpdateOperandRegClass(MachineInstr *Instr) {
MachineBasicBlock *MBB = Instr->getParent();
static bool UpdateOperandRegClass(MachineInstr &Instr) {
MachineBasicBlock *MBB = Instr.getParent();
assert(MBB && "Can't get MachineBasicBlock here");
MachineFunction *MF = MBB->getParent();
assert(MF && "Can't get MachineFunction here");
@ -746,11 +744,11 @@ static bool UpdateOperandRegClass(MachineInstr *Instr) {
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
MachineRegisterInfo *MRI = &MF->getRegInfo();
for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
++OpIdx) {
MachineOperand &MO = Instr->getOperand(OpIdx);
MachineOperand &MO = Instr.getOperand(OpIdx);
const TargetRegisterClass *OpRegCstraints =
Instr->getRegClassConstraint(OpIdx, TII, TRI);
Instr.getRegClassConstraint(OpIdx, TII, TRI);
// If there's no constraint, there's nothing to do.
if (!OpRegCstraints)
@ -778,16 +776,16 @@ static bool UpdateOperandRegClass(MachineInstr *Instr) {
/// \brief Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible for the actual
/// substitution and legality checking.
static unsigned convertFlagSettingOpcode(const MachineInstr *MI) {
static unsigned convertFlagSettingOpcode(const MachineInstr &MI) {
// Don't convert all compare instructions, because for some the zero register
// encoding becomes the sp register.
bool MIDefinesZeroReg = false;
if (MI->definesRegister(AArch64::WZR) || MI->definesRegister(AArch64::XZR))
if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
MIDefinesZeroReg = true;
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
default:
return MI->getOpcode();
return MI.getOpcode();
case AArch64::ADDSWrr:
return AArch64::ADDWrr;
case AArch64::ADDSWri:
@ -834,14 +832,11 @@ enum AccessKind {
///
/// Note: If From and To are from different blocks, it is assumed that the
/// condition flags are accessed on the path.
static bool areCFlagsAccessedBetweenInstrs(MachineInstr *From, MachineInstr *To,
const TargetRegisterInfo *TRI,
const AccessKind AccessToCheck = AK_All) {
// We iterate backward starting \p To until we hit \p From
MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();
static bool areCFlagsAccessedBetweenInstrs(
MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
// Early exit if To is at the beginning of the BB.
if (I == B)
if (To == To->getParent()->begin())
return true;
// Check whether the instructions are in the same basic block
@ -856,8 +851,9 @@ static bool areCFlagsAccessedBetweenInstrs(MachineInstr *From, MachineInstr *To,
return &MI == From;
}) != To->getParent()->rend());
for (--I; I != E; --I) {
const MachineInstr &Instr = *I;
// We iterate backward starting from \p To until we hit \p From.
for (--To; To != From; --To) {
const MachineInstr &Instr = *To;
if ( ((AccessToCheck & AK_Write) && Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
@ -876,27 +872,26 @@ static bool areCFlagsAccessedBetweenInstrs(MachineInstr *From, MachineInstr *To,
/// condition code or an instruction which can be converted into such an instruction.
/// Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
int CmpValue, const MachineRegisterInfo *MRI) const {
assert(CmpInstr);
assert(CmpInstr->getParent());
assert(CmpInstr.getParent());
assert(MRI);
// Replace SUBSWrr with SUBWrr if NZCV is not used.
int DeadNZCVIdx = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
if (DeadNZCVIdx != -1) {
if (CmpInstr->definesRegister(AArch64::WZR) ||
CmpInstr->definesRegister(AArch64::XZR)) {
CmpInstr->eraseFromParent();
if (CmpInstr.definesRegister(AArch64::WZR) ||
CmpInstr.definesRegister(AArch64::XZR)) {
CmpInstr.eraseFromParent();
return true;
}
unsigned Opc = CmpInstr->getOpcode();
unsigned Opc = CmpInstr.getOpcode();
unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
if (NewOpc == Opc)
return false;
const MCInstrDesc &MCID = get(NewOpc);
CmpInstr->setDesc(MCID);
CmpInstr->RemoveOperand(DeadNZCVIdx);
CmpInstr.setDesc(MCID);
CmpInstr.RemoveOperand(DeadNZCVIdx);
bool succeeded = UpdateOperandRegClass(CmpInstr);
(void)succeeded;
assert(succeeded && "Some operands reg class are incompatible!");
@ -911,7 +906,7 @@ bool AArch64InstrInfo::optimizeCompareInstr(
return false;
// CmpInstr is a Compare instruction if its destination register is not used.
if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
return false;
return substituteCmpToZero(CmpInstr, SrcReg, MRI);
@ -1112,9 +1107,9 @@ static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
/// which produces needed condition flags.
///
/// Return true on success.
bool AArch64InstrInfo::substituteCmpToZero(MachineInstr *CmpInstr,
unsigned SrcReg, const MachineRegisterInfo *MRI) const {
assert(CmpInstr);
bool AArch64InstrInfo::substituteCmpToZero(
MachineInstr &CmpInstr, unsigned SrcReg,
const MachineRegisterInfo *MRI) const {
assert(MRI);
// Get the unique definition of SrcReg.
MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
@ -1127,29 +1122,28 @@ bool AArch64InstrInfo::substituteCmpToZero(MachineInstr *CmpInstr,
if (NewOpc == AArch64::INSTRUCTION_LIST_END)
return false;
if (!canInstrSubstituteCmpInstr(MI, CmpInstr, TRI))
if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
return false;
// Update the instruction to set NZCV.
MI->setDesc(get(NewOpc));
CmpInstr->eraseFromParent();
bool succeeded = UpdateOperandRegClass(MI);
CmpInstr.eraseFromParent();
bool succeeded = UpdateOperandRegClass(*MI);
(void)succeeded;
assert(succeeded && "Some operands reg class are incompatible!");
MI->addRegisterDefined(AArch64::NZCV, TRI);
return true;
}
bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
return false;
MachineBasicBlock &MBB = *MI->getParent();
DebugLoc DL = MI->getDebugLoc();
unsigned Reg = MI->getOperand(0).getReg();
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
unsigned Reg = MI.getOperand(0).getReg();
const GlobalValue *GV =
cast<GlobalValue>((*MI->memoperands_begin())->getValue());
cast<GlobalValue>((*MI.memoperands_begin())->getValue());
const TargetMachine &TM = MBB.getParent()->getTarget();
unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
const unsigned char MO_NC = AArch64II::MO_NC;
@ -1158,8 +1152,9 @@ AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
.addGlobalAddress(GV, 0, AArch64II::MO_GOT);
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
.addReg(Reg, RegState::Kill).addImm(0)
.addMemOperand(*MI->memoperands_begin());
.addReg(Reg, RegState::Kill)
.addImm(0)
.addMemOperand(*MI.memoperands_begin());
} else if (TM.getCodeModel() == CodeModel::Large) {
BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
.addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
@ -1173,8 +1168,9 @@ AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
.addReg(Reg, RegState::Kill).addImm(0)
.addMemOperand(*MI->memoperands_begin());
.addReg(Reg, RegState::Kill)
.addImm(0)
.addMemOperand(*MI.memoperands_begin());
} else {
BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
.addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
@ -1182,7 +1178,7 @@ AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, LoFlags)
.addMemOperand(*MI->memoperands_begin());
.addMemOperand(*MI.memoperands_begin());
}
MBB.erase(MI);
@ -1191,8 +1187,8 @@ AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
}
/// Return true if this instruction has a non-zero immediate
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default:
break;
case AArch64::ADDSWrs:
@ -1227,8 +1223,8 @@ bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
case AArch64::SUBSXrs:
case AArch64::SUBWrs:
case AArch64::SUBXrs:
if (MI->getOperand(3).isImm()) {
unsigned val = MI->getOperand(3).getImm();
if (MI.getOperand(3).isImm()) {
unsigned val = MI.getOperand(3).getImm();
return (val != 0);
}
break;
@ -1237,8 +1233,8 @@ bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
}
/// Return true if this instruction has a non-zero immediate
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default:
break;
case AArch64::ADDSWrx:
@ -1253,8 +1249,8 @@ bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
case AArch64::SUBWrx:
case AArch64::SUBXrx:
case AArch64::SUBXrx64:
if (MI->getOperand(3).isImm()) {
unsigned val = MI->getOperand(3).getImm();
if (MI.getOperand(3).isImm()) {
unsigned val = MI.getOperand(3).getImm();
return (val != 0);
}
break;
@ -1265,51 +1261,51 @@ bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default:
break;
case AArch64::MOVZWi:
case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
assert(MI->getDesc().getNumOperands() == 3 &&
MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
assert(MI.getDesc().getNumOperands() == 3 &&
MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
return true;
}
break;
case AArch64::ANDWri: // and Rd, Rzr, #imm
return MI->getOperand(1).getReg() == AArch64::WZR;
return MI.getOperand(1).getReg() == AArch64::WZR;
case AArch64::ANDXri:
return MI->getOperand(1).getReg() == AArch64::XZR;
return MI.getOperand(1).getReg() == AArch64::XZR;
case TargetOpcode::COPY:
return MI->getOperand(1).getReg() == AArch64::WZR;
return MI.getOperand(1).getReg() == AArch64::WZR;
}
return false;
}
// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default:
break;
case TargetOpcode::COPY: {
// GPR32 copies will be lowered to ORRXrs
unsigned DstReg = MI->getOperand(0).getReg();
unsigned DstReg = MI.getOperand(0).getReg();
return (AArch64::GPR32RegClass.contains(DstReg) ||
AArch64::GPR64RegClass.contains(DstReg));
}
case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
if (MI->getOperand(1).getReg() == AArch64::XZR) {
assert(MI->getDesc().getNumOperands() == 4 &&
MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
if (MI.getOperand(1).getReg() == AArch64::XZR) {
assert(MI.getDesc().getNumOperands() == 4 &&
MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
return true;
}
break;
case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
if (MI->getOperand(2).getImm() == 0) {
assert(MI->getDesc().getNumOperands() == 4 &&
MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
if (MI.getOperand(2).getImm() == 0) {
assert(MI.getDesc().getNumOperands() == 4 &&
MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
return true;
}
break;
@ -1319,19 +1315,19 @@ bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default:
break;
case TargetOpcode::COPY: {
// FPR64 copies will be lowered to ORR.16b
unsigned DstReg = MI->getOperand(0).getReg();
unsigned DstReg = MI.getOperand(0).getReg();
return (AArch64::FPR64RegClass.contains(DstReg) ||
AArch64::FPR128RegClass.contains(DstReg));
}
case AArch64::ORRv16i8:
if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
"invalid ORRv16i8 operands");
return true;
}
@ -1340,9 +1336,9 @@ bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
return false;
}
unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
default:
break;
case AArch64::LDRWui:
@ -1352,10 +1348,10 @@ unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
case AArch64::LDRSui:
case AArch64::LDRDui:
case AArch64::LDRQui:
if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
break;
}
@ -1363,9 +1359,9 @@ unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
return 0;
}
unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
default:
break;
case AArch64::STRWui:
@ -1375,10 +1371,10 @@ unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
case AArch64::STRSui:
case AArch64::STRDui:
case AArch64::STRQui:
if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
break;
}
@ -1388,8 +1384,8 @@ unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
bool AArch64InstrInfo::isScaledAddr(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default:
break;
case AArch64::LDRBBroW:
@ -1439,7 +1435,7 @@ bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
case AArch64::STRWroX:
case AArch64::STRXroX:
unsigned Val = MI->getOperand(3).getImm();
unsigned Val = MI.getOperand(3).getImm();
AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
}
@ -1447,10 +1443,10 @@ bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
}
/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) const {
static_assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits),
"Too many target MO flags");
for (auto *MM : MI->memoperands()) {
for (auto *MM : MI.memoperands()) {
if (MM->getFlags() &
(MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
return true;
@ -1460,13 +1456,13 @@ bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
}
/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
if (MI->memoperands_empty())
void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) const {
if (MI.memoperands_empty())
return;
static_assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits),
"Too many target MO flags");
(*MI->memoperands_begin())
(*MI.memoperands_begin())
->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}
@ -1495,27 +1491,27 @@ bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) const {
}
}
bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr *MI) const {
return isUnscaledLdSt(MI->getOpcode());
bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr &MI) const {
return isUnscaledLdSt(MI.getOpcode());
}
// Is this a candidate for ld/st merging or pairing? For example, we don't
// touch volatiles or load/stores that have a hint to avoid pair formation.
bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr *MI) const {
bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
// If this is a volatile load/store, don't mess with it.
if (MI->hasOrderedMemoryRef())
if (MI.hasOrderedMemoryRef())
return false;
// Make sure this is a reg+imm (as opposed to an address reloc).
assert(MI->getOperand(1).isReg() && "Expected a reg operand.");
if (!MI->getOperand(2).isImm())
assert(MI.getOperand(1).isReg() && "Expected a reg operand.");
if (!MI.getOperand(2).isImm())
return false;
// Can't merge/pair if the instruction modifies the base register.
// e.g., ldr x0, [x0]
unsigned BaseReg = MI->getOperand(1).getReg();
unsigned BaseReg = MI.getOperand(1).getReg();
const TargetRegisterInfo *TRI = &getRegisterInfo();
if (MI->modifiesRegister(BaseReg, TRI))
if (MI.modifiesRegister(BaseReg, TRI))
return false;
// Check if this load/store has a hint to avoid pair formation.
@ -1525,7 +1521,7 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr *MI) const {
// On some CPUs quad load/store pairs are slower than two single load/stores.
if (Subtarget.avoidQuadLdStPairs()) {
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
default:
break;
@ -1541,9 +1537,9 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr *MI) const {
}
bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset,
MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
const TargetRegisterInfo *TRI) const {
switch (LdSt->getOpcode()) {
switch (LdSt.getOpcode()) {
default:
return false;
// Scaled instructions.
@ -1576,17 +1572,18 @@ bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
}
bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
const TargetRegisterInfo *TRI) const {
assert(LdSt->mayLoadOrStore() && "Expected a memory operation.");
assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
// Handle only loads/stores with base register followed by immediate offset.
if (LdSt->getNumExplicitOperands() == 3) {
if (LdSt.getNumExplicitOperands() == 3) {
// Non-paired instruction (e.g., ldr x1, [x0, #8]).
if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
return false;
} else if (LdSt->getNumExplicitOperands() == 4) {
} else if (LdSt.getNumExplicitOperands() == 4) {
// Paired instruction (e.g., ldp x1, x2, [x0, #8]).
if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isReg() || !LdSt->getOperand(3).isImm())
if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isReg() ||
!LdSt.getOperand(3).isImm())
return false;
} else
return false;
@ -1594,7 +1591,7 @@ bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
// Offset is calculated as the immediate operand multiplied by the scaling factor.
// Unscaled instructions have scaling factor set to 1.
unsigned Scale = 0;
switch (LdSt->getOpcode()) {
switch (LdSt.getOpcode()) {
default:
return false;
case AArch64::LDURQi:
@ -1695,13 +1692,13 @@ bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
break;
}
if (LdSt->getNumExplicitOperands() == 3) {
BaseReg = LdSt->getOperand(1).getReg();
Offset = LdSt->getOperand(2).getImm() * Scale;
if (LdSt.getNumExplicitOperands() == 3) {
BaseReg = LdSt.getOperand(1).getReg();
Offset = LdSt.getOperand(2).getImm() * Scale;
} else {
assert(LdSt->getNumExplicitOperands() == 4 && "invalid number of operands");
BaseReg = LdSt->getOperand(2).getReg();
Offset = LdSt->getOperand(3).getImm() * Scale;
assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
BaseReg = LdSt.getOperand(2).getReg();
Offset = LdSt.getOperand(3).getImm() * Scale;
}
return true;
}
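
A worked example of the scaling rule above (the opcode/scale pairing is standard AArch64 encoding, stated here as an assumption):

#include <cstdint>

// Sketch: LDRXui keeps its immediate in 8-byte units, so an encoded
// immediate of 3 addresses [BaseReg + 24]; unscaled LDURXi has Scale == 1.
static int64_t byteOffset(int64_t EncodedImm, unsigned Scale) {
  return EncodedImm * Scale; // e.g. 3 * 8 == 24 for LDRXui
}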
@ -1763,16 +1760,16 @@ static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr *FirstLdSt,
MachineInstr *SecondLdSt,
bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
MachineInstr &SecondLdSt,
unsigned NumLoads) const {
// Only cluster up to a single pair.
if (NumLoads > 1)
return false;
// Can we pair these instructions based on their opcodes?
unsigned FirstOpc = FirstLdSt->getOpcode();
unsigned SecondOpc = SecondLdSt->getOpcode();
unsigned FirstOpc = FirstLdSt.getOpcode();
unsigned SecondOpc = SecondLdSt.getOpcode();
if (!canPairLdStOpc(FirstOpc, SecondOpc))
return false;
@ -1783,11 +1780,11 @@ bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr *FirstLdSt,
return false;
// isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
int64_t Offset1 = FirstLdSt->getOperand(2).getImm();
int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
return false;
int64_t Offset2 = SecondLdSt->getOperand(2).getImm();
int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
return false;
@ -1800,13 +1797,13 @@ bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr *FirstLdSt,
return Offset1 + 1 == Offset2;
}
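
A worked example of the adjacency check above: after normalization both offsets are in element-sized units, so pairable slots differ by exactly one (register names illustrative):

#include <cstdint>

// Sketch: ldr x1, [x10, #8] and ldr x2, [x10, #16] encode, in 8-byte units,
// Offset1 == 1 and Offset2 == 2, so the two loads are ldp candidates.
static bool offsetsAreAdjacent(int64_t Offset1, int64_t Offset2) {
  return Offset1 + 1 == Offset2;
}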
bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
MachineInstr *Second) const {
bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr &First,
MachineInstr &Second) const {
if (Subtarget.hasMacroOpFusion()) {
// Fuse CMN, CMP, TST followed by Bcc.
unsigned SecondOpcode = Second->getOpcode();
unsigned SecondOpcode = Second.getOpcode();
if (SecondOpcode == AArch64::Bcc) {
switch (First->getOpcode()) {
switch (First.getOpcode()) {
default:
return false;
case AArch64::SUBSWri:
@ -1821,7 +1818,7 @@ bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
// Fuse ALU operations followed by CBZ/CBNZ.
if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
switch (First->getOpcode()) {
switch (First.getOpcode()) {
default:
return false;
case AArch64::ADDWri:
@ -2448,7 +2445,7 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
}
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS) const {
// This is a bit of a hack. Consider this instruction:
@ -2464,9 +2461,9 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
//
// <rdar://problem/11522048>
//
if (MI->isCopy()) {
unsigned DstReg = MI->getOperand(0).getReg();
unsigned SrcReg = MI->getOperand(1).getReg();
if (MI.isCopy()) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
if (SrcReg == AArch64::SP &&
TargetRegisterInfo::isVirtualRegister(DstReg)) {
MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
@ -2934,7 +2931,7 @@ static bool getMaddPatterns(MachineInstr &Root,
// When NZCV is live, bail out.
if (Cmp_NZCV == -1)
return false;
unsigned NewOpc = convertFlagSettingOpcode(&Root);
unsigned NewOpc = convertFlagSettingOpcode(Root);
// When the opcode can't change, bail out.
// CHECKME: do we miss any cases for opcode conversion?
if (NewOpc == Opc)
@ -3830,11 +3827,11 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
/// \param MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
bool IsNegativeBranch = false;
bool IsTestAndBranch = false;
unsigned TargetBBInMI = 0;
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
default:
llvm_unreachable("Unknown branch instruction?");
case AArch64::Bcc:
@ -3863,15 +3860,15 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
// So we increment a zero register and test for bits other
// than bit 0? Conservatively bail out in case the verifier
// missed this case.
if (IsTestAndBranch && MI->getOperand(1).getImm())
if (IsTestAndBranch && MI.getOperand(1).getImm())
return false;
// Find Definition.
assert(MI->getParent() && "Incomplete machine instruction\n");
MachineBasicBlock *MBB = MI->getParent();
assert(MI.getParent() && "Incomplete machine instruction\n");
MachineBasicBlock *MBB = MI.getParent();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
unsigned VReg = MI->getOperand(0).getReg();
unsigned VReg = MI.getOperand(0).getReg();
if (!TargetRegisterInfo::isVirtualRegister(VReg))
return false;
@ -3914,8 +3911,8 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
assert(!MRI->def_empty(NewReg) && "Register must be defined.");
MachineBasicBlock &RefToMBB = *MBB;
MachineBasicBlock *TBB = MI->getOperand(1).getMBB();
DebugLoc DL = MI->getDebugLoc();
MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
DebugLoc DL = MI.getDebugLoc();
unsigned Imm = Log2_64(Mask);
unsigned Opc = (Imm < 32)
? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
@ -3934,7 +3931,7 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
// 32-bit sub-part.
if (!Is32Bit && Imm < 32)
NewMI->getOperand(0).setSubReg(AArch64::sub_32);
MI->eraseFromParent();
MI.eraseFromParent();
return true;
}
// Look for CSINC
@ -3956,12 +3953,12 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
return false;
MachineBasicBlock &RefToMBB = *MBB;
MachineBasicBlock *TBB = MI->getOperand(TargetBBInMI).getMBB();
DebugLoc DL = MI->getDebugLoc();
MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
DebugLoc DL = MI.getDebugLoc();
if (IsNegativeBranch)
CC = AArch64CC::getInvertedCondCode(CC);
BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
MI->eraseFromParent();
MI.eraseFromParent();
return true;
}
}
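
The optimizeCondBranch rewrite above is the pattern repeated throughout this commit: the parameter becomes a reference, every `MI->` becomes `MI.`, and the possibility of a null argument disappears from the signature. A minimal stand-alone sketch of why the reference form is preferable, using toy types rather than LLVM's:

#include <cassert>
#include <iostream>

struct Instr {
  unsigned Opcode;
  unsigned getOpcode() const { return Opcode; }
};

// Old shape: the pointer admits null, so the callee must assert.
bool optimizeCondBranchOld(Instr *MI) {
  assert(MI && "expected a valid instruction");
  return MI->getOpcode() == 1;
}

// New shape: validity is part of the type; no check needed.
bool optimizeCondBranchNew(Instr &MI) {
  return MI.getOpcode() == 1;
}

int main() {
  Instr Branch{1};
  std::cout << optimizeCondBranchOld(&Branch) << '\n'; // caller passes &Branch
  std::cout << optimizeCondBranchNew(Branch) << '\n';  // caller passes Branch
}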

View File

@ -45,65 +45,65 @@ public:
/// always be able to get register info as well (through this method).
const AArch64RegisterInfo &getRegisterInfo() const { return RI; }
unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
unsigned GetInstSizeInBytes(const MachineInstr &MI) const;
bool isAsCheapAsAMove(const MachineInstr *MI) const override;
bool isAsCheapAsAMove(const MachineInstr &MI) const override;
bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
unsigned &DstReg, unsigned &SubIdx) const override;
bool
areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// Returns true if there is a shiftable register and the shift value
/// is non-zero.
bool hasShiftedReg(const MachineInstr *MI) const;
bool hasShiftedReg(const MachineInstr &MI) const;
/// Returns true if there is an extendable register and the extending
/// value is non-zero.
bool hasExtendedReg(const MachineInstr *MI) const;
bool hasExtendedReg(const MachineInstr &MI) const;
/// \brief Does this instruction set its full destination register to zero?
bool isGPRZero(const MachineInstr *MI) const;
bool isGPRZero(const MachineInstr &MI) const;
/// \brief Does this instruction rename a GPR without modifying bits?
bool isGPRCopy(const MachineInstr *MI) const;
bool isGPRCopy(const MachineInstr &MI) const;
/// \brief Does this instruction rename an FPR without modifying bits?
bool isFPRCopy(const MachineInstr *MI) const;
bool isFPRCopy(const MachineInstr &MI) const;
/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool isScaledAddr(const MachineInstr *MI) const;
bool isScaledAddr(const MachineInstr &MI) const;
/// Return true if pairing the given load or store is hinted to be
/// unprofitable.
bool isLdStPairSuppressed(const MachineInstr *MI) const;
bool isLdStPairSuppressed(const MachineInstr &MI) const;
/// Return true if this is an unscaled load/store.
bool isUnscaledLdSt(unsigned Opc) const;
/// Return true if this is an unscaled load/store.
bool isUnscaledLdSt(MachineInstr *MI) const;
bool isUnscaledLdSt(MachineInstr &MI) const;
/// Return true if this is a load/store that can be potentially paired/merged.
bool isCandidateToMergeOrPair(MachineInstr *MI) const;
bool isCandidateToMergeOrPair(MachineInstr &MI) const;
/// Hint that pairing the given load or store is unprofitable.
void suppressLdStPair(MachineInstr *MI) const;
void suppressLdStPair(MachineInstr &MI) const;
bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
bool getMemOpBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
bool getMemOpBaseRegImmOfsWidth(MachineInstr &LdSt, unsigned &BaseReg,
int64_t &Offset, unsigned &Width,
const TargetRegisterInfo *TRI) const;
@ -111,11 +111,11 @@ public:
bool enableClusterStores() const override { return true; }
bool shouldClusterMemOps(MachineInstr *FirstLdSt, MachineInstr *SecondLdSt,
unsigned NumLoads) const override;
bool shouldClusterMemOps(MachineInstr &FirstLdSt, MachineInstr &SecondLdSt,
unsigned NumLoads) const override;
bool shouldScheduleAdjacent(MachineInstr *First,
MachineInstr *Second) const override;
bool shouldScheduleAdjacent(MachineInstr &First,
MachineInstr &Second) const override;
MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
uint64_t Offset, const MDNode *Var,
@ -141,11 +141,11 @@ public:
const TargetRegisterInfo *TRI) const override;
using TargetInstrInfo::foldMemoryOperandImpl;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
int FrameIndex,
LiveIntervals *LIS = nullptr) const override;
MachineInstr *
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS = nullptr) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
@ -168,15 +168,15 @@ public:
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const override;
/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
bool optimizeCondBranch(MachineInstr *MI) const override;
bool optimizeCondBranch(MachineInstr &MI) const override;
/// Return true when a code sequence can improve throughput. It
/// should be called only for instructions in loops.
@ -201,7 +201,7 @@ public:
/// AArch64 supports MachineCombiner.
bool useMachineCombiner() const override;
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
std::pair<unsigned, unsigned>
decomposeMachineOperandsTargetFlags(unsigned TF) const override;
@ -214,8 +214,8 @@ private:
void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
MachineBasicBlock *TBB,
ArrayRef<MachineOperand> Cond) const;
bool substituteCmpToZero(MachineInstr *CmpInstr,
unsigned SrcReg, const MachineRegisterInfo *MRI) const;
bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
const MachineRegisterInfo *MRI) const;
};
/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
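
One reason a block migration across this many files is safe: the overriding declarations in headers like this one carry `override`, so a signature that drifted out of sync with the base class would fail to compile rather than silently become an unrelated overload. A toy illustration with stand-in types, not the real hierarchy:

struct Instr {};

struct Base {
  virtual bool optimizeCondBranch(Instr &MI) const { return false; }
  virtual ~Base() = default;
};

struct Derived : Base {
  // Had this been left taking Instr *, `override` would turn the
  // mismatch into a hard compile error instead of a shadowing overload.
  bool optimizeCondBranch(Instr &MI) const override { return true; }
};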

View File

@ -604,10 +604,10 @@ static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
int LoadSize = getMemScale(LoadInst);
int StoreSize = getMemScale(StoreInst);
int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
int UnscaledStOffset = TII->isUnscaledLdSt(*StoreInst)
? getLdStOffsetOp(StoreInst).getImm()
: getLdStOffsetOp(StoreInst).getImm() * StoreSize;
int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
int UnscaledLdOffset = TII->isUnscaledLdSt(*LoadInst)
? getLdStOffsetOp(LoadInst).getImm()
: getLdStOffsetOp(LoadInst).getImm() * LoadSize;
return (UnscaledStOffset <= UnscaledLdOffset) &&
@ -963,8 +963,8 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
// performance and correctness are verified only in little-endian.
if (!Subtarget->isLittleEndian())
return NextI;
bool IsUnscaled = TII->isUnscaledLdSt(LoadI);
assert(IsUnscaled == TII->isUnscaledLdSt(StoreI) &&
bool IsUnscaled = TII->isUnscaledLdSt(*LoadI);
assert(IsUnscaled == TII->isUnscaledLdSt(*StoreI) &&
"Unsupported ld/st match");
assert(LoadSize <= StoreSize && "Invalid load size");
int UnscaledLdOffset = IsUnscaled
@ -1072,24 +1072,24 @@ static int alignTo(int Num, int PowOf2) {
return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}
static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
static bool mayAlias(MachineInstr &MIa, MachineInstr &MIb,
const AArch64InstrInfo *TII) {
// One of the instructions must modify memory.
if (!MIa->mayStore() && !MIb->mayStore())
if (!MIa.mayStore() && !MIb.mayStore())
return false;
// Both instructions must be memory operations.
if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
if (!MIa.mayLoadOrStore() && !MIb.mayLoadOrStore())
return false;
return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}
static bool mayAlias(MachineInstr *MIa,
static bool mayAlias(MachineInstr &MIa,
SmallVectorImpl<MachineInstr *> &MemInsns,
const AArch64InstrInfo *TII) {
for (auto &MIb : MemInsns)
if (mayAlias(MIa, MIb, TII))
for (MachineInstr *MIb : MemInsns)
if (mayAlias(MIa, *MIb, TII))
return true;
return false;
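
The two mayAlias helpers above keep their worklist of MachineInstr* but take the candidate by reference, dereferencing once per element. A toy model of that shape (the disjointness query and the types are stand-ins, not AArch64's):

#include <vector>

struct Instr {
  bool Store, Mem;
  bool mayStore() const { return Store; }
  bool mayLoadOrStore() const { return Mem; }
};

// Stand-in for the target's query; always conservative here.
bool areMemAccessesTriviallyDisjoint(Instr &, Instr &) { return false; }

bool mayAlias(Instr &MIa, Instr &MIb) {
  if (!MIa.mayStore() && !MIb.mayStore())
    return false; // two loads can never conflict
  if (!MIa.mayLoadOrStore() || !MIb.mayLoadOrStore())
    return false; // both must touch memory at all
  return !areMemAccessesTriviallyDisjoint(MIa, MIb);
}

bool mayAlias(Instr &MIa, std::vector<Instr *> &MemInsns) {
  for (Instr *MIb : MemInsns)
    if (mayAlias(MIa, *MIb)) // one dereference at the boundary
      return true;
  return false;
}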
@ -1146,7 +1146,7 @@ bool AArch64LoadStoreOpt::findMatchingStore(
return false;
// If we encounter a store aliased with the load, return early.
if (MI->mayStore() && mayAlias(LoadMI, MI, TII))
if (MI->mayStore() && mayAlias(*LoadMI, *MI, TII))
return false;
} while (MBBI != B && Count < Limit);
return false;
@ -1158,12 +1158,12 @@ static bool areCandidatesToMergeOrPair(MachineInstr *FirstMI, MachineInstr *MI,
LdStPairFlags &Flags,
const AArch64InstrInfo *TII) {
// If this is volatile or if pairing is suppressed, not a candidate.
if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(*MI))
return false;
// We should have already checked FirstMI for pair suppression and volatility.
assert(!FirstMI->hasOrderedMemoryRef() &&
!TII->isLdStPairSuppressed(FirstMI) &&
!TII->isLdStPairSuppressed(*FirstMI) &&
"FirstMI shouldn't get here if either of these checks are true.");
unsigned OpcA = FirstMI->getOpcode();
@ -1212,7 +1212,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
++MBBI;
bool MayLoad = FirstMI->mayLoad();
bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
bool IsUnscaled = TII->isUnscaledLdSt(*FirstMI);
unsigned Reg = getLdStRegOp(FirstMI).getReg();
unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
int Offset = getLdStOffsetOp(FirstMI).getImm();
@ -1249,7 +1249,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// a relocation.
unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
int MIOffset = getLdStOffsetOp(MI).getImm();
bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
bool MIIsUnscaled = TII->isUnscaledLdSt(*MI);
if (IsUnscaled != MIIsUnscaled) {
// We're trying to pair instructions that differ in how they are scaled.
// If FirstMI is scaled then scale the offset of MI accordingly.
@ -1314,7 +1314,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// first.
if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
!(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
!mayAlias(MI, MemInsns, TII)) {
!mayAlias(*MI, MemInsns, TII)) {
Flags.setMergeForward(false);
return MBBI;
}
@ -1325,7 +1325,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// into the second.
if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
!(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
!mayAlias(FirstMI, MemInsns, TII)) {
!mayAlias(*FirstMI, MemInsns, TII)) {
Flags.setMergeForward(true);
return MBBI;
}
@ -1610,7 +1610,7 @@ bool AArch64LoadStoreOpt::tryToMergeLdStInst(
MachineInstr *MI = MBBI;
MachineBasicBlock::iterator E = MI->getParent()->end();
if (!TII->isCandidateToMergeOrPair(MI))
if (!TII->isCandidateToMergeOrPair(*MI))
return false;
// For promotable zero stores, the stored value should be WZR.
@ -1642,13 +1642,13 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
MachineInstr *MI = MBBI;
MachineBasicBlock::iterator E = MI->getParent()->end();
if (!TII->isCandidateToMergeOrPair(MI))
if (!TII->isCandidateToMergeOrPair(*MI))
return false;
// Early exit if the offset is not possible to match. (6 bits of positive
// range, plus allow an extra one in case we find a later insn that matches
// with Offset-1)
bool IsUnscaled = TII->isUnscaledLdSt(MI);
bool IsUnscaled = TII->isUnscaledLdSt(*MI);
int Offset = getLdStOffsetOp(MI).getImm();
int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
@ -1660,7 +1660,7 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
if (Paired != E) {
++NumPairCreated;
if (TII->isUnscaledLdSt(MI))
if (TII->isUnscaledLdSt(*MI))
++NumUnscaledPairCreated;
// Keeping the iterator straight is a pain, so we let the merge routine tell
// us what the next instruction is after it's done mucking about.
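
Throughout this load/store optimizer the pass still tracks positions with MachineBasicBlock iterators and raw pointers; the `*` is added exactly where a value crosses into a reference-taking TII hook. A small stand-alone model of that call-site pattern, with std::list standing in for a basic block:

#include <iostream>
#include <list>

struct Instr {
  bool Unscaled;
};

// Stand-in for the migrated hook: takes a reference, not a pointer.
bool isUnscaledLdSt(const Instr &MI) { return MI.Unscaled; }

int main() {
  std::list<Instr> Block{{true}, {false}};
  for (std::list<Instr>::iterator MBBI = Block.begin(), E = Block.end();
       MBBI != E; ++MBBI)
    std::cout << isUnscaledLdSt(*MBBI) << '\n'; // dereference at the boundary
}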

View File

@ -108,7 +108,7 @@ def WriteX : SchedWriteRes<[]> { let Latency = 0; }
// The move is replaced by a single nop micro-op.
// MOVZ Rd, #0
// AND Rd, Rzr, #imm
def WriteZPred : SchedPredicate<[{TII->isGPRZero(MI)}]>;
def WriteZPred : SchedPredicate<[{TII->isGPRZero(*MI)}]>;
def WriteImmZ : SchedWriteVariant<[
SchedVar<WriteZPred, [WriteX]>,
SchedVar<NoSchedPred, [WriteImm]>]>;
@ -117,8 +117,8 @@ def : InstRW<[WriteImmZ], (instrs MOVZWi,MOVZXi,ANDWri,ANDXri)>;
// Move GPR is a register rename and single nop micro-op.
// ORR Xd, XZR, Xm
// ADD Xd, Xn, #0
def WriteIMovPred : SchedPredicate<[{TII->isGPRCopy(MI)}]>;
def WriteVMovPred : SchedPredicate<[{TII->isFPRCopy(MI)}]>;
def WriteIMovPred : SchedPredicate<[{TII->isGPRCopy(*MI)}]>;
def WriteVMovPred : SchedPredicate<[{TII->isFPRCopy(*MI)}]>;
def WriteMov : SchedWriteVariant<[
SchedVar<WriteIMovPred, [WriteX]>,
SchedVar<WriteVMovPred, [WriteX]>,

View File

@ -51,15 +51,15 @@ def WriteSTIdx : SchedWrite; // Store to a register index (maybe scaled).
def ReadAdrBase : SchedRead; // Read the base register of a reg-offset LD/ST.
// Predicate for determining when a shiftable register is shifted.
def RegShiftedPred : SchedPredicate<[{TII->hasShiftedReg(MI)}]>;
def RegShiftedPred : SchedPredicate<[{TII->hasShiftedReg(*MI)}]>;
// Predicate for determining when an extendable register is extended.
def RegExtendedPred : SchedPredicate<[{TII->hasExtendedReg(MI)}]>;
def RegExtendedPred : SchedPredicate<[{TII->hasExtendedReg(*MI)}]>;
// ScaledIdxPred is true if a WriteLDIdx operand will be
// scaled. Subtargets can use this to dynamically select resources and
// latency for WriteLDIdx and ReadAdrBase.
def ScaledIdxPred : SchedPredicate<[{TII->isScaledAddr(MI)}]>;
def ScaledIdxPred : SchedPredicate<[{TII->isScaledAddr(*MI)}]>;
// Serialized two-level address load.
// EXAMPLE: LOADGot
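
The .td changes above follow because a SchedPredicate body is a literal C++ fragment that TableGen pastes into generated code where `MI` is bound to a const MachineInstr pointer; once the TII hooks take references, the fragments must dereference. A hand-written analogue of roughly the shape such a predicate expands to (toy types; the real generated code differs):

struct Instr {
  unsigned Opcode;
  unsigned getOpcode() const { return Opcode; }
};

struct InstrInfo {
  // The migrated hook: reference parameter.
  bool isScaledAddr(const Instr &MI) const { return MI.getOpcode() == 2; }
};

// Shape of the check the predicate string becomes: MI arrives as a
// pointer, so the .td fragment now reads TII->isScaledAddr(*MI).
bool ScaledIdxPred(const InstrInfo *TII, const Instr *MI) {
  return TII->isScaledAddr(*MI);
}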

View File

@ -145,7 +145,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
continue;
unsigned BaseReg;
int64_t Offset;
if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
if (TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI)) {
if (PrevBaseReg == BaseReg) {
// If this block can take STPs, skip ahead to the next block.
if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
@ -153,7 +153,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
// Otherwise, continue unpairing the stores in this block.
DEBUG(dbgs() << "Unpairing store " << MI << "\n");
SuppressSTP = true;
TII->suppressLdStPair(&MI);
TII->suppressLdStPair(MI);
}
PrevBaseReg = BaseReg;
} else

View File

@ -92,7 +92,7 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
AMDGPUMCInstLower MCInstLowering(OutContext, STI);
StringRef Err;
if (!STI.getInstrInfo()->verifyInstruction(MI, Err)) {
if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
C.emitError("Illegal instruction detected: " + Err);
MI->dump();

View File

@ -181,7 +181,7 @@ bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) {
MachineBasicBlock::iterator LatestCFAlu = E;
while (I != E) {
MachineInstr *MI = I++;
if ((!TII->canBeConsideredALU(MI) && !isCFAlu(MI)) ||
if ((!TII->canBeConsideredALU(*MI) && !isCFAlu(MI)) ||
TII->mustBeLastInClause(MI->getOpcode()))
LatestCFAlu = E;
if (!isCFAlu(MI))

View File

@ -318,15 +318,15 @@ private:
MachineBasicBlock::iterator ClauseHead = I;
std::vector<MachineInstr *> ClauseContent;
unsigned AluInstCount = 0;
bool IsTex = TII->usesTextureCache(ClauseHead);
bool IsTex = TII->usesTextureCache(*ClauseHead);
std::set<unsigned> DstRegs;
for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
if (IsTrivialInst(I))
continue;
if (AluInstCount >= MaxFetchInst)
break;
if ((IsTex && !TII->usesTextureCache(I)) ||
(!IsTex && !TII->usesVertexCache(I)))
if ((IsTex && !TII->usesTextureCache(*I)) ||
(!IsTex && !TII->usesVertexCache(*I)))
break;
if (!isCompatibleWithClause(I, DstRegs))
break;
@ -347,8 +347,8 @@ private:
AMDGPU::ALU_LITERAL_Z,
AMDGPU::ALU_LITERAL_W
};
const SmallVector<std::pair<MachineOperand *, int64_t>, 3 > Srcs =
TII->getSrcs(MI);
const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs =
TII->getSrcs(*MI);
for (const auto &Src:Srcs) {
if (Src.first->getReg() != AMDGPU::ALU_LITERAL_X)
continue;
@ -516,7 +516,7 @@ public:
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E;) {
if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
if (TII->usesTextureCache(*I) || TII->usesVertexCache(*I)) {
DEBUG(dbgs() << CfCount << ":"; I->dump(););
FetchClauses.push_back(MakeFetchClause(MBB, I));
CfCount++;
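
For context on the loop above: MakeFetchClause greedily groups consecutive fetch instructions that hit the same cache (texture versus vertex), stopping at the first mismatch or after MaxFetchInst instructions. A toy sketch of that grouping logic, with all names and the cache model as stand-ins:

#include <cstddef>
#include <vector>

enum class Cache { Texture, Vertex };

struct Instr {
  Cache Kind;
};

// Returns the index one past the last instruction of the clause
// starting at Begin: same cache kind, at most MaxFetchInst long.
std::size_t makeFetchClause(const std::vector<Instr> &Block,
                            std::size_t Begin, std::size_t MaxFetchInst) {
  Cache Kind = Block[Begin].Kind;
  std::size_t End = Begin;
  while (End < Block.size() && End - Begin < MaxFetchInst &&
         Block[End].Kind == Kind)
    ++End;
  return End;
}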

View File

@ -122,8 +122,8 @@ private:
if (!TII->isALUInstr(MI->getOpcode()) && MI->getOpcode() != AMDGPU::DOT_4)
return true;
const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Consts =
TII->getSrcs(MI);
const SmallVectorImpl<std::pair<MachineOperand *, int64_t>> &Consts =
TII->getSrcs(*MI);
assert((TII->isALUInstr(MI->getOpcode()) ||
MI->getOpcode() == AMDGPU::DOT_4) && "Can't assign Const");
for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
@ -245,7 +245,7 @@ private:
// clause as predicated alus).
if (AluInstCount > 0)
break;
if (TII->getFlagOp(I).getImm() & MO_FLAG_PUSH)
if (TII->getFlagOp(*I).getImm() & MO_FLAG_PUSH)
PushBeforeModifier = true;
AluInstCount ++;
continue;

View File

@ -60,7 +60,7 @@ void R600ExpandSpecialInstrsPass::SetFlagInNewMI(MachineInstr *NewMI,
int OpIdx = TII->getOperandIdx(*OldMI, Op);
if (OpIdx > -1) {
uint64_t Val = OldMI->getOperand(OpIdx).getImm();
TII->setImmOperand(NewMI, Op, Val);
TII->setImmOperand(*NewMI, Op, Val);
}
}
@ -107,11 +107,11 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
MI.getOperand(0).getReg(), // dst
MI.getOperand(1).getReg(), // src0
AMDGPU::ZERO); // src1
TII->addFlag(PredSet, 0, MO_FLAG_MASK);
TII->addFlag(*PredSet, 0, MO_FLAG_MASK);
if (Flags & MO_FLAG_PUSH) {
TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
TII->setImmOperand(*PredSet, AMDGPU::OpName::update_exec_mask, 1);
} else {
TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1);
TII->setImmOperand(*PredSet, AMDGPU::OpName::update_pred, 1);
}
MI.eraseFromParent();
continue;
@ -137,9 +137,9 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
BMI->bundleWithPred();
}
if (Chan >= 2)
TII->addFlag(BMI, 0, MO_FLAG_MASK);
TII->addFlag(*BMI, 0, MO_FLAG_MASK);
if (Chan != 3)
TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
TII->addFlag(*BMI, 0, MO_FLAG_NOT_LAST);
}
MI.eraseFromParent();
@ -166,9 +166,9 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
BMI->bundleWithPred();
}
if (Chan < 2)
TII->addFlag(BMI, 0, MO_FLAG_MASK);
TII->addFlag(*BMI, 0, MO_FLAG_MASK);
if (Chan != 3)
TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
TII->addFlag(*BMI, 0, MO_FLAG_NOT_LAST);
}
MI.eraseFromParent();
@ -189,7 +189,7 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
BMI->bundleWithPred();
}
if (Chan != 3)
TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
TII->addFlag(*BMI, 0, MO_FLAG_NOT_LAST);
}
MI.eraseFromParent();
@ -212,10 +212,10 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
BMI->bundleWithPred();
}
if (Mask) {
TII->addFlag(BMI, 0, MO_FLAG_MASK);
TII->addFlag(*BMI, 0, MO_FLAG_MASK);
}
if (Chan != 3)
TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
TII->addFlag(*BMI, 0, MO_FLAG_NOT_LAST);
unsigned Opcode = BMI->getOpcode();
// While not strictly necessary from hw point of view, we force
// all src operands of a dot4 inst to belong to the same slot.
@ -330,10 +330,10 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
if (Chan != 0)
NewMI->bundleWithPred();
if (Mask) {
TII->addFlag(NewMI, 0, MO_FLAG_MASK);
TII->addFlag(*NewMI, 0, MO_FLAG_MASK);
}
if (NotLast) {
TII->addFlag(NewMI, 0, MO_FLAG_NOT_LAST);
TII->addFlag(*NewMI, 0, MO_FLAG_NOT_LAST);
}
SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::clamp);
SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::literal);

View File

@ -242,7 +242,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
AMDGPU::MOV,
MI->getOperand(0).getReg(),
MI->getOperand(1).getReg());
TII->addFlag(NewMI, 0, MO_FLAG_CLAMP);
TII->addFlag(*NewMI, 0, MO_FLAG_CLAMP);
break;
}
@ -251,7 +251,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
AMDGPU::MOV,
MI->getOperand(0).getReg(),
MI->getOperand(1).getReg());
TII->addFlag(NewMI, 0, MO_FLAG_ABS);
TII->addFlag(*NewMI, 0, MO_FLAG_ABS);
break;
}
@ -260,7 +260,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
AMDGPU::MOV,
MI->getOperand(0).getReg(),
MI->getOperand(1).getReg());
TII->addFlag(NewMI, 0, MO_FLAG_NEG);
TII->addFlag(*NewMI, 0, MO_FLAG_NEG);
break;
}
@ -268,7 +268,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
unsigned maskedRegister = MI->getOperand(0).getReg();
assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
TII->addFlag(defInstr, 0, MO_FLAG_MASK);
TII->addFlag(*defInstr, 0, MO_FLAG_MASK);
break;
}
@ -294,8 +294,8 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::CONST_COPY: {
MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
MI->getOperand(0).getReg(), AMDGPU::ALU_CONST);
TII->setImmOperand(NewMI, AMDGPU::OpName::src0_sel,
MI->getOperand(1).getImm());
TII->setImmOperand(*NewMI, AMDGPU::OpName::src0_sel,
MI->getOperand(1).getImm());
break;
}
@ -532,7 +532,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
.addOperand(MI->getOperand(1))
.addImm(OPCODE_IS_NOT_ZERO)
.addImm(0); // Flags
TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
.addOperand(MI->getOperand(0))
.addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
@ -546,7 +546,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
.addOperand(MI->getOperand(1))
.addImm(OPCODE_IS_NOT_ZERO_INT)
.addImm(0); // Flags
TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
.addOperand(MI->getOperand(0))
.addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);

View File

@ -152,12 +152,12 @@ bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}
bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
if (isALUInstr(MI->getOpcode()))
bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const {
if (isALUInstr(MI.getOpcode()))
return true;
if (isVector(*MI) || isCubeOp(MI->getOpcode()))
if (isVector(MI) || isCubeOp(MI.getOpcode()))
return true;
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
case AMDGPU::PRED_X:
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
@ -176,16 +176,16 @@ bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}
bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
return isTransOnly(MI->getOpcode());
bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const {
return isTransOnly(MI.getOpcode());
}
bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}
bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
return isVectorOnly(MI->getOpcode());
bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const {
return isVectorOnly(MI.getOpcode());
}
bool R600InstrInfo::isExport(unsigned Opcode) const {
@ -196,21 +196,21 @@ bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
return ST.hasVertexCache() && IS_VTX(get(Opcode));
}
bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
const MachineFunction *MF = MI->getParent()->getParent();
bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
const MachineFunction *MF = MI.getParent()->getParent();
return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
usesVertexCache(MI->getOpcode());
usesVertexCache(MI.getOpcode());
}
bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}
bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
const MachineFunction *MF = MI->getParent()->getParent();
bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
const MachineFunction *MF = MI.getParent()->getParent();
return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
usesVertexCache(MI->getOpcode())) ||
usesTextureCache(MI->getOpcode());
usesVertexCache(MI.getOpcode())) ||
usesTextureCache(MI.getOpcode());
}
bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
@ -223,20 +223,21 @@ bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
}
}
bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
return MI.findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}
bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
return MI.findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}
bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
if (!isALUInstr(MI->getOpcode())) {
bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
if (!isALUInstr(MI.getOpcode())) {
return false;
}
for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
E = MI->operands_end(); I != E; ++I) {
for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
E = MI.operands_end();
I != E; ++I) {
if (!I->isReg() || !I->isUse() ||
TargetRegisterInfo::isVirtualRegister(I->getReg()))
continue;
@ -282,10 +283,10 @@ int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
}
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
R600InstrInfo::getSrcs(MachineInstr &MI) const {
SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
if (MI->getOpcode() == AMDGPU::DOT_4) {
if (MI.getOpcode() == AMDGPU::DOT_4) {
static const unsigned OpTable[8][2] = {
{AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
{AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
@ -298,12 +299,12 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
};
for (unsigned j = 0; j < 8; j++) {
MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
OpTable[j][0]));
MachineOperand &MO =
MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
unsigned Reg = MO.getReg();
if (Reg == AMDGPU::ALU_CONST) {
MachineOperand &Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
OpTable[j][1]));
MachineOperand &Sel =
MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
Result.push_back(std::make_pair(&MO, Sel.getImm()));
continue;
}
@ -319,20 +320,20 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
};
for (unsigned j = 0; j < 3; j++) {
int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
int SrcIdx = getOperandIdx(MI.getOpcode(), OpTable[j][0]);
if (SrcIdx < 0)
break;
MachineOperand &MO = MI->getOperand(SrcIdx);
MachineOperand &MO = MI.getOperand(SrcIdx);
unsigned Reg = MO.getReg();
if (Reg == AMDGPU::ALU_CONST) {
MachineOperand &Sel = MI->getOperand(
getOperandIdx(MI->getOpcode(), OpTable[j][1]));
MachineOperand &Sel =
MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
Result.push_back(std::make_pair(&MO, Sel.getImm()));
continue;
}
if (Reg == AMDGPU::ALU_LITERAL_X) {
MachineOperand &Operand = MI->getOperand(
getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal));
MachineOperand &Operand =
MI.getOperand(getOperandIdx(MI.getOpcode(), AMDGPU::OpName::literal));
if (Operand.isImm()) {
Result.push_back(std::make_pair(&MO, Operand.getImm()));
continue;
@ -344,8 +345,8 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
return Result;
}
std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
std::vector<std::pair<int, unsigned>>
R600InstrInfo::ExtractSrcs(MachineInstr &MI,
const DenseMap<unsigned, unsigned> &PV,
unsigned &ConstCount) const {
ConstCount = 0;
@ -552,7 +553,7 @@ R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
unsigned ConstCount;
BankSwizzle TransBS = ALU_VEC_012_SCL_210;
for (unsigned i = 0, e = IG.size(); i < e; ++i) {
IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
IGSrcs.push_back(ExtractSrcs(*IG[i], PV, ConstCount));
unsigned Op = getOperandIdx(IG[i]->getOpcode(),
AMDGPU::OpName::bank_swizzle);
ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
@ -619,8 +620,8 @@ R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
std::vector<unsigned> Consts;
SmallSet<int64_t, 4> Literals;
for (unsigned i = 0, n = MIs.size(); i < n; i++) {
MachineInstr *MI = MIs[i];
if (!isALUInstr(MI->getOpcode()))
MachineInstr &MI = *MIs[i];
if (!isALUInstr(MI.getOpcode()))
continue;
ArrayRef<std::pair<MachineOperand *, int64_t>> Srcs = getSrcs(MI);
@ -780,7 +781,7 @@ unsigned R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
} else {
MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
assert(PredSet && "No previous predicate !");
addFlag(PredSet, 0, MO_FLAG_PUSH);
addFlag(*PredSet, 0, MO_FLAG_PUSH);
PredSet->getOperand(2).setImm(Cond[1].getImm());
BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
@ -796,7 +797,7 @@ unsigned R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
} else {
MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
assert(PredSet && "No previous predicate !");
addFlag(PredSet, 0, MO_FLAG_PUSH);
addFlag(*PredSet, 0, MO_FLAG_PUSH);
PredSet->getOperand(2).setImm(Cond[1].getImm());
BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
.addMBB(TBB)
@ -828,7 +829,7 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 0;
case AMDGPU::JUMP_COND: {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
clearFlag(predSet, 0, MO_FLAG_PUSH);
clearFlag(*predSet, 0, MO_FLAG_PUSH);
I->eraseFromParent();
MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
if (CfAlu == MBB.end())
@ -853,7 +854,7 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 1;
case AMDGPU::JUMP_COND: {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
clearFlag(predSet, 0, MO_FLAG_PUSH);
clearFlag(*predSet, 0, MO_FLAG_PUSH);
I->eraseFromParent();
MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
if (CfAlu == MBB.end())
@ -1026,7 +1027,7 @@ unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
}
unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
const MachineInstr &,
unsigned *PredCost) const {
if (PredCost)
*PredCost = 2;
@ -1039,44 +1040,43 @@ unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
return RegIndex;
}
bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
switch(MI->getOpcode()) {
bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
switch (MI.getOpcode()) {
default: {
MachineBasicBlock *MBB = MI->getParent();
int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
AMDGPU::OpName::addr);
// addr is a custom operand with multiple MI operands, and only the
// first MI operand is given a name.
MachineBasicBlock *MBB = MI.getParent();
int OffsetOpIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::addr);
// addr is a custom operand with multiple MI operands, and only the
// first MI operand is given a name.
int RegOpIdx = OffsetOpIdx + 1;
int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
AMDGPU::OpName::chan);
if (isRegisterLoad(*MI)) {
int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
AMDGPU::OpName::dst);
unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
int ChanOpIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::chan);
if (isRegisterLoad(MI)) {
int DstOpIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
unsigned Address = calculateIndirectAddress(RegIndex, Channel);
unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
getIndirectAddrRegClass()->getRegister(Address));
} else {
buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
Address, OffsetReg);
buildIndirectRead(MBB, MI, MI.getOperand(DstOpIdx).getReg(), Address,
OffsetReg);
}
} else if (isRegisterStore(*MI)) {
int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
AMDGPU::OpName::val);
unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
} else if (isRegisterStore(MI)) {
int ValOpIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::val);
unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
unsigned Address = calculateIndirectAddress(RegIndex, Channel);
unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
MI->getOperand(ValOpIdx).getReg());
MI.getOperand(ValOpIdx).getReg());
} else {
buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
buildIndirectWrite(MBB, MI, MI.getOperand(ValOpIdx).getReg(),
calculateIndirectAddress(RegIndex, Channel),
OffsetReg);
}
@ -1089,20 +1089,20 @@ bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
}
case AMDGPU::R600_EXTRACT_ELT_V2:
case AMDGPU::R600_EXTRACT_ELT_V4:
buildIndirectRead(MI->getParent(), MI, MI->getOperand(0).getReg(),
RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
MI->getOperand(2).getReg(),
RI.getHWRegChan(MI->getOperand(1).getReg()));
buildIndirectRead(MI.getParent(), MI, MI.getOperand(0).getReg(),
RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
MI.getOperand(2).getReg(),
RI.getHWRegChan(MI.getOperand(1).getReg()));
break;
case AMDGPU::R600_INSERT_ELT_V2:
case AMDGPU::R600_INSERT_ELT_V4:
buildIndirectWrite(MI->getParent(), MI, MI->getOperand(2).getReg(), // Value
RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
MI->getOperand(3).getReg(), // Offset
RI.getHWRegChan(MI->getOperand(1).getReg())); // Channel
buildIndirectWrite(MI.getParent(), MI, MI.getOperand(2).getReg(), // Value
RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
MI.getOperand(3).getReg(), // Offset
RI.getHWRegChan(MI.getOperand(1).getReg())); // Channel
break;
}
MI->eraseFromParent();
MI.eraseFromParent();
return true;
}
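
expandPostRAPseudo is also where the iterator-versus-pointer ambiguity shows up most clearly: the old MachineBasicBlock::iterator parameter would accept a bare MachineInstr* through an implicit conversion. A toy reduction of that hazard, with stand-in types:

#include <iostream>

struct Instr {
  int Id;
};

struct iterator {
  Instr *Ptr;
  iterator(Instr *P) : Ptr(P) {} // implicit, like the old ilist iterator
  Instr &operator*() const { return *Ptr; }
};

// Old shape: callers could hand over an iterator or a raw pointer and
// the conversion happened silently.
bool expandOld(iterator MI) { return (*MI).Id != 0; }

// New shape: the parameter says exactly what is required.
bool expandNew(Instr &MI) { return MI.Id != 0; }

int main() {
  Instr Pseudo{7};
  std::cout << expandOld(&Pseudo) << '\n'; // implicit Instr* -> iterator
  std::cout << expandNew(Pseudo) << '\n';
}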
@ -1153,13 +1153,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
}
MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
AMDGPU::AR_X, OffsetReg);
setImmOperand(MOVA, AMDGPU::OpName::write, 0);
setImmOperand(*MOVA, AMDGPU::OpName::write, 0);
MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
AddrReg, ValueReg)
.addReg(AMDGPU::AR_X,
RegState::Implicit | RegState::Kill);
setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
setImmOperand(*Mov, AMDGPU::OpName::dst_rel, 1);
return Mov;
}
@ -1186,13 +1186,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
AMDGPU::AR_X,
OffsetReg);
setImmOperand(MOVA, AMDGPU::OpName::write, 0);
setImmOperand(*MOVA, AMDGPU::OpName::write, 0);
MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
ValueReg,
AddrReg)
.addReg(AMDGPU::AR_X,
RegState::Implicit | RegState::Kill);
setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);
setImmOperand(*Mov, AMDGPU::OpName::src0_rel, 1);
return Mov;
}
@ -1322,7 +1322,7 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
MachineOperand &MO = MI->getOperand(
getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
assert (MO.isImm());
setImmOperand(MIB, Operands[i], MO.getImm());
setImmOperand(*MIB, Operands[i], MO.getImm());
}
MIB->getOperand(20).setImm(0);
return MIB;
@ -1334,7 +1334,7 @@ MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
uint64_t Imm) const {
MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
AMDGPU::ALU_LITERAL_X);
setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
setImmOperand(*MovImm, AMDGPU::OpName::literal, Imm);
return MovImm;
}
@ -1352,12 +1352,12 @@ int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
return AMDGPU::getNamedOperandIdx(Opcode, Op);
}
void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
int64_t Imm) const {
int Idx = getOperandIdx(*MI, Op);
int Idx = getOperandIdx(MI, Op);
assert(Idx != -1 && "Operand not supported for this instruction.");
assert(MI->getOperand(Idx).isImm());
MI->getOperand(Idx).setImm(Imm);
assert(MI.getOperand(Idx).isImm());
MI.getOperand(Idx).setImm(Imm);
}
//===----------------------------------------------------------------------===//
@ -1368,9 +1368,9 @@ bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}
MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
unsigned Flag) const {
unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
int FlagIndex = 0;
if (Flag != 0) {
// If we pass something other than the default value of Flag to this
@ -1380,20 +1380,26 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
switch (Flag) {
case MO_FLAG_CLAMP:
FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
FlagIndex = getOperandIdx(MI, AMDGPU::OpName::clamp);
break;
case MO_FLAG_MASK:
FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
FlagIndex = getOperandIdx(MI, AMDGPU::OpName::write);
break;
case MO_FLAG_NOT_LAST:
case MO_FLAG_LAST:
FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
FlagIndex = getOperandIdx(MI, AMDGPU::OpName::last);
break;
case MO_FLAG_NEG:
switch (SrcIdx) {
case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
case 0:
FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_neg);
break;
case 1:
FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_neg);
break;
case 2:
FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src2_neg);
break;
}
break;
@ -1402,8 +1408,12 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
"instructions.");
(void)IsOP3;
switch (SrcIdx) {
case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
case 0:
FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_abs);
break;
case 1:
FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_abs);
break;
}
break;
@ -1418,14 +1428,14 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
"Instruction flags not supported for this instruction");
}
MachineOperand &FlagOp = MI->getOperand(FlagIndex);
MachineOperand &FlagOp = MI.getOperand(FlagIndex);
assert(FlagOp.isImm());
return FlagOp;
}
void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
unsigned Flag) const {
unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
if (Flag == 0) {
return;
}
@ -1444,9 +1454,9 @@ void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
}
}
void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
unsigned Flag) const {
unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
if (HAS_NATIVE_OPERANDS(TargetFlags)) {
MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
FlagOp.setImm(0);
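
getFlagOp hands back a mutable reference into the instruction, so addFlag and clearFlag reduce to a read-modify-write on one immediate operand. A compact stand-alone model of that pattern (toy types, simplified flag layout):

#include <cassert>
#include <cstdint>
#include <vector>

struct Operand {
  std::int64_t Imm = 0;
  bool isImm() const { return true; }
  std::int64_t getImm() const { return Imm; }
  void setImm(std::int64_t V) { Imm = V; }
};

struct Instr {
  std::vector<Operand> Ops;
  Operand &getOperand(unsigned I) { return Ops[I]; }
};

Operand &getFlagOp(Instr &MI, unsigned FlagIndex) {
  Operand &FlagOp = MI.getOperand(FlagIndex);
  assert(FlagOp.isImm() && "flags live in an immediate operand");
  return FlagOp;
}

void addFlag(Instr &MI, unsigned FlagIndex, std::int64_t Flag) {
  Operand &Op = getFlagOp(MI, FlagIndex);
  Op.setImm(Op.getImm() | Flag);
}

void clearFlag(Instr &MI, unsigned FlagIndex, std::int64_t Flag) {
  Operand &Op = getFlagOp(MI, FlagIndex);
  Op.setImm(Op.getImm() & ~Flag);
}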

View File

@ -32,8 +32,7 @@ private:
const R600Subtarget &ST;
std::vector<std::pair<int, unsigned>>
ExtractSrcs(MachineInstr *MI,
const DenseMap<unsigned, unsigned> &PV,
ExtractSrcs(MachineInstr &MI, const DenseMap<unsigned, unsigned> &PV,
unsigned &ConstCount) const;
MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
@ -83,23 +82,23 @@ public:
/// \returns true if this \p Opcode represents an ALU instruction or an
/// instruction that will be lowered in ExpandSpecialInstrs Pass.
bool canBeConsideredALU(const MachineInstr *MI) const;
bool canBeConsideredALU(const MachineInstr &MI) const;
bool isTransOnly(unsigned Opcode) const;
bool isTransOnly(const MachineInstr *MI) const;
bool isTransOnly(const MachineInstr &MI) const;
bool isVectorOnly(unsigned Opcode) const;
bool isVectorOnly(const MachineInstr *MI) const;
bool isVectorOnly(const MachineInstr &MI) const;
bool isExport(unsigned Opcode) const;
bool usesVertexCache(unsigned Opcode) const;
bool usesVertexCache(const MachineInstr *MI) const;
bool usesVertexCache(const MachineInstr &MI) const;
bool usesTextureCache(unsigned Opcode) const;
bool usesTextureCache(const MachineInstr *MI) const;
bool usesTextureCache(const MachineInstr &MI) const;
bool mustBeLastInClause(unsigned Opcode) const;
bool usesAddressRegister(MachineInstr *MI) const;
bool definesAddressRegister(MachineInstr *MI) const;
bool readsLDSSrcReg(const MachineInstr *MI) const;
bool usesAddressRegister(MachineInstr &MI) const;
bool definesAddressRegister(MachineInstr &MI) const;
bool readsLDSSrcReg(const MachineInstr &MI) const;
/// \returns The operand index for the given source number. Legal values
/// for SrcNum are 0, 1, and 2.
@ -114,7 +113,7 @@ public:
/// If register is ALU_LITERAL, second member is IMM.
/// Otherwise, second member value is undefined.
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
getSrcs(MachineInstr *MI) const;
getSrcs(MachineInstr &MI) const;
unsigned isLegalUpTo(
const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
@ -205,13 +204,13 @@ public:
unsigned int getPredicationCost(const MachineInstr &) const override;
unsigned int getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
const MachineInstr &MI,
unsigned *PredCost = nullptr) const override;
int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const override { return 1;}
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
/// \brief Reserve the registers that may be accessed using indirect addressing.
void reserveIndirectRegisters(BitVector &Reserved,
@ -286,13 +285,13 @@ public:
int getOperandIdx(unsigned Opcode, unsigned Op) const;
/// \brief Helper function for setting instruction flag values.
void setImmOperand(MachineInstr *MI, unsigned Op, int64_t Imm) const;
void setImmOperand(MachineInstr &MI, unsigned Op, int64_t Imm) const;
/// \returns true if this instruction has an operand for storing target flags.
bool hasFlagOperand(const MachineInstr &MI) const;
///\brief Add one of the MO_FLAG* flags to the specified \p Operand.
void addFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
void addFlag(MachineInstr &MI, unsigned Operand, unsigned Flag) const;
///\brief Determine if the specified \p Flag is set on this \p Operand.
bool isFlagSet(const MachineInstr &MI, unsigned Operand, unsigned Flag) const;
@ -301,11 +300,11 @@ public:
/// \param Flag The flag being set.
///
/// \returns the operand containing the flags for this instruction.
MachineOperand &getFlagOp(MachineInstr *MI, unsigned SrcIdx = 0,
MachineOperand &getFlagOp(MachineInstr &MI, unsigned SrcIdx = 0,
unsigned Flag = 0) const;
/// \brief Clear the specified flag on the instruction.
void clearFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
void clearFlag(MachineInstr &MI, unsigned Operand, unsigned Flag) const;
// Helper functions that check the opcode for status information
bool isRegisterStore(const MachineInstr &MI) const;

View File

@ -222,7 +222,7 @@ bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
MachineInstr *MI = SU->getInstr();
if (TII->isTransOnly(MI))
if (TII->isTransOnly(*MI))
return AluTrans;
switch (MI->getOpcode()) {
@ -286,7 +286,7 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
return AluT_XYZW;
// LDS src registers cannot be used in the Trans slot.
if (TII->readsLDSSrcReg(MI))
if (TII->readsLDSSrcReg(*MI))
return AluT_XYZW;
return AluAny;
@ -323,9 +323,8 @@ SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
It != E; ++It) {
SUnit *SU = *It;
InstructionsGroupCandidate.push_back(SU->getInstr());
if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)
&& (!AnyALU || !TII->isVectorOnly(SU->getInstr()))
) {
if (TII->fitsConstReadLimitations(InstructionsGroupCandidate) &&
(!AnyALU || !TII->isVectorOnly(*SU->getInstr()))) {
InstructionsGroupCandidate.pop_back();
Q.erase((It + 1).base());
return SU;

View File

@ -94,7 +94,7 @@ private:
continue;
}
unsigned Dst = BI->getOperand(DstIdx).getReg();
if (isTrans || TII->isTransOnly(&*BI)) {
if (isTrans || TII->isTransOnly(*BI)) {
Result[Dst] = AMDGPU::PS;
continue;
}
@ -207,10 +207,10 @@ public:
}
}
bool ARDef = TII->definesAddressRegister(MII) ||
TII->definesAddressRegister(MIJ);
bool ARUse = TII->usesAddressRegister(MII) ||
TII->usesAddressRegister(MIJ);
bool ARDef =
TII->definesAddressRegister(*MII) || TII->definesAddressRegister(*MIJ);
bool ARUse =
TII->usesAddressRegister(*MII) || TII->usesAddressRegister(*MIJ);
return !ARDef || !ARUse;
}
@ -230,14 +230,14 @@ public:
const DenseMap<unsigned, unsigned> &PV,
std::vector<R600InstrInfo::BankSwizzle> &BS,
bool &isTransSlot) {
isTransSlot = TII->isTransOnly(&MI);
isTransSlot = TII->isTransOnly(MI);
assert (!isTransSlot || VLIW5);
// Is the dst reg sequence legal ?
if (!isTransSlot && !CurrentPacketMIs.empty()) {
if (getSlot(MI) <= getSlot(*CurrentPacketMIs.back())) {
if (ConsideredInstUsesAlreadyWrittenVectorElement &&
!TII->isVectorOnly(&MI) && VLIW5) {
!TII->isVectorOnly(MI) && VLIW5) {
isTransSlot = true;
DEBUG({
dbgs() << "Considering as Trans Inst :";
@ -284,7 +284,7 @@ public:
}
// We cannot read LDS source registers from the Trans slot.
if (isTransSlot && TII->readsLDSSrcReg(&MI))
if (isTransSlot && TII->readsLDSSrcReg(MI))
return false;
CurrentPacketMIs.pop_back();
@ -319,7 +319,7 @@ public:
return It;
}
endPacket(MI.getParent(), MI);
if (TII->isTransOnly(&MI))
if (TII->isTransOnly(MI))
return MI;
return VLIWPacketizerList::addToPacket(MI);
}
@ -378,7 +378,7 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) {
// instruction stream until we find the nearest boundary.
MachineBasicBlock::iterator I = RegionEnd;
for(;I != MBB->begin(); --I, --RemainingCount) {
if (TII->isSchedulingBoundary(&*std::prev(I), &*MBB, Fn))
if (TII->isSchedulingBoundary(*std::prev(I), &*MBB, Fn))
break;
}
I = MBB->begin();

View File

@ -132,7 +132,7 @@ static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
MachineInstr *MI, unsigned OpNo,
MachineOperand *OpToFold,
const SIInstrInfo *TII) {
if (!TII->isOperandLegal(MI, OpNo, OpToFold)) {
if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
// Special case for v_mac_f32_e64 if we are trying to fold into src2
unsigned Opc = MI->getOpcode();
@ -159,7 +159,7 @@ static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
// see if this makes it possible to fold.
unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
bool CanCommute = TII->findCommutedOpIndices(MI, CommuteIdx0, CommuteIdx1);
bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
if (CanCommute) {
if (CommuteIdx0 == OpNo)
@ -177,10 +177,10 @@ static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
return false;
if (!CanCommute ||
!TII->commuteInstruction(MI, false, CommuteIdx0, CommuteIdx1))
!TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
return false;
if (!TII->isOperandLegal(MI, OpNo, OpToFold))
if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
return false;
}
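
The fold-list logic above follows a try-commute-retry shape: if the operand is illegal where it stands, commute the two sources and test again. A toy reduction of that control flow (the legality rule here is made up, not SI's):

#include <array>
#include <utility>

struct Instr {
  std::array<int, 3> Src;
};

// Made-up rule standing in for the target's operand-legality check.
bool isOperandLegal(const Instr &, unsigned OpNo, int) {
  return OpNo != 2;
}

bool commuteInstruction(Instr &MI, unsigned Idx0, unsigned Idx1) {
  std::swap(MI.Src[Idx0], MI.Src[Idx1]);
  return true;
}

bool tryFold(Instr &MI, unsigned OpNo, int Val) {
  if (isOperandLegal(MI, OpNo, Val))
    return true;
  // Commuting moved the fold target, so re-test at the swapped index.
  if (!commuteInstruction(MI, 1, OpNo))
    return false;
  return isOperandLegal(MI, 1, Val);
}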

View File

@ -3223,7 +3223,7 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
if (TII->isVOP3(MI->getOpcode())) {
// Make sure constant bus requirements are respected.
TII->legalizeOperandsVOP3(MRI, MI);
TII->legalizeOperandsVOP3(MRI, *MI);
return;
}

File diff suppressed because it is too large

View File

@ -54,40 +54,39 @@ private:
unsigned SubIdx,
const TargetRegisterClass *SubRC) const;
void swapOperands(MachineBasicBlock::iterator Inst) const;
void swapOperands(MachineInstr &Inst) const;
void lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
MachineInstr *Inst) const;
MachineInstr &Inst) const;
void splitScalar64BitUnaryOp(SmallVectorImpl<MachineInstr *> &Worklist,
MachineInstr *Inst, unsigned Opcode) const;
MachineInstr &Inst, unsigned Opcode) const;
void splitScalar64BitBinaryOp(SmallVectorImpl<MachineInstr *> &Worklist,
MachineInstr *Inst, unsigned Opcode) const;
MachineInstr &Inst, unsigned Opcode) const;
void splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
MachineInstr *Inst) const;
MachineInstr &Inst) const;
void splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
MachineInstr *Inst) const;
MachineInstr &Inst) const;
void addUsersToMoveToVALUWorklist(
unsigned Reg, MachineRegisterInfo &MRI,
SmallVectorImpl<MachineInstr *> &Worklist) const;
void addSCCDefUsersToVALUWorklist(
MachineInstr *SCCDefInst, SmallVectorImpl<MachineInstr *> &Worklist) const;
void
addSCCDefUsersToVALUWorklist(MachineInstr &SCCDefInst,
SmallVectorImpl<MachineInstr *> &Worklist) const;
const TargetRegisterClass *
getDestEquivalentVGPRClass(const MachineInstr &Inst) const;
bool checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
MachineInstr *MIb) const;
bool checkInstOffsetsDoNotOverlap(MachineInstr &MIa, MachineInstr &MIb) const;
unsigned findUsedSGPR(const MachineInstr *MI, int OpIndices[3]) const;
unsigned findUsedSGPR(const MachineInstr &MI, int OpIndices[3]) const;
protected:
MachineInstr *commuteInstructionImpl(MachineInstr *MI,
bool NewMI,
MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx0,
unsigned OpIdx1) const override;
@ -98,45 +97,40 @@ public:
return RI;
}
bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const override;
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
int64_t &Offset1,
int64_t &Offset2) const override;
bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
int64_t &Offset,
const TargetRegisterInfo *TRI) const final;
bool shouldClusterMemOps(MachineInstr *FirstLdSt,
MachineInstr *SecondLdSt,
bool shouldClusterMemOps(MachineInstr &FirstLdSt, MachineInstr &SecondLdSt,
unsigned NumLoads) const final;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
unsigned calculateLDSSpillAddress(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
RegScavenger *RS,
unsigned TmpReg,
unsigned Offset,
unsigned Size) const;
unsigned calculateLDSSpillAddress(MachineBasicBlock &MBB, MachineInstr &MI,
RegScavenger *RS, unsigned TmpReg,
unsigned Offset, unsigned Size) const;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIndex,
MachineBasicBlock::iterator MI, unsigned SrcReg,
bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
MachineBasicBlock::iterator MI, unsigned DestReg,
int FrameIndex, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
// \brief Returns an opcode that can be used to move a value to a \p DstRC
// register. If there is no hardware instruction that can store to \p
@ -146,8 +140,7 @@ public:
LLVM_READONLY
int commuteOpcode(const MachineInstr &MI) const;
bool findCommutedOpIndices(MachineInstr *MI,
unsigned &SrcOpIdx1,
bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
@ -164,20 +157,20 @@ public:
bool ReverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const override;
bool areMemAccessesTriviallyDisjoint(
MachineInstr *MIa, MachineInstr *MIb,
AliasAnalysis *AA = nullptr) const override;
bool
areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
unsigned Reg, MachineRegisterInfo *MRI) const final;
bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
MachineRegisterInfo *MRI) const final;
unsigned getMachineCSELookAheadLimit() const override { return 500; }
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MBB,
MachineBasicBlock::iterator &MI,
MachineInstr &MI,
LiveVariables *LV) const override;
bool isSchedulingBoundary(const MachineInstr *MI,
bool isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const override;
@ -361,7 +354,7 @@ public:
bool isInlineConstant(const MachineOperand &MO, unsigned OpSize) const;
bool isLiteralConstant(const MachineOperand &MO, unsigned OpSize) const;
bool isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
bool isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
const MachineOperand &MO) const;
/// \brief Return true if this 64-bit VALU instruction has a 32-bit encoding.
@ -380,7 +373,7 @@ public:
bool hasModifiersSet(const MachineInstr &MI,
unsigned OpName) const;
bool verifyInstruction(const MachineInstr *MI,
bool verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const override;
static unsigned getVALUOp(const MachineInstr &MI);
@ -428,11 +421,11 @@ public:
///
/// If the operand being legalized is a register, then a COPY will be used
/// instead of MOV.
void legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const;
void legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const;
/// \brief Check if \p MO is a legal operand if it was the \p OpIdx Operand
/// for \p MI.
bool isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
bool isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
const MachineOperand *MO = nullptr) const;
/// \brief Check if \p MO would be a valid operand for the given operand
@ -450,23 +443,23 @@ public:
/// \brief Legalize operands in \p MI by either commuting it or inserting a
/// copy of src1.
void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr *MI) const;
void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const;
/// \brief Fix operands in \p MI to satisfy constant bus requirements.
void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr *MI) const;
void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr &MI) const;
/// Copy a value from a VGPR (\p SrcReg) to an SGPR. This function can only
/// be used when it is known that the value in SrcReg is the same across all
/// threads in the wave.
/// \returns The SGPR register that \p SrcReg was copied to.
unsigned readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr *UseMI,
MachineRegisterInfo &MRI) const;
unsigned readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
MachineRegisterInfo &MRI) const;
void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr *MI) const;
void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr &MI) const;
/// \brief Legalize all operands in this instruction. This function may
/// create new instructions and insert them before \p MI.
void legalizeOperands(MachineInstr *MI) const;
void legalizeOperands(MachineInstr &MI) const;
/// \brief Replace this instruction's opcode with the equivalent VALU
/// opcode. This function will also move the users of \p MI to the
@ -505,8 +498,8 @@ public:
uint64_t getDefaultRsrcDataFormat() const;
uint64_t getScratchRsrcWords23() const;
bool isLowLatencyInstruction(const MachineInstr *MI) const;
bool isHighLatencyInstruction(const MachineInstr *MI) const;
bool isLowLatencyInstruction(const MachineInstr &MI) const;
bool isHighLatencyInstruction(const MachineInstr &MI) const;
/// \brief Return the descriptor of the target-specific machine instruction
/// that corresponds to the specified pseudo or native opcode.
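
Taken together, the SIInstrInfo declarations above move the whole AMDGPU legalization surface from MachineInstr* to MachineInstr&. A minimal call-site sketch of the updated convention, assuming the usual LLVM CodeGen headers and a pass that already holds a const SIInstrInfo *TII (the helper name is invented):

static void legalizeAllOperands(llvm::MachineBasicBlock &MBB,
                                const llvm::SIInstrInfo *TII) {
  // Range-for over a basic block yields MachineInstr&, which now feeds the
  // reference-taking hooks directly, with no '*' or '&' juggling.
  for (llvm::MachineInstr &MI : MBB)
    TII->legalizeOperands(MI);
}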


@ -1694,7 +1694,7 @@ void SIScheduleDAGMI::moveLowLatencies() {
for (SDep& PredDep : SU->Preds) {
SUnit *Pred = PredDep.getSUnit();
if (SITII->isLowLatencyInstruction(Pred->getInstr())) {
if (SITII->isLowLatencyInstruction(*Pred->getInstr())) {
IsLowLatencyUser = true;
}
if (Pred->NodeNum >= DAGSize)
@ -1704,7 +1704,7 @@ void SIScheduleDAGMI::moveLowLatencies() {
MinPos = PredPos + 1;
}
if (SITII->isLowLatencyInstruction(SU->getInstr())) {
if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
unsigned BestPos = LastLowLatencyUser + 1;
if ((int)BestPos <= LastLowLatencyPos)
BestPos = LastLowLatencyPos + 1;
@ -1729,7 +1729,7 @@ void SIScheduleDAGMI::moveLowLatencies() {
bool CopyForLowLat = false;
for (SDep& SuccDep : SU->Succs) {
SUnit *Succ = SuccDep.getSUnit();
if (SITII->isLowLatencyInstruction(Succ->getInstr())) {
if (SITII->isLowLatencyInstruction(*Succ->getInstr())) {
CopyForLowLat = true;
}
}
@ -1814,12 +1814,12 @@ void SIScheduleDAGMI::schedule()
SUnit *SU = &SUnits[i];
unsigned BaseLatReg;
int64_t OffLatReg;
if (SITII->isLowLatencyInstruction(SU->getInstr())) {
if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
IsLowLatencySU[i] = 1;
if (SITII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseLatReg,
OffLatReg, TRI))
if (SITII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseLatReg, OffLatReg,
TRI))
LowLatencyOffset[i] = OffLatReg;
} else if (SITII->isHighLatencyInstruction(SU->getInstr()))
} else if (SITII->isHighLatencyInstruction(*SU->getInstr()))
IsHighLatencySU[i] = 1;
}
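
The scheduler churn is the mirror image: SUnit::getInstr() still returns a MachineInstr*, so each call into the reference-taking hooks picks up a dereference. The guarded shape, as a sketch (the counter is invented):

// getInstr() may be null for boundary SUnits, so check before dereferencing.
if (llvm::MachineInstr *MI = SU->getInstr())
  if (SITII->isLowLatencyInstruction(*MI))
    ++NumLowLatencySUs;  // illustrative bookkeeping, not from this commit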


@ -651,7 +651,7 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
default: {
int64_t Offset = FrameInfo->getObjectOffset(Index);
FIOp.ChangeToImmediate(Offset);
if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(*MBB, MI, MI->getDebugLoc(),
TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)


@ -172,7 +172,7 @@ static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
}
// We have failed to fold src0, so commute the instruction and try again.
if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(&MI))
if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(MI))
foldImmediates(MI, TII, MRI, false);
}
@ -312,7 +312,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
if (!canShrink(MI, TII, TRI, MRI)) {
// Try commuting the instruction and see if that enables us to shrink
// it.
if (!MI.isCommutable() || !TII->commuteInstruction(&MI) ||
if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
!canShrink(MI, TII, TRI, MRI))
continue;
}

File diff suppressed because it is too large


@ -92,8 +92,7 @@ protected:
/// non-commutable pair of operand indices OpIdx1 and OpIdx2.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands, null pointer is returned in such cases.
MachineInstr *commuteInstructionImpl(MachineInstr *MI,
bool NewMI,
MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const override;
@ -106,7 +105,7 @@ public:
virtual unsigned getUnindexedOpcode(unsigned Opc) const = 0;
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
MachineInstr &MI,
LiveVariables *LV) const override;
virtual const ARMBaseRegisterInfo &getRegisterInfo() const = 0;
@ -155,15 +154,15 @@ public:
/// GetInstSize - Returns the size of the specified MachineInstr.
///
virtual unsigned GetInstSizeInBytes(const MachineInstr* MI) const;
virtual unsigned GetInstSizeInBytes(const MachineInstr &MI) const;
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
@ -189,21 +188,21 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const override;
MachineInstr *duplicate(MachineInstr *Orig,
MachineInstr *duplicate(MachineInstr &Orig,
MachineFunction &MF) const override;
const MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
unsigned SubIdx, unsigned State,
const TargetRegisterInfo *TRI) const;
bool produceSameValue(const MachineInstr *MI0, const MachineInstr *MI1,
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1,
const MachineRegisterInfo *MRI) const override;
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
@ -226,7 +225,7 @@ public:
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const override;
bool isSchedulingBoundary(const MachineInstr *MI,
bool isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const override;
@ -251,7 +250,7 @@ public:
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const override;
@ -259,30 +258,29 @@ public:
/// that we can remove a "comparison with zero"; Remove a redundant CMP
/// instruction if the flags can be updated in the same way by an earlier
/// instruction such as SUB.
bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
bool analyzeSelect(const MachineInstr *MI,
SmallVectorImpl<MachineOperand> &Cond,
unsigned &TrueOp, unsigned &FalseOp,
bool &Optimizable) const override;
bool analyzeSelect(const MachineInstr &MI,
SmallVectorImpl<MachineOperand> &Cond, unsigned &TrueOp,
unsigned &FalseOp, bool &Optimizable) const override;
MachineInstr *optimizeSelect(MachineInstr *MI,
MachineInstr *optimizeSelect(MachineInstr &MI,
SmallPtrSetImpl<MachineInstr *> &SeenMIs,
bool) const override;
/// FoldImmediate - 'Reg' is known to be defined by a move immediate
/// instruction, try to fold the immediate into the use instruction.
bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
unsigned Reg, MachineRegisterInfo *MRI) const override;
bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
MachineRegisterInfo *MRI) const override;
unsigned getNumMicroOps(const InstrItineraryData *ItinData,
const MachineInstr *MI) const override;
const MachineInstr &MI) const override;
int getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI,
const MachineInstr &DefMI, unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const override;
int getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
@ -290,19 +288,20 @@ public:
/// VFP/NEON execution domains.
std::pair<uint16_t, uint16_t>
getExecutionDomain(const MachineInstr *MI) const override;
void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;
getExecutionDomain(const MachineInstr &MI) const override;
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override;
unsigned getPartialRegUpdateClearance(const MachineInstr*, unsigned,
const TargetRegisterInfo*) const override;
void breakPartialRegDependency(MachineBasicBlock::iterator, unsigned,
unsigned
getPartialRegUpdateClearance(const MachineInstr &, unsigned,
const TargetRegisterInfo *) const override;
void breakPartialRegDependency(MachineInstr &, unsigned,
const TargetRegisterInfo *TRI) const override;
/// Get the number of addresses accessed by an LDM or VLDM, or zero if unknown.
unsigned getNumLDMAddresses(const MachineInstr *MI) const;
unsigned getNumLDMAddresses(const MachineInstr &MI) const;
private:
unsigned getInstBundleLength(const MachineInstr *MI) const;
unsigned getInstBundleLength(const MachineInstr &MI) const;
int getVLDMDefCycle(const InstrItineraryData *ItinData,
const MCInstrDesc &DefMCID,
@ -326,10 +325,17 @@ private:
const MCInstrDesc &UseMCID,
unsigned UseIdx, unsigned UseAlign) const;
int getOperandLatencyImpl(const InstrItineraryData *ItinData,
const MachineInstr &DefMI, unsigned DefIdx,
const MCInstrDesc &DefMCID, unsigned DefAdj,
const MachineOperand &DefMO, unsigned Reg,
const MachineInstr &UseMI, unsigned UseIdx,
const MCInstrDesc &UseMCID, unsigned UseAdj) const;
unsigned getPredicationCost(const MachineInstr &MI) const override;
unsigned getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
const MachineInstr &MI,
unsigned *PredCost = nullptr) const override;
int getInstrLatency(const InstrItineraryData *ItinData,
@ -337,15 +343,15 @@ private:
bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI,
const MachineInstr &DefMI, unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const override;
bool hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI,
const MachineInstr &DefMI,
unsigned DefIdx) const override;
/// verifyInstruction - Perform target specific instruction verification.
bool verifyInstruction(const MachineInstr *MI,
bool verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const override;
virtual void expandLoadStackGuard(MachineBasicBlock::iterator MI) const = 0;
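
The private getOperandLatencyImpl declared above is the one structural addition in this header; its parameter list spells out everything the public getOperandLatency must gather before delegating. A plausible forwarding fragment, assuming zero adjustment values and eliding the real bundle and operand checks:

// Hypothetical delegation from inside the public hook; gathering only.
const llvm::MCInstrDesc &DefMCID = DefMI.getDesc();
const llvm::MCInstrDesc &UseMCID = UseMI.getDesc();
const llvm::MachineOperand &DefMO = DefMI.getOperand(DefIdx);
return getOperandLatencyImpl(ItinData, DefMI, DefIdx, DefMCID, /*DefAdj=*/0,
                             DefMO, DefMO.getReg(), UseMI, UseIdx, UseMCID,
                             /*UseAdj=*/0);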


@ -919,7 +919,7 @@ void ARMConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
++I) {
BBI.Size += TII->GetInstSizeInBytes(I);
BBI.Size += TII->GetInstSizeInBytes(*I);
// For inline asm, GetInstSizeInBytes returns a conservative estimate.
// The actual size may be smaller, but still a multiple of the instr size.
if (I->isInlineAsm())
@ -950,7 +950,7 @@ unsigned ARMConstantIslands::getOffsetOf(MachineInstr *MI) const {
// Sum instructions before MI in MBB.
for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
assert(I != MBB->end() && "Didn't find MI in its own basic block?");
Offset += TII->GetInstSizeInBytes(I);
Offset += TII->GetInstSizeInBytes(*I);
}
return Offset;
}
@ -1458,7 +1458,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// iterates at least once.
BaseInsertOffset =
std::max(UserBBI.postOffset() - UPad - 8,
UserOffset + TII->GetInstSizeInBytes(UserMI) + 1);
UserOffset + TII->GetInstSizeInBytes(*UserMI) + 1);
DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
}
unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
@ -1468,9 +1468,9 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned CPUIndex = CPUserIndex+1;
unsigned NumCPUsers = CPUsers.size();
MachineInstr *LastIT = nullptr;
for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
for (unsigned Offset = UserOffset + TII->GetInstSizeInBytes(*UserMI);
Offset < BaseInsertOffset;
Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) {
Offset += TII->GetInstSizeInBytes(*MI), MI = std::next(MI)) {
assert(MI != UserMBB->end() && "Fell off end of block");
if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
CPUser &U = CPUsers[CPUIndex];
@ -1771,7 +1771,7 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
splitBlockBeforeInstr(MI);
// No need for the branch to the next block. We're adding an unconditional
// branch to the destination.
int delta = TII->GetInstSizeInBytes(&MBB->back());
int delta = TII->GetInstSizeInBytes(MBB->back());
BBInfo[MBB->getNumber()].Size -= delta;
MBB->back().eraseFromParent();
// BBInfo[SplitBB].Offset is wrong temporarily, fixed below
@ -1787,18 +1787,18 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
.addMBB(NextBB).addImm(CC).addReg(CCReg);
Br.MI = &MBB->back();
BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
if (isThumb)
BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB)
.addImm(ARMCC::AL).addReg(0);
else
BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
// Remove the old conditional branch. It may or may not still be in MBB.
BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(*MI);
MI->eraseFromParent();
adjustBBOffsetsAfter(MBB);
return true;
@ -2211,8 +2211,8 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() {
}
}
unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
unsigned OrigSize = TII->GetInstSizeInBytes(MI);
unsigned NewSize = TII->GetInstSizeInBytes(*NewJTMI);
unsigned OrigSize = TII->GetInstSizeInBytes(*MI);
MI->eraseFromParent();
int Delta = OrigSize - NewSize + DeadSize;
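
Both spellings of this size bookkeeping now bottom out in a MachineInstr&: explicit iterators add a *I, while range-for produces the reference for free. Side by side, as a sketch:

unsigned Bytes = 0;
for (llvm::MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
  Bytes += TII->GetInstSizeInBytes(*I);      // iterator form: dereference

unsigned SameBytes = 0;                      // equivalent range-for form
for (const llvm::MachineInstr &MI : MBB)
  SameBytes += TII->GetInstSizeInBytes(MI);  // already a reference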


@ -1360,7 +1360,7 @@ static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
unsigned FnSize = 0;
for (auto &MBB : MF) {
for (auto &MI : MBB)
FnSize += TII.GetInstSizeInBytes(&MI);
FnSize += TII.GetInstSizeInBytes(MI);
}
return FnSize;
}


@ -2053,7 +2053,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
if (!Def)
return false;
if (!Flags.isByVal()) {
if (!TII->isLoadFromStackSlot(Def, FI))
if (!TII->isLoadFromStackSlot(*Def, FI))
return false;
} else {
return false;


@ -2025,12 +2025,12 @@ def A9WriteAdr#NumAddr : WriteSequence<[A9WriteAdr], NumAddr>;
// Define a predicate to select the LDM based on number of memory addresses.
def A9LMAdr#NumAddr#Pred :
SchedPredicate<"(TII->getNumLDMAddresses(MI)+1)/2 == "#NumAddr>;
SchedPredicate<"(TII->getNumLDMAddresses(*MI)+1)/2 == "#NumAddr>;
} // foreach NumAddr
// Fall-back for unknown LDMs.
def A9LMUnknownPred : SchedPredicate<"TII->getNumLDMAddresses(MI) == 0">;
def A9LMUnknownPred : SchedPredicate<"TII->getNumLDMAddresses(*MI) == 0">;
// LDM/VLDM/VLDn address generation latency & resources.
// Dynamically select the A9WriteAdrN sequence using a predicate.


@ -374,7 +374,7 @@ let SchedModel = SwiftModel in {
}
// Predicate.
foreach NumAddr = 1-16 in {
def SwiftLMAddr#NumAddr#Pred : SchedPredicate<"TII->getNumLDMAddresses(MI) == "#NumAddr>;
def SwiftLMAddr#NumAddr#Pred : SchedPredicate<"TII->getNumLDMAddresses(*MI) == "#NumAddr>;
}
def SwiftWriteLDMAddrNoWB : SchedWriteRes<[SwiftUnitP01]> { let Latency = 0; }
def SwiftWriteLDMAddrWB : SchedWriteRes<[SwiftUnitP01, SwiftUnitP01]>;
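
The .td changes deserve a note: a SchedPredicate body is pasted verbatim into generated C++ in which, as far as I can tell, MI is still a const MachineInstr*, which is why the strings gain a *MI even though no C++ file here changed. Roughly, the pasted condition behaves like this hedged reconstruction (NumAddr stands in for the foreach variable; this is not actual tblgen output):

// Rough shape after pasting; MI (a const MachineInstr *) and TII are
// supplied by the generated scheduling glue, not by this snippet.
if ((TII->getNumLDMAddresses(*MI) + 1) / 2 == NumAddr)
  /* ...this scheduling alternative is selected... */;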


@ -718,7 +718,7 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
if (Reg1 != Reg0)
return false;
// Try to commute the operands to make it a 2-address instruction.
MachineInstr *CommutedMI = TII->commuteInstruction(MI);
MachineInstr *CommutedMI = TII->commuteInstruction(*MI);
if (!CommutedMI)
return false;
}
@ -726,11 +726,11 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
// Try to commute the operands to make it a 2-address instruction.
unsigned CommOpIdx1 = 1;
unsigned CommOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
if (!TII->findCommutedOpIndices(*MI, CommOpIdx1, CommOpIdx2) ||
MI->getOperand(CommOpIdx2).getReg() != Reg0)
return false;
MachineInstr *CommutedMI =
TII->commuteInstruction(MI, false, CommOpIdx1, CommOpIdx2);
TII->commuteInstruction(*MI, false, CommOpIdx1, CommOpIdx2);
if (!CommutedMI)
return false;
}
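
The commute API keeps its pointer result (null means the commute failed) while taking its operand by reference, so the idiomatic pairing reads as below. A sketch of the contract, with CommuteAnyOperandIndex letting the target pick the partner operand (control flow invented):

unsigned Idx1 = llvm::TargetInstrInfo::CommuteAnyOperandIndex;
unsigned Idx2 = llvm::TargetInstrInfo::CommuteAnyOperandIndex;
if (TII->findCommutedOpIndices(MI, Idx1, Idx2))
  if (llvm::MachineInstr *Commuted =
          TII->commuteInstruction(MI, /*NewMI=*/false, Idx1, Idx2))
    (void)Commuted;  // commute succeeded; MI was mutated in place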


@ -60,15 +60,15 @@ void AVRInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
.addReg(SrcReg, getKillRegState(KillSrc));
}
unsigned AVRInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned AVRInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
case AVR::LDDRdPtrQ:
case AVR::LDDWRdYQ: { //:FIXME: remove this once PR13375 gets fixed
if ((MI->getOperand(1).isFI()) && (MI->getOperand(2).isImm()) &&
(MI->getOperand(2).getImm() == 0)) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
break;
}
@ -79,15 +79,15 @@ unsigned AVRInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
return 0;
}
unsigned AVRInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
unsigned AVRInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
case AVR::STDPtrQRr:
case AVR::STDWPtrQRr: {
if ((MI->getOperand(0).isFI()) && (MI->getOperand(1).isImm()) &&
(MI->getOperand(1).getImm() == 0)) {
FrameIndex = MI->getOperand(0).getIndex();
return MI->getOperand(2).getReg();
if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
MI.getOperand(1).getImm() == 0) {
FrameIndex = MI.getOperand(0).getIndex();
return MI.getOperand(2).getReg();
}
break;
}


@ -84,9 +84,9 @@ public:
MachineBasicBlock::iterator MI, unsigned DestReg,
int FrameIndex, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
// Branch analysis.


@ -823,7 +823,7 @@ bool HexagonExpandCondsets::canMoveMemTo(MachineInstr *TheI, MachineInstr *ToI,
bool IsLoad = TheI->mayLoad(), IsStore = TheI->mayStore();
if (!IsLoad && !IsStore)
return true;
if (HII->areMemAccessesTriviallyDisjoint(TheI, ToI))
if (HII->areMemAccessesTriviallyDisjoint(*TheI, *ToI))
return true;
if (TheI->hasUnmodeledSideEffects())
return false;


@ -1926,8 +1926,8 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
for (auto &In : B) {
int LFI, SFI;
bool Load = HII.isLoadFromStackSlot(&In, LFI) && !HII.isPredicated(In);
bool Store = HII.isStoreToStackSlot(&In, SFI) && !HII.isPredicated(In);
bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
if (Load && Store) {
// If it's both a load and a store, then we won't handle it.
BadFIs.insert(LFI);
@ -2146,7 +2146,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
MachineInstr *MI = &*It;
NextIt = std::next(It);
int TFI;
if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI)
if (!HII.isLoadFromStackSlot(*MI, TFI) || TFI != FI)
continue;
unsigned DstR = MI->getOperand(0).getReg();
assert(MI->getOperand(0).getSubReg() == 0);
@ -2156,9 +2156,9 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
unsigned MemSize = (1U << (HII.getMemAccessSize(MI) - 1));
assert(HII.getAddrMode(MI) == HexagonII::BaseImmOffset);
unsigned CopyOpc = TargetOpcode::COPY;
if (HII.isSignExtendingLoad(MI))
if (HII.isSignExtendingLoad(*MI))
CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
else if (HII.isZeroExtendingLoad(MI))
else if (HII.isZeroExtendingLoad(*MI))
CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)
.addReg(FoundR, getKillRegState(MI == EI));
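
All of these rewrites lean on the same return convention: isLoadFromStackSlot (and its store twin) answer with the transferred register, or 0, and fill in the frame index on success. As a sketch, reusing the local names from the hunks above (the bookkeeping helper is hypothetical):

int FI = 0;
if (unsigned DstReg = HII.isLoadFromStackSlot(In, FI))
  recordReload(DstReg, FI);  // nonzero return == the loaded register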


@ -450,8 +450,8 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L,
unsigned CmpReg1 = 0, CmpReg2 = 0;
int CmpImm = 0, CmpMask = 0;
bool CmpAnalyzed = TII->analyzeCompare(PredI, CmpReg1, CmpReg2,
CmpMask, CmpImm);
bool CmpAnalyzed =
TII->analyzeCompare(*PredI, CmpReg1, CmpReg2, CmpMask, CmpImm);
// Fail if the compare was not analyzed, or it's not comparing a register
// with an immediate value. Not checking the mask here, since we handle
// the individual compare opcodes (including A4_cmpb*) later on.
@ -620,8 +620,8 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
unsigned CmpReg1 = 0, CmpReg2 = 0;
int Mask = 0, ImmValue = 0;
bool AnalyzedCmp = TII->analyzeCompare(CondI, CmpReg1, CmpReg2,
Mask, ImmValue);
bool AnalyzedCmp =
TII->analyzeCompare(*CondI, CmpReg1, CmpReg2, Mask, ImmValue);
if (!AnalyzedCmp)
return nullptr;
@ -1420,7 +1420,7 @@ bool HexagonHardwareLoops::loopCountMayWrapOrUnderFlow(
unsigned CmpReg1 = 0, CmpReg2 = 0;
int CmpMask = 0, CmpValue = 0;
if (!TII->analyzeCompare(MI, CmpReg1, CmpReg2, CmpMask, CmpValue))
if (!TII->analyzeCompare(*MI, CmpReg1, CmpReg2, CmpMask, CmpValue))
continue;
MachineBasicBlock *TBB = 0, *FBB = 0;
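
analyzeCompare's out-parameters are easiest to read at a consumer: SrcReg/SrcReg2 for a register-register compare, Mask/Value when the second operand is an immediate, in which case SrcReg2 conventionally stays 0. A hedged consumer sketch (the helper is hypothetical):

unsigned CmpReg1 = 0, CmpReg2 = 0;
int CmpMask = 0, CmpValue = 0;
if (TII->analyzeCompare(*MI, CmpReg1, CmpReg2, CmpMask, CmpValue) &&
    CmpReg2 == 0)
  handleCompareWithImm(CmpReg1, CmpValue);  // hypothetical helper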

File diff suppressed because it is too large


@ -43,7 +43,7 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// If the specified machine instruction is a direct
@ -51,7 +51,7 @@ public:
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// Analyze the branching code at the end of MBB, returning
@ -170,7 +170,7 @@ public:
/// into real instructions. The target can edit MI in place, or it can insert
/// new instructions and erase MI. The function should return true if
/// anything was changed.
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
/// Reverses the branch condition of the specified condition list,
/// returning false on success and true if it cannot be reversed.
@ -207,7 +207,7 @@ public:
/// Test if the given instruction should be considered a scheduling boundary.
/// This primarily includes labels and terminators.
bool isSchedulingBoundary(const MachineInstr *MI,
bool isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const override;
@ -226,15 +226,14 @@ public:
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
bool analyzeCompare(const MachineInstr *MI,
unsigned &SrcReg, unsigned &SrcReg2,
int &Mask, int &Value) const override;
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask, int &Value) const override;
/// Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
unsigned getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
const MachineInstr &MI,
unsigned *PredCost = 0) const override;
/// Create machine specific model for scheduling.
@ -245,10 +244,9 @@ public:
// to tell, even without aliasing information, that two MIs access different
// memory addresses. This function returns true if two MIs access different
// memory addresses and false otherwise.
bool areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
AliasAnalysis *AA = nullptr)
const override;
bool
areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
/// HexagonInstrInfo specifics.
///
@ -308,7 +306,7 @@ public:
bool isPredicateLate(unsigned Opcode) const;
bool isPredictedTaken(unsigned Opcode) const;
bool isSaveCalleeSavedRegsCall(const MachineInstr *MI) const;
bool isSignExtendingLoad(const MachineInstr *MI) const;
bool isSignExtendingLoad(const MachineInstr &MI) const;
bool isSolo(const MachineInstr* MI) const;
bool isSpillPredRegOp(const MachineInstr *MI) const;
bool isTC1(const MachineInstr *MI) const;
@ -322,8 +320,7 @@ public:
bool isVecALU(const MachineInstr *MI) const;
bool isVecUsableNextPacket(const MachineInstr *ProdMI,
const MachineInstr *ConsMI) const;
bool isZeroExtendingLoad(const MachineInstr *MI) const;
bool isZeroExtendingLoad(const MachineInstr &MI) const;
bool canExecuteInBundle(const MachineInstr *First,
const MachineInstr *Second) const;


@ -464,7 +464,7 @@ void HexagonSplitDoubleRegs::collectIndRegsForLoop(const MachineLoop *L,
CmpI = MRI->getVRegDef(CmpI->getOperand(1).getReg());
int Mask = 0, Val = 0;
bool OkCI = TII->analyzeCompare(CmpI, CmpR1, CmpR2, Mask, Val);
bool OkCI = TII->analyzeCompare(*CmpI, CmpR1, CmpR2, Mask, Val);
if (!OkCI)
return;
// Eliminate non-double input registers.


@ -218,12 +218,12 @@ bool HexagonPacketizer::runOnMachineFunction(MachineFunction &MF) {
// Find the first non-boundary starting from the end of the last
// scheduling region.
MachineBasicBlock::iterator RB = Begin;
while (RB != End && HII->isSchedulingBoundary(RB, &MB, MF))
while (RB != End && HII->isSchedulingBoundary(*RB, &MB, MF))
++RB;
// Find the first boundary starting from the beginning of the new
// region.
MachineBasicBlock::iterator RE = RB;
while (RE != End && !HII->isSchedulingBoundary(RE, &MB, MF))
while (RE != End && !HII->isSchedulingBoundary(*RE, &MB, MF))
++RE;
// Add the scheduling boundary if it's not block end.
if (RE != End)


@ -86,14 +86,14 @@ void LanaiInstrInfo::loadRegFromStackSlot(
.addImm(LPAC::ADD);
}
bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
MachineInstr *MIb,
bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
MachineInstr &MIb,
AliasAnalysis *AA) const {
assert(MIa && MIa->mayLoadOrStore() && "MIa must be a load or store.");
assert(MIb && MIb->mayLoadOrStore() && "MIb must be a load or store.");
assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
return false;
// Retrieve the base register, offset from the base register and width. Width
@ -118,7 +118,7 @@ bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
return false;
}
bool LanaiInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
bool LanaiInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
return false;
}
@ -321,20 +321,20 @@ unsigned LanaiInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return Count;
}
unsigned LanaiInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned LanaiInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
if (MI->getOpcode() == Lanai::LDW_RI)
if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
MI->getOperand(2).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
if (MI.getOpcode() == Lanai::LDW_RI)
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
return 0;
}
unsigned LanaiInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
unsigned LanaiInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
if (MI->getOpcode() == Lanai::LDW_RI) {
if (MI.getOpcode() == Lanai::LDW_RI) {
unsigned Reg;
if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
return Reg;
@ -345,30 +345,29 @@ unsigned LanaiInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
return 0;
}
unsigned LanaiInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
unsigned LanaiInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
if (MI->getOpcode() == Lanai::SW_RI)
if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
MI->getOperand(1).getImm() == 0) {
FrameIndex = MI->getOperand(0).getIndex();
return MI->getOperand(2).getReg();
if (MI.getOpcode() == Lanai::SW_RI)
if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
MI.getOperand(1).getImm() == 0) {
FrameIndex = MI.getOperand(0).getIndex();
return MI.getOperand(2).getReg();
}
return 0;
}
bool LanaiInstrInfo::getMemOpBaseRegImmOfsWidth(
MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
const TargetRegisterInfo *TRI) const {
// Handle only loads/stores with base register followed by immediate offset
// and with add as ALU op.
if (LdSt->getNumOperands() != 4)
if (LdSt.getNumOperands() != 4)
return false;
if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm() ||
!(LdSt->getOperand(3).isImm() &&
LdSt->getOperand(3).getImm() == LPAC::ADD))
if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm() ||
!(LdSt.getOperand(3).isImm() && LdSt.getOperand(3).getImm() == LPAC::ADD))
return false;
switch (LdSt->getOpcode()) {
switch (LdSt.getOpcode()) {
default:
return false;
case Lanai::LDW_RI:
@ -389,15 +388,15 @@ bool LanaiInstrInfo::getMemOpBaseRegImmOfsWidth(
break;
}
BaseReg = LdSt->getOperand(1).getReg();
Offset = LdSt->getOperand(2).getImm();
BaseReg = LdSt.getOperand(1).getReg();
Offset = LdSt.getOperand(2).getImm();
return true;
}
bool LanaiInstrInfo::getMemOpBaseRegImmOfs(
MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset,
MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
const TargetRegisterInfo *TRI) const {
switch (LdSt->getOpcode()) {
switch (LdSt.getOpcode()) {
default:
return false;
case Lanai::LDW_RI:
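
getMemOpBaseRegImmOfs is what the generic machine scheduler consults when deciding whether memory operations can be clustered, so this Lanai migration feeds straight into hooks like shouldClusterMemOps. A consumer sketch; the threshold and helper are invented, and std::abs needs <cstdlib>:

unsigned BaseA = 0, BaseB = 0;
int64_t OffA = 0, OffB = 0;
if (TII->getMemOpBaseRegImmOfs(MIa, BaseA, OffA, TRI) &&
    TII->getMemOpBaseRegImmOfs(MIb, BaseB, OffB, TRI) &&
    BaseA == BaseB && std::abs(OffA - OffB) <= 8)  // threshold is invented
  clusterNeighbors(MIa, MIb);  // hypothetical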


@ -35,16 +35,16 @@ public:
return RegisterInfo;
}
bool areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA) const override;
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
@ -65,13 +65,13 @@ public:
const TargetRegisterClass *RegisterClass,
const TargetRegisterInfo *RegisterInfo) const override;
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
bool getMemOpBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
bool getMemOpBaseRegImmOfsWidth(MachineInstr &LdSt, unsigned &BaseReg,
int64_t &Offset, unsigned &Width,
const TargetRegisterInfo *TRI) const;


@ -41,7 +41,7 @@ const MipsRegisterInfo &Mips16InstrInfo::getRegisterInfo() const {
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned Mips16InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned Mips16InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
@ -51,7 +51,7 @@ unsigned Mips16InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned Mips16InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
unsigned Mips16InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
@ -124,9 +124,9 @@ void Mips16InstrInfo::loadRegFromStack(MachineBasicBlock &MBB,
.addMemOperand(MMO);
}
bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
MachineBasicBlock &MBB = *MI->getParent();
switch(MI->getDesc().getOpcode()) {
bool Mips16InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
MachineBasicBlock &MBB = *MI.getParent();
switch (MI.getDesc().getOpcode()) {
default:
return false;
case Mips::RetRA16:
@ -134,7 +134,7 @@ bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
break;
}
MBB.erase(MI);
MBB.erase(MI.getIterator());
return true;
}
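
MBB.erase(MI.getIterator()) is the telling line: a MachineInstr& no longer slides implicitly into a MachineBasicBlock::iterator, so code that needs an erase or insertion point asks for one explicitly. A minimal sketch of the recovery step:

// getIterator() comes from MachineInstr's intrusive-list node base and is
// the explicit bridge back from a reference to an iterator position.
llvm::MachineBasicBlock::iterator Pt = MI.getIterator();
MI.getParent()->erase(Pt);  // same effect as MBB.erase(MI.getIterator())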


@ -32,7 +32,7 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
@ -40,7 +40,7 @@ public:
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
@ -61,7 +61,7 @@ public:
const TargetRegisterInfo *TRI,
int64_t Offset) const override;
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
unsigned getOppositeBranchOpc(unsigned Opc) const override;


@ -38,17 +38,17 @@ const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned MipsSEInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned MipsSEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
unsigned Opc = MI->getOpcode();
unsigned Opc = MI.getOpcode();
if ((Opc == Mips::LW) || (Opc == Mips::LD) ||
(Opc == Mips::LWC1) || (Opc == Mips::LDC1) || (Opc == Mips::LDC164)) {
if ((MI->getOperand(1).isFI()) && // is a stack slot
(MI->getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI->getOperand(2)))) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
if ((MI.getOperand(1).isFI()) && // is a stack slot
(MI.getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI.getOperand(2)))) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
}
@ -60,17 +60,17 @@ unsigned MipsSEInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned MipsSEInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
unsigned MipsSEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
unsigned Opc = MI->getOpcode();
unsigned Opc = MI.getOpcode();
if ((Opc == Mips::SW) || (Opc == Mips::SD) ||
(Opc == Mips::SWC1) || (Opc == Mips::SDC1) || (Opc == Mips::SDC164)) {
if ((MI->getOperand(1).isFI()) && // is a stack slot
(MI->getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI->getOperand(2)))) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
if ((MI.getOperand(1).isFI()) && // is a stack slot
(MI.getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI.getOperand(2)))) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
}
return 0;
@ -328,12 +328,12 @@ loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
}
}
bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
MachineBasicBlock &MBB = *MI->getParent();
bool MipsSEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
MachineBasicBlock &MBB = *MI.getParent();
bool isMicroMips = Subtarget.inMicroMipsMode();
unsigned Opc;
switch(MI->getDesc().getOpcode()) {
switch (MI.getDesc().getOpcode()) {
default:
return false;
case Mips::RetRA:


@ -32,7 +32,7 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
@ -40,7 +40,7 @@ public:
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
@ -61,7 +61,7 @@ public:
const TargetRegisterInfo *TRI,
int64_t Offset) const override;
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
unsigned getOppositeBranchOpc(unsigned Opc) const override;


@ -107,10 +107,9 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
}
unsigned BlockSize = 0;
for (MachineBasicBlock::iterator MBBI = MBB->begin(), EE = MBB->end();
MBBI != EE; ++MBBI)
BlockSize += TII->GetInstSizeInBytes(MBBI);
for (MachineInstr &MI : *MBB)
BlockSize += TII->GetInstSizeInBytes(MI);
BlockSizes[MBB->getNumber()] = BlockSize;
FuncSize += BlockSize;
}
@ -156,7 +155,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
Dest = I->getOperand(0).getMBB();
if (!Dest) {
MBBStartOffset += TII->GetInstSizeInBytes(I);
MBBStartOffset += TII->GetInstSizeInBytes(*I);
continue;
}


@ -11126,7 +11126,7 @@ unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
uint64_t LoopSize = 0;
for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
LoopSize += TII->GetInstSizeInBytes(J);
LoopSize += TII->GetInstSizeInBytes(*J);
if (LoopSize > 32)
break;
}


@ -109,7 +109,7 @@ PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
}
unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
const MachineInstr &MI,
unsigned *PredCost) const {
if (!ItinData || UseOldLatencyCalc)
return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);
@ -122,9 +122,9 @@ unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
// is an output).
unsigned Latency = 1;
unsigned DefClass = MI->getDesc().getSchedClass();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
unsigned DefClass = MI.getDesc().getSchedClass();
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
continue;
@ -139,22 +139,22 @@ unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
}
int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI,
const MachineInstr &DefMI, unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const {
int Latency = PPCGenInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
UseMI, UseIdx);
if (!DefMI->getParent())
if (!DefMI.getParent())
return Latency;
const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
unsigned Reg = DefMO.getReg();
bool IsRegCR;
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
const MachineRegisterInfo *MRI =
&DefMI->getParent()->getParent()->getRegInfo();
&DefMI.getParent()->getParent()->getRegInfo();
IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
} else {
@ -162,7 +162,7 @@ int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
PPC::CRBITRCRegClass.contains(Reg);
}
if (UseMI->isBranch() && IsRegCR) {
if (UseMI.isBranch() && IsRegCR) {
if (Latency < 0)
Latency = getInstrLatency(ItinData, DefMI);
@ -260,10 +260,10 @@ bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
}
}
unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
// Note: This list must be kept consistent with LoadRegFromStackSlot.
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
default: break;
case PPC::LD:
case PPC::LWZ:
@ -279,20 +279,20 @@ unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
case PPC::RESTORE_VRSAVE:
// Check for the operands added by addFrameReference (the immediate is the
// offset which defaults to 0).
if (MI->getOperand(1).isImm() && !MI->getOperand(1).getImm() &&
MI->getOperand(2).isFI()) {
FrameIndex = MI->getOperand(2).getIndex();
return MI->getOperand(0).getReg();
if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
MI.getOperand(2).isFI()) {
FrameIndex = MI.getOperand(2).getIndex();
return MI.getOperand(0).getReg();
}
break;
}
return 0;
}
unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
// Note: This list must be kept consistent with StoreRegToStackSlot.
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
default: break;
case PPC::STD:
case PPC::STW:
@ -308,25 +308,23 @@ unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
case PPC::SPILL_VRSAVE:
// Check for the operands added by addFrameReference (the immediate is the
// offset which defaults to 0).
if (MI->getOperand(1).isImm() && !MI->getOperand(1).getImm() &&
MI->getOperand(2).isFI()) {
FrameIndex = MI->getOperand(2).getIndex();
return MI->getOperand(0).getReg();
if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
MI.getOperand(2).isFI()) {
FrameIndex = MI.getOperand(2).getIndex();
return MI.getOperand(0).getReg();
}
break;
}
return 0;
}
MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr *MI,
bool NewMI,
MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
MachineFunction &MF = *MI->getParent()->getParent();
MachineFunction &MF = *MI.getParent()->getParent();
// Normal instructions can be commuted the obvious way.
if (MI->getOpcode() != PPC::RLWIMI &&
MI->getOpcode() != PPC::RLWIMIo)
if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMIo)
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
// Note that RLWIMI can be commuted as a 32-bit instruction, but not as a
// 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because
@ -334,7 +332,7 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr *MI,
// to the high-bits of the mask (and, thus, the result).
// Cannot commute if it has a non-zero rotate count.
if (MI->getOperand(3).getImm() != 0)
if (MI.getOperand(3).getImm() != 0)
return nullptr;
// If we have a zero rotate count, we have:
@ -347,28 +345,28 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr *MI,
// Swap op1/op2
assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
"Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMIo.");
unsigned Reg0 = MI->getOperand(0).getReg();
unsigned Reg1 = MI->getOperand(1).getReg();
unsigned Reg2 = MI->getOperand(2).getReg();
unsigned SubReg1 = MI->getOperand(1).getSubReg();
unsigned SubReg2 = MI->getOperand(2).getSubReg();
bool Reg1IsKill = MI->getOperand(1).isKill();
bool Reg2IsKill = MI->getOperand(2).isKill();
unsigned Reg0 = MI.getOperand(0).getReg();
unsigned Reg1 = MI.getOperand(1).getReg();
unsigned Reg2 = MI.getOperand(2).getReg();
unsigned SubReg1 = MI.getOperand(1).getSubReg();
unsigned SubReg2 = MI.getOperand(2).getSubReg();
bool Reg1IsKill = MI.getOperand(1).isKill();
bool Reg2IsKill = MI.getOperand(2).isKill();
bool ChangeReg0 = false;
// If machine instrs are no longer in two-address forms, update
// destination register as well.
if (Reg0 == Reg1) {
// Must be two address instruction!
assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
assert(MI.getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
"Expecting a two-address instruction!");
assert(MI->getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
assert(MI.getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
Reg2IsKill = false;
ChangeReg0 = true;
}
// Masks.
unsigned MB = MI->getOperand(4).getImm();
unsigned ME = MI->getOperand(5).getImm();
unsigned MB = MI.getOperand(4).getImm();
unsigned ME = MI.getOperand(5).getImm();
// We can't commute a trivial mask (there is no way to represent an all-zero
// mask).
@ -377,40 +375,40 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr *MI,
if (NewMI) {
// Create a new instruction.
unsigned Reg0 = ChangeReg0 ? Reg2 : MI->getOperand(0).getReg();
bool Reg0IsDead = MI->getOperand(0).isDead();
return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
.addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
.addReg(Reg2, getKillRegState(Reg2IsKill))
.addReg(Reg1, getKillRegState(Reg1IsKill))
.addImm((ME+1) & 31)
.addImm((MB-1) & 31);
unsigned Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
bool Reg0IsDead = MI.getOperand(0).isDead();
return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())
.addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
.addReg(Reg2, getKillRegState(Reg2IsKill))
.addReg(Reg1, getKillRegState(Reg1IsKill))
.addImm((ME + 1) & 31)
.addImm((MB - 1) & 31);
}
if (ChangeReg0) {
MI->getOperand(0).setReg(Reg2);
MI->getOperand(0).setSubReg(SubReg2);
MI.getOperand(0).setReg(Reg2);
MI.getOperand(0).setSubReg(SubReg2);
}
MI->getOperand(2).setReg(Reg1);
MI->getOperand(1).setReg(Reg2);
MI->getOperand(2).setSubReg(SubReg1);
MI->getOperand(1).setSubReg(SubReg2);
MI->getOperand(2).setIsKill(Reg1IsKill);
MI->getOperand(1).setIsKill(Reg2IsKill);
MI.getOperand(2).setReg(Reg1);
MI.getOperand(1).setReg(Reg2);
MI.getOperand(2).setSubReg(SubReg1);
MI.getOperand(1).setSubReg(SubReg2);
MI.getOperand(2).setIsKill(Reg1IsKill);
MI.getOperand(1).setIsKill(Reg2IsKill);
// Swap the mask around.
MI->getOperand(4).setImm((ME+1) & 31);
MI->getOperand(5).setImm((MB-1) & 31);
return MI;
MI.getOperand(4).setImm((ME + 1) & 31);
MI.getOperand(5).setImm((MB - 1) & 31);
return &MI;
}
bool PPCInstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
bool PPCInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
// For VSX A-Type FMA instructions, it is the first two operands that can be
// commuted, however, because the non-encoded tied input operand is listed
// first, the operands to swap are actually the second and third.
int AltOpc = PPC::getAltVSXFMAOpcode(MI->getOpcode());
int AltOpc = PPC::getAltVSXFMAOpcode(MI.getOpcode());
if (AltOpc == -1)
return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
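
The return &MI ending the in-place path above is the new success spelling: with a reference parameter the routine can no longer hand its argument back directly, so the pointer result is reconstituted with &. A sketch of the two NewMI modes (the in-place identity is an assumption checked defensively; assert needs <cassert>):

// NewMI == false: MI itself is mutated; the result points at the same MI.
llvm::MachineInstr *Same = TII->commuteInstruction(MI, /*NewMI=*/false);
assert(!Same || Same == &MI);  // assumed, not a documented guarantee

// NewMI == true: a fresh commuted instruction is built; MI is untouched.
llvm::MachineInstr *Fresh = TII->commuteInstruction(MI, /*NewMI=*/true);
(void)Fresh;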
@ -1211,35 +1209,35 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return false;
}
bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
unsigned Reg, MachineRegisterInfo *MRI) const {
bool PPCInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
unsigned Reg, MachineRegisterInfo *MRI) const {
// For some instructions, it is legal to fold ZERO into the RA register field.
// A zero immediate should always be loaded with a single li.
unsigned DefOpc = DefMI->getOpcode();
unsigned DefOpc = DefMI.getOpcode();
if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
return false;
if (!DefMI->getOperand(1).isImm())
if (!DefMI.getOperand(1).isImm())
return false;
if (DefMI->getOperand(1).getImm() != 0)
if (DefMI.getOperand(1).getImm() != 0)
return false;
// Note that we cannot here invert the arguments of an isel in order to fold
// a ZERO into what is presented as the second argument. All we have here
// is the condition bit, and that might come from a CR-logical bit operation.
const MCInstrDesc &UseMCID = UseMI->getDesc();
const MCInstrDesc &UseMCID = UseMI.getDesc();
// Only fold into real machine instructions.
if (UseMCID.isPseudo())
return false;
unsigned UseIdx;
for (UseIdx = 0; UseIdx < UseMI->getNumOperands(); ++UseIdx)
if (UseMI->getOperand(UseIdx).isReg() &&
UseMI->getOperand(UseIdx).getReg() == Reg)
for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx)
if (UseMI.getOperand(UseIdx).isReg() &&
UseMI.getOperand(UseIdx).getReg() == Reg)
break;
assert(UseIdx < UseMI->getNumOperands() && "Cannot find Reg in UseMI");
assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");
assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg");
const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx];
@ -1271,10 +1269,10 @@ bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
}
bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
UseMI->getOperand(UseIdx).setReg(ZeroReg);
UseMI.getOperand(UseIdx).setReg(ZeroReg);
if (DeleteDef)
DefMI->eraseFromParent();
DefMI.eraseFromParent();
return true;
}
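
FoldImmediate's life cycle is worth spelling out because of the erase at the end: on success the use of Reg has been rewritten, and when that was the last non-debug use the defining li is already gone. A hedged driver sketch (the statistic is invented):

// DefMI is known to be a move-immediate defining Reg; UseMI reads Reg.
// After a successful fold, do not touch DefMI again -- it may be erased.
if (TII->FoldImmediate(UseMI, DefMI, Reg, MRI))
  ++NumZeroFolds;  // illustrative counter, not from this commit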
@ -1497,10 +1495,10 @@ bool PPCInstrInfo::isPredicable(MachineInstr &MI) const {
}
}
bool PPCInstrInfo::analyzeCompare(const MachineInstr *MI,
unsigned &SrcReg, unsigned &SrcReg2,
int &Mask, int &Value) const {
unsigned Opc = MI->getOpcode();
bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask,
int &Value) const {
unsigned Opc = MI.getOpcode();
switch (Opc) {
default: return false;
@ -1508,9 +1506,9 @@ bool PPCInstrInfo::analyzeCompare(const MachineInstr *MI,
case PPC::CMPLWI:
case PPC::CMPDI:
case PPC::CMPLDI:
SrcReg = MI->getOperand(1).getReg();
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
Value = MI->getOperand(2).getImm();
Value = MI.getOperand(2).getImm();
Mask = 0xFFFF;
return true;
case PPC::CMPW:
@ -1519,21 +1517,20 @@ bool PPCInstrInfo::analyzeCompare(const MachineInstr *MI,
case PPC::CMPLD:
case PPC::FCMPUS:
case PPC::FCMPUD:
SrcReg = MI->getOperand(1).getReg();
SrcReg2 = MI->getOperand(2).getReg();
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = MI.getOperand(2).getReg();
return true;
}
}
bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
unsigned SrcReg, unsigned SrcReg2,
int Mask, int Value,
bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int Mask, int Value,
const MachineRegisterInfo *MRI) const {
if (DisableCmpOpt)
return false;
int OpC = CmpInstr->getOpcode();
unsigned CRReg = CmpInstr->getOperand(0).getReg();
int OpC = CmpInstr.getOpcode();
unsigned CRReg = CmpInstr.getOperand(0).getReg();
// FP record forms set CR1 based on the exception status bits, not a
// comparison with zero.
@ -1616,8 +1613,8 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
MachineBasicBlock::iterator I = CmpInstr;
// Scan forward to find the first use of the compare.
for (MachineBasicBlock::iterator EL = CmpInstr->getParent()->end();
I != EL; ++I) {
for (MachineBasicBlock::iterator EL = CmpInstr.getParent()->end(); I != EL;
++I) {
bool FoundUse = false;
for (MachineRegisterInfo::use_instr_iterator J =MRI->use_instr_begin(CRReg),
JE = MRI->use_instr_end(); J != JE; ++J)
@ -1641,7 +1638,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
// same BB as the comparison. This is to allow the check below to avoid calls
// (and other explicit clobbers); instead we should really check for these
// more explicitly (in at least a few predecessors).
else if (MI->getParent() != CmpInstr->getParent() || Value != 0) {
else if (MI->getParent() != CmpInstr.getParent() || Value != 0) {
// PPC does not have a record-form SUBri.
return false;
}
@ -1651,16 +1648,14 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
--I;
// Get ready to iterate backward from CmpInstr.
MachineBasicBlock::iterator E = MI,
B = CmpInstr->getParent()->begin();
MachineBasicBlock::iterator E = MI, B = CmpInstr.getParent()->begin();
for (; I != E && !noSub; --I) {
const MachineInstr &Instr = *I;
unsigned IOpC = Instr.getOpcode();
if (&*I != CmpInstr && (
Instr.modifiesRegister(PPC::CR0, TRI) ||
Instr.readsRegister(PPC::CR0, TRI)))
if (&*I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0, TRI) ||
Instr.readsRegister(PPC::CR0, TRI)))
// This instruction modifies or uses the record condition register after
// the one we want to change. While we could do this transformation, it
// would likely not be profitable. This transformation removes one
@ -1760,7 +1755,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
// Create a new virtual register to hold the value of the CR set by the
// record-form instruction. If the instruction was not previously in
// record form, then set the kill flag on the CR.
CmpInstr->eraseFromParent();
CmpInstr.eraseFromParent();
MachineBasicBlock::iterator MII = MI;
BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
@ -1813,17 +1808,17 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
unsigned Opcode = MI->getOpcode();
unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr &MI) const {
unsigned Opcode = MI.getOpcode();
if (Opcode == PPC::INLINEASM) {
const MachineFunction *MF = MI->getParent()->getParent();
const char *AsmStr = MI->getOperand(0).getSymbolName();
const MachineFunction *MF = MI.getParent()->getParent();
const char *AsmStr = MI.getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
} else if (Opcode == TargetOpcode::STACKMAP) {
return MI->getOperand(1).getImm();
return MI.getOperand(1).getImm();
} else if (Opcode == TargetOpcode::PATCHPOINT) {
PatchPointOpers Opers(MI);
PatchPointOpers Opers(&MI);
return Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
} else {
const MCInstrDesc &Desc = get(Opcode);
@ -1863,15 +1858,15 @@ PPCInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
return makeArrayRef(TargetFlags);
}
bool PPCInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
switch (MI->getOpcode()) {
bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
switch (MI.getOpcode()) {
case TargetOpcode::LOAD_STACK_GUARD: {
assert(Subtarget.isTargetLinux() &&
"Only Linux target is expected to contain LOAD_STACK_GUARD");
const int64_t Offset = Subtarget.isPPC64() ? -0x7010 : -0x7008;
const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
MI->setDesc(get(Subtarget.isPPC64() ? PPC::LD : PPC::LWZ));
MachineInstrBuilder(*MI->getParent()->getParent(), MI)
MI.setDesc(get(Subtarget.isPPC64() ? PPC::LD : PPC::LWZ));
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
.addImm(Offset)
.addReg(Reg);
return true;
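Because expandPostRAPseudo no longer accepts a MachineBasicBlock::iterator, generic callers must dereference their iterator explicitly; removing that implicit iterator-to-pointer conversion is exactly the point of PR26753. A schematic caller, assuming TII is in scope (the loop is illustrative, not copied from an LLVM pass):

    for (MachineBasicBlock &MBB : MF) {
      for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
           MBBI != E;) {
        MachineInstr &MI = *MBBI++; // advance first; MI may be erased
        if (MI.getDesc().isPseudo())
          TII->expandPostRAPseudo(MI);
      }
    }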


@ -91,8 +91,7 @@ protected:
///
/// For example, we can commute rlwimi instructions, but only if the
/// rotate amt is zero. We also have to munge the immediates a bit.
MachineInstr *commuteInstructionImpl(MachineInstr *MI,
bool NewMI,
MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const override;
@ -113,12 +112,12 @@ public:
const ScheduleDAG *DAG) const override;
unsigned getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
const MachineInstr &MI,
unsigned *PredCost = nullptr) const override;
int getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI,
const MachineInstr &DefMI, unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const override;
int getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
@ -128,7 +127,7 @@ public:
}
bool hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI,
const MachineInstr &DefMI,
unsigned DefIdx) const override {
// Machine LICM should hoist all instructions in low-register-pressure
// situations; none are sufficiently free to justify leaving in a loop
@ -152,12 +151,12 @@ public:
bool isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const override;
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const override;
void insertNoop(MachineBasicBlock &MBB,
@ -201,8 +200,8 @@ public:
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
unsigned Reg, MachineRegisterInfo *MRI) const override;
bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
MachineRegisterInfo *MRI) const override;
// If conversion by predication (only supported by some branch instructions).
// All of the profitability checks always return true; it is always
@ -247,20 +246,17 @@ public:
// Comparison optimization.
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask, int &Value) const override;
bool analyzeCompare(const MachineInstr *MI,
unsigned &SrcReg, unsigned &SrcReg2,
int &Mask, int &Value) const override;
bool optimizeCompareInstr(MachineInstr *CmpInstr,
unsigned SrcReg, unsigned SrcReg2,
int Mask, int Value,
bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int Mask, int Value,
const MachineRegisterInfo *MRI) const override;
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
unsigned GetInstSizeInBytes(const MachineInstr &MI) const;
void getNoopForMachoTarget(MCInst &NopInst) const override;
@ -274,7 +270,7 @@ public:
getSerializableBitmaskMachineOperandTargetFlags() const override;
// Lower pseudo instructions after register allocation.
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
};
}


@ -41,17 +41,15 @@ SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
if (MI->getOpcode() == SP::LDri ||
MI->getOpcode() == SP::LDXri ||
MI->getOpcode() == SP::LDFri ||
MI->getOpcode() == SP::LDDFri ||
MI->getOpcode() == SP::LDQFri) {
if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
MI->getOperand(2).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
if (MI.getOpcode() == SP::LDri || MI.getOpcode() == SP::LDXri ||
MI.getOpcode() == SP::LDFri || MI.getOpcode() == SP::LDDFri ||
MI.getOpcode() == SP::LDQFri) {
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
}
return 0;
@ -62,17 +60,15 @@ unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
if (MI->getOpcode() == SP::STri ||
MI->getOpcode() == SP::STXri ||
MI->getOpcode() == SP::STFri ||
MI->getOpcode() == SP::STDFri ||
MI->getOpcode() == SP::STQFri) {
if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
MI->getOperand(1).getImm() == 0) {
FrameIndex = MI->getOperand(0).getIndex();
return MI->getOperand(2).getReg();
if (MI.getOpcode() == SP::STri || MI.getOpcode() == SP::STXri ||
MI.getOpcode() == SP::STFri || MI.getOpcode() == SP::STDFri ||
MI.getOpcode() == SP::STQFri) {
if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
MI.getOperand(1).getImm() == 0) {
FrameIndex = MI.getOperand(0).getIndex();
return MI.getOperand(2).getReg();
}
}
return 0;
@ -492,16 +488,17 @@ unsigned SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const
return GlobalBaseReg;
}
bool SparcInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
switch (MI->getOpcode()) {
bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
switch (MI.getOpcode()) {
case TargetOpcode::LOAD_STACK_GUARD: {
assert(Subtarget.isTargetLinux() &&
"Only Linux target is expected to contain LOAD_STACK_GUARD");
// offsetof(tcbhead_t, stack_guard) from sysdeps/sparc/nptl/tls.h in glibc.
const int64_t Offset = Subtarget.is64Bit() ? 0x28 : 0x14;
MI->setDesc(get(Subtarget.is64Bit() ? SP::LDXri : SP::LDri));
MachineInstrBuilder(*MI->getParent()->getParent(), MI)
.addReg(SP::G7).addImm(Offset);
MI.setDesc(get(Subtarget.is64Bit() ? SP::LDXri : SP::LDri));
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
.addReg(SP::G7)
.addImm(Offset);
return true;
}
}


@ -54,7 +54,7 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
@ -62,7 +62,7 @@ public:
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
@ -98,7 +98,7 @@ public:
unsigned getGlobalBaseReg(MachineFunction *MF) const;
// Lower pseudo instructions after register allocation.
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
};
}


@ -106,59 +106,58 @@ void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
// and HighOpcode takes an unsigned 32-bit operand. In those cases,
// MI has the same kind of operand as LowOpcode, so needs to be converted
// if HighOpcode is used.
void SystemZInstrInfo::expandRIPseudo(MachineInstr *MI, unsigned LowOpcode,
void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode,
bool ConvertHigh) const {
unsigned Reg = MI->getOperand(0).getReg();
unsigned Reg = MI.getOperand(0).getReg();
bool IsHigh = isHighReg(Reg);
MI->setDesc(get(IsHigh ? HighOpcode : LowOpcode));
MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
if (IsHigh && ConvertHigh)
MI->getOperand(1).setImm(uint32_t(MI->getOperand(1).getImm()));
MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
}
// MI is a three-operand RIE-style pseudo instruction. Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr *MI, unsigned LowOpcode,
void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned LowOpcodeK,
unsigned HighOpcode) const {
unsigned DestReg = MI->getOperand(0).getReg();
unsigned SrcReg = MI->getOperand(1).getReg();
unsigned DestReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
bool DestIsHigh = isHighReg(DestReg);
bool SrcIsHigh = isHighReg(SrcReg);
if (!DestIsHigh && !SrcIsHigh)
MI->setDesc(get(LowOpcodeK));
MI.setDesc(get(LowOpcodeK));
else {
emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
DestReg, SrcReg, SystemZ::LR, 32,
MI->getOperand(1).isKill());
MI->setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
MI->getOperand(1).setReg(DestReg);
MI->tieOperands(0, 1);
emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
SystemZ::LR, 32, MI.getOperand(1).isKill());
MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
MI.getOperand(1).setReg(DestReg);
MI.tieOperands(0, 1);
}
}
// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
void SystemZInstrInfo::expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const {
unsigned Reg = MI->getOperand(0).getReg();
unsigned Reg = MI.getOperand(0).getReg();
unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
MI->getOperand(2).getImm());
MI->setDesc(get(Opcode));
MI.getOperand(2).getImm());
MI.setDesc(get(Opcode));
}
// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another. Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned Size) const {
emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
MI->getOperand(0).getReg(), MI->getOperand(1).getReg(),
LowOpcode, Size, MI->getOperand(1).isKill());
MI->eraseFromParent();
emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
Size, MI.getOperand(1).isKill());
MI.eraseFromParent();
}
void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
@ -228,45 +227,41 @@ void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
unsigned Flag) {
const MCInstrDesc &MCID = MI->getDesc();
if ((MCID.TSFlags & Flag) &&
MI->getOperand(1).isFI() &&
MI->getOperand(2).getImm() == 0 &&
MI->getOperand(3).getReg() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
const MCInstrDesc &MCID = MI.getDesc();
if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
return 0;
}
unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}
unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}
bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
int &DestFrameIndex,
int &SrcFrameIndex) const {
// Check for MVC 0(Length,FI1),0(FI2)
const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
if (MI->getOpcode() != SystemZ::MVC ||
!MI->getOperand(0).isFI() ||
MI->getOperand(1).getImm() != 0 ||
!MI->getOperand(3).isFI() ||
MI->getOperand(4).getImm() != 0)
const MachineFrameInfo *MFI = MI.getParent()->getParent()->getFrameInfo();
if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
MI.getOperand(4).getImm() != 0)
return false;
// Check that Length covers the full slots.
int64_t Length = MI->getOperand(2).getImm();
unsigned FI1 = MI->getOperand(0).getIndex();
unsigned FI2 = MI->getOperand(3).getIndex();
int64_t Length = MI.getOperand(2).getImm();
unsigned FI1 = MI.getOperand(0).getIndex();
unsigned FI2 = MI.getOperand(3).getIndex();
if (MFI->getObjectSize(FI1) != Length ||
MFI->getObjectSize(FI2) != Length)
return false;
@ -302,7 +297,7 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return true;
// Can't handle indirect branches.
SystemZII::Branch Branch(getBranchInfo(I));
SystemZII::Branch Branch(getBranchInfo(*I));
if (!Branch.Target->isMBB())
return true;
@ -379,7 +374,7 @@ unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
continue;
if (!I->isBranch())
break;
if (!getBranchInfo(I).Target->isMBB())
if (!getBranchInfo(*I).Target->isMBB())
break;
// Remove the branch.
I->eraseFromParent();
@ -434,17 +429,16 @@ unsigned SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB,
return Count;
}
bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
unsigned &SrcReg, unsigned &SrcReg2,
int &Mask, int &Value) const {
assert(MI->isCompare() && "Caller should have checked for a comparison");
bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask,
int &Value) const {
assert(MI.isCompare() && "Caller should have checked for a comparison");
if (MI->getNumExplicitOperands() == 2 &&
MI->getOperand(0).isReg() &&
MI->getOperand(1).isImm()) {
SrcReg = MI->getOperand(0).getReg();
if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
MI.getOperand(1).isImm()) {
SrcReg = MI.getOperand(0).getReg();
SrcReg2 = 0;
Value = MI->getOperand(1).getImm();
Value = MI.getOperand(1).getImm();
Mask = ~0;
return true;
}
@ -477,7 +471,7 @@ static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
// the result of an IPM sequence whose input CC survives until Compare,
// and whether Compare is therefore redundant. Delete it and return
// true if so.
static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
static bool removeIPMBasedCompare(MachineInstr &Compare, unsigned SrcReg,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI) {
MachineInstr *LGFR = nullptr;
@ -498,16 +492,16 @@ static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
return false;
// Check that there are no assignments to CC between the IPM and Compare.
if (IPM->getParent() != Compare->getParent())
if (IPM->getParent() != Compare.getParent())
return false;
MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare;
MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare.getIterator();
for (++MBBI; MBBI != MBBE; ++MBBI) {
MachineInstr *MI = MBBI;
if (MI->modifiesRegister(SystemZ::CC, TRI))
return false;
}
Compare->eraseFromParent();
Compare.eraseFromParent();
if (LGFR)
eraseIfDead(LGFR, MRI);
eraseIfDead(RLL, MRI);
@ -517,13 +511,11 @@ static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
return true;
}
bool
SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
unsigned SrcReg, unsigned SrcReg2,
int Mask, int Value,
const MachineRegisterInfo *MRI) const {
bool SystemZInstrInfo::optimizeCompareInstr(
MachineInstr &Compare, unsigned SrcReg, unsigned SrcReg2, int Mask,
int Value, const MachineRegisterInfo *MRI) const {
assert(!SrcReg2 && "Only optimizing constant comparisons so far");
bool IsLogical = (Compare->getDesc().TSFlags & SystemZII::IsLogical) != 0;
bool IsLogical = (Compare.getDesc().TSFlags & SystemZII::IsLogical) != 0;
return Value == 0 && !IsLogical &&
removeIPMBasedCompare(Compare, SrcReg, MRI, &RI);
}
@ -775,25 +767,22 @@ static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
return NewMI;
}
MachineInstr *
SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const {
MachineInstr *MI = MBBI;
MachineBasicBlock *MBB = MI->getParent();
MachineInstr *SystemZInstrInfo::convertToThreeAddress(
MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
MachineBasicBlock *MBB = MI.getParent();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned Opcode = MI->getOpcode();
unsigned NumOps = MI->getNumOperands();
unsigned Opcode = MI.getOpcode();
unsigned NumOps = MI.getNumOperands();
// Try to convert something like SLL into SLLK, if supported.
// We prefer to keep the two-operand form where possible both
// because it tends to be shorter and because some instructions
// have memory forms that can be used during spilling.
if (STI.hasDistinctOps()) {
MachineOperand &Dest = MI->getOperand(0);
MachineOperand &Src = MI->getOperand(1);
MachineOperand &Dest = MI.getOperand(0);
MachineOperand &Src = MI.getOperand(1);
unsigned DestReg = Dest.getReg();
unsigned SrcReg = Src.getReg();
// AHIMux is only really a three-operand instruction when both operands
@ -812,23 +801,23 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// Create three address instruction without adding the implicit
// operands. Those will instead be copied over from the original
// instruction by the loop below.
MachineInstrBuilder MIB(*MF,
MF->CreateMachineInstr(get(ThreeOperandOpcode),
MI->getDebugLoc(), /*NoImplicit=*/true));
MachineInstrBuilder MIB(
*MF, MF->CreateMachineInstr(get(ThreeOperandOpcode), MI.getDebugLoc(),
/*NoImplicit=*/true));
MIB.addOperand(Dest);
// Keep the kill state, but drop the tied flag.
MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
// Keep the remaining operands as-is.
for (unsigned I = 2; I < NumOps; ++I)
MIB.addOperand(MI->getOperand(I));
MIB.addOperand(MI.getOperand(I));
MBB->insert(MI, MIB);
return finishConvertToThreeAddress(MI, MIB, LV);
return finishConvertToThreeAddress(&MI, MIB, LV);
}
}
// Try to convert an AND into an RISBG-type instruction.
if (LogicOp And = interpretAndImmediate(Opcode)) {
uint64_t Imm = MI->getOperand(2).getImm() << And.ImmLSB;
uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
// AND IMMEDIATE leaves the other bits of the register unchanged.
Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
unsigned Start, End;
@ -844,33 +833,35 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
Start &= 31;
End &= 31;
}
MachineOperand &Dest = MI->getOperand(0);
MachineOperand &Src = MI->getOperand(1);
MachineOperand &Dest = MI.getOperand(0);
MachineOperand &Src = MI.getOperand(1);
MachineInstrBuilder MIB =
BuildMI(*MBB, MI, MI->getDebugLoc(), get(NewOpcode))
.addOperand(Dest).addReg(0)
.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg())
.addImm(Start).addImm(End + 128).addImm(0);
return finishConvertToThreeAddress(MI, MIB, LV);
BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
.addOperand(Dest)
.addReg(0)
.addReg(Src.getReg(), getKillRegState(Src.isKill()),
Src.getSubReg())
.addImm(Start)
.addImm(End + 128)
.addImm(0);
return finishConvertToThreeAddress(&MI, MIB, LV);
}
}
return nullptr;
}
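The AND-immediate path above is easiest to follow with concrete numbers. A worked example under assumed LogicOp fields (the values are illustrative, not tied to a specific SystemZ opcode):

    // Assume RegSize = 32, ImmSize = 16, ImmLSB = 0, and the
    // instruction's immediate operand is 0xFFF8.
    //   Imm  = 0xFFF8 << 0                        = 0x0000FFF8
    //   Imm |= allOnes(32) & ~(allOnes(16) << 0)  = 0xFFFF0000
    //   => Imm = 0xFFFFFFF8
    // Bits 3..31 are one contiguous run of ones, so isRxSBGMask
    // succeeds and the AND becomes a RISBG-style rotate-and-insert
    // that clears bits 0..2 while leaving the rest untouched.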
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS) const {
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
const MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned Size = MFI->getObjectSize(FrameIndex);
unsigned Opcode = MI->getOpcode();
unsigned Opcode = MI.getOpcode();
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
if (LIS != nullptr &&
(Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
isInt<8>(MI->getOperand(2).getImm()) &&
!MI->getOperand(3).getReg()) {
if (LIS != nullptr && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
// Check CC liveness, since new instruction introduces a dead
// def of CC.
@ -879,15 +870,14 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
++CCUnit;
assert(!CCUnit.isValid() && "CC only has one reg unit.");
SlotIndex MISlot =
LIS->getSlotIndexes()->getInstructionIndex(*MI).getRegSlot();
LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
if (!CCLiveRange.liveAt(MISlot)) {
// LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
MachineInstr *BuiltMI =
BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
get(SystemZ::AGSI))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(MI->getOperand(2).getImm());
MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
MI.getDebugLoc(), get(SystemZ::AGSI))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(MI.getOperand(2).getImm());
BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
return BuiltMI;
@ -901,22 +891,22 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
return nullptr;
unsigned OpNum = Ops[0];
assert(Size == MF.getRegInfo()
.getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
assert(Size ==
MF.getRegInfo()
.getRegClass(MI.getOperand(OpNum).getReg())
->getSize() &&
"Invalid size combination");
if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) &&
OpNum == 0 &&
isInt<8>(MI->getOperand(2).getImm())) {
if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
isInt<8>(MI.getOperand(2).getImm())) {
// A(G)HI %reg, CONST -> A(G)SI %mem, CONST
Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
MachineInstr *BuiltMI =
BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
get(Opcode))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(MI->getOperand(2).getImm());
transferDeadCC(MI, BuiltMI);
BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(MI.getOperand(2).getImm());
transferDeadCC(&MI, BuiltMI);
return BuiltMI;
}
@ -927,9 +917,9 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// source register instead.
if (OpNum == 0) {
unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(StoreOpcode))
.addOperand(MI->getOperand(1))
.addOperand(MI.getOperand(1))
.addFrameIndex(FrameIndex)
.addImm(0)
.addReg(0);
@ -938,8 +928,8 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// destination register instead.
if (OpNum == 1) {
unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
unsigned Dest = MI->getOperand(0).getReg();
return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
unsigned Dest = MI.getOperand(0).getReg();
return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(LoadOpcode), Dest)
.addFrameIndex(FrameIndex)
.addImm(0)
@ -960,26 +950,26 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// might be equal. We don't worry about that case here, because spill slot
// coloring happens later, and because we have special code to remove
// MVCs that turn out to be redundant.
if (OpNum == 0 && MI->hasOneMemOperand()) {
MachineMemOperand *MMO = *MI->memoperands_begin();
if (OpNum == 0 && MI.hasOneMemOperand()) {
MachineMemOperand *MMO = *MI.memoperands_begin();
if (MMO->getSize() == Size && !MMO->isVolatile()) {
// Handle conversion of loads.
if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(SystemZ::MVC))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(Size)
.addOperand(MI->getOperand(1))
.addImm(MI->getOperand(2).getImm())
.addOperand(MI.getOperand(1))
.addImm(MI.getOperand(2).getImm())
.addMemOperand(MMO);
}
// Handle conversion of stores.
if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(SystemZ::MVC))
.addOperand(MI->getOperand(1))
.addImm(MI->getOperand(2).getImm())
.addOperand(MI.getOperand(1))
.addImm(MI.getOperand(2).getImm())
.addImm(Size)
.addFrameIndex(FrameIndex)
.addImm(0)
@ -992,7 +982,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// into <INSN>.
int MemOpcode = SystemZ::getMemOpcode(Opcode);
if (MemOpcode >= 0) {
unsigned NumOps = MI->getNumExplicitOperands();
unsigned NumOps = MI.getNumExplicitOperands();
if (OpNum == NumOps - 1) {
const MCInstrDesc &MemDesc = get(MemOpcode);
uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
@ -1000,13 +990,13 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
assert(AccessBytes <= Size && "Access outside the frame index");
uint64_t Offset = Size - AccessBytes;
MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
MI->getDebugLoc(), get(MemOpcode));
MI.getDebugLoc(), get(MemOpcode));
for (unsigned I = 0; I < OpNum; ++I)
MIB.addOperand(MI->getOperand(I));
MIB.addOperand(MI.getOperand(I));
MIB.addFrameIndex(FrameIndex).addImm(Offset);
if (MemDesc.TSFlags & SystemZII::HasIndex)
MIB.addReg(0);
transferDeadCC(MI, MIB);
transferDeadCC(&MI, MIB);
return MIB;
}
}
@ -1015,15 +1005,14 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
}
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI,
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
LiveIntervals *LIS) const {
return nullptr;
}
bool
SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
switch (MI->getOpcode()) {
bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
switch (MI.getOpcode()) {
case SystemZ::L128:
splitMove(MI, SystemZ::LG);
return true;
@ -1161,13 +1150,13 @@ SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
return true;
case SystemZ::RISBMux: {
bool DestIsHigh = isHighReg(MI->getOperand(0).getReg());
bool SrcIsHigh = isHighReg(MI->getOperand(2).getReg());
bool DestIsHigh = isHighReg(MI.getOperand(0).getReg());
bool SrcIsHigh = isHighReg(MI.getOperand(2).getReg());
if (SrcIsHigh == DestIsHigh)
MI->setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
else {
MI->setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
MI->getOperand(5).setImm(MI->getOperand(5).getImm() ^ 32);
MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
}
return true;
}
@ -1177,7 +1166,7 @@ SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
return true;
case TargetOpcode::LOAD_STACK_GUARD:
expandLoadStackGuard(MI);
expandLoadStackGuard(&MI);
return true;
default:
@ -1185,57 +1174,56 @@ SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
}
}
uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
if (MI->getOpcode() == TargetOpcode::INLINEASM) {
const MachineFunction *MF = MI->getParent()->getParent();
const char *AsmStr = MI->getOperand(0).getSymbolName();
uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
if (MI.getOpcode() == TargetOpcode::INLINEASM) {
const MachineFunction *MF = MI.getParent()->getParent();
const char *AsmStr = MI.getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
}
return MI->getDesc().getSize();
return MI.getDesc().getSize();
}
SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
case SystemZ::BR:
case SystemZ::J:
case SystemZ::JG:
return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
SystemZ::CCMASK_ANY, &MI->getOperand(0));
SystemZ::CCMASK_ANY, &MI.getOperand(0));
case SystemZ::BRC:
case SystemZ::BRCL:
return SystemZII::Branch(SystemZII::BranchNormal,
MI->getOperand(0).getImm(),
MI->getOperand(1).getImm(), &MI->getOperand(2));
return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
MI.getOperand(1).getImm(), &MI.getOperand(2));
case SystemZ::BRCT:
return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));
SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
case SystemZ::BRCTG:
return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));
SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
case SystemZ::CIJ:
case SystemZ::CRJ:
return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
MI->getOperand(2).getImm(), &MI->getOperand(3));
MI.getOperand(2).getImm(), &MI.getOperand(3));
case SystemZ::CLIJ:
case SystemZ::CLRJ:
return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
MI->getOperand(2).getImm(), &MI->getOperand(3));
MI.getOperand(2).getImm(), &MI.getOperand(3));
case SystemZ::CGIJ:
case SystemZ::CGRJ:
return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
MI->getOperand(2).getImm(), &MI->getOperand(3));
MI.getOperand(2).getImm(), &MI.getOperand(3));
case SystemZ::CLGIJ:
case SystemZ::CLGRJ:
return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
MI->getOperand(2).getImm(), &MI->getOperand(3));
MI.getOperand(2).getImm(), &MI.getOperand(3));
default:
llvm_unreachable("Unrecognized branch opcode");


@ -136,13 +136,13 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {
void splitMove(MachineBasicBlock::iterator MI, unsigned NewOpcode) const;
void splitAdjDynAlloc(MachineBasicBlock::iterator MI) const;
void expandRIPseudo(MachineInstr *MI, unsigned LowOpcode,
unsigned HighOpcode, bool ConvertHigh) const;
void expandRIEPseudo(MachineInstr *MI, unsigned LowOpcode,
void expandRIPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned HighOpcode,
bool ConvertHigh) const;
void expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned LowOpcodeK, unsigned HighOpcode) const;
void expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
void expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const;
void expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
void expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned Size) const;
void expandLoadStackGuard(MachineInstr *MI) const;
void emitGRX32Move(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
@ -154,11 +154,11 @@ public:
explicit SystemZInstrInfo(SystemZSubtarget &STI);
// Override TargetInstrInfo.
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
int &SrcFrameIndex) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
@ -168,9 +168,9 @@ public:
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
const DebugLoc &DL) const override;
bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask, int &Value) const override;
bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int Mask, int Value,
const MachineRegisterInfo *MRI) const override;
bool isPredicable(MachineInstr &MI) const override;
@ -200,19 +200,18 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
MachineInstr &MI,
LiveVariables *LV) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
int FrameIndex,
LiveIntervals *LIS = nullptr) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
MachineInstr *LoadMI,
LiveIntervals *LIS = nullptr) const override;
bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
MachineInstr *
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS = nullptr) const override;
MachineInstr *foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
LiveIntervals *LIS = nullptr) const override;
bool expandPostRAPseudo(MachineInstr &MBBI) const override;
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
override;
@ -220,14 +219,14 @@ public:
const SystemZRegisterInfo &getRegisterInfo() const { return RI; }
// Return the size in bytes of MI.
uint64_t getInstSizeInBytes(const MachineInstr *MI) const;
uint64_t getInstSizeInBytes(const MachineInstr &MI) const;
// Return true if MI is a conditional or unconditional branch.
// When returning true, set Cond to the mask of condition-code
// values on which the instruction will branch, and set Target
// to the operand that contains the branch target. This target
// can be a register or a basic block.
SystemZII::Branch getBranchInfo(const MachineInstr *MI) const;
SystemZII::Branch getBranchInfo(const MachineInstr &MI) const;
// Get the load and store opcodes for a given register class.
void getLoadStoreOpcodes(const TargetRegisterClass *RC,


@ -147,7 +147,7 @@ private:
void skipNonTerminators(BlockPosition &Position, MBBInfo &Block);
void skipTerminator(BlockPosition &Position, TerminatorInfo &Terminator,
bool AssumeRelaxed);
TerminatorInfo describeTerminator(MachineInstr *MI);
TerminatorInfo describeTerminator(MachineInstr &MI);
uint64_t initMBBInfo();
bool mustRelaxBranch(const TerminatorInfo &Terminator, uint64_t Address);
bool mustRelaxABranch();
@ -210,11 +210,11 @@ void SystemZLongBranch::skipTerminator(BlockPosition &Position,
}
// Return a description of terminator instruction MI.
TerminatorInfo SystemZLongBranch::describeTerminator(MachineInstr *MI) {
TerminatorInfo SystemZLongBranch::describeTerminator(MachineInstr &MI) {
TerminatorInfo Terminator;
Terminator.Size = TII->getInstSizeInBytes(MI);
if (MI->isConditionalBranch() || MI->isUnconditionalBranch()) {
switch (MI->getOpcode()) {
if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
switch (MI.getOpcode()) {
case SystemZ::J:
// Relaxes to JG, which is 2 bytes longer.
Terminator.ExtraRelaxSize = 2;
@ -251,7 +251,7 @@ TerminatorInfo SystemZLongBranch::describeTerminator(MachineInstr *MI) {
default:
llvm_unreachable("Unrecognized branch instruction");
}
Terminator.Branch = MI;
Terminator.Branch = &MI;
Terminator.TargetBlock =
TII->getBranchInfo(MI).Target->getMBB()->getNumber();
}
@ -283,7 +283,7 @@ uint64_t SystemZLongBranch::initMBBInfo() {
MachineBasicBlock::iterator MI = MBB->begin();
MachineBasicBlock::iterator End = MBB->end();
while (MI != End && !MI->isTerminator()) {
Block.Size += TII->getInstSizeInBytes(MI);
Block.Size += TII->getInstSizeInBytes(*MI);
++MI;
}
skipNonTerminators(Position, Block);
@ -292,7 +292,7 @@ uint64_t SystemZLongBranch::initMBBInfo() {
while (MI != End) {
if (!MI->isDebugValue()) {
assert(MI->isTerminator() && "Terminator followed by non-terminator");
Terminators.push_back(describeTerminator(MI));
Terminators.push_back(describeTerminator(*MI));
skipTerminator(Position, Terminators.back(), false);
++Block.NumTerminators;
}


@ -34,8 +34,8 @@ WebAssemblyInstrInfo::WebAssemblyInstrInfo(const WebAssemblySubtarget &STI)
RI(STI.getTargetTriple()) {}
bool WebAssemblyInstrInfo::isReallyTriviallyReMaterializable(
const MachineInstr *MI, AliasAnalysis *AA) const {
switch (MI->getOpcode()) {
const MachineInstr &MI, AliasAnalysis *AA) const {
switch (MI.getOpcode()) {
case WebAssembly::CONST_I32:
case WebAssembly::CONST_I64:
case WebAssembly::CONST_F32:
@ -77,14 +77,14 @@ void WebAssemblyInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
}
MachineInstr *
WebAssemblyInstrInfo::commuteInstructionImpl(MachineInstr *MI, bool NewMI,
WebAssemblyInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
// If the operands are stackified, we can't reorder them.
WebAssemblyFunctionInfo &MFI =
*MI->getParent()->getParent()->getInfo<WebAssemblyFunctionInfo>();
if (MFI.isVRegStackified(MI->getOperand(OpIdx1).getReg()) ||
MFI.isVRegStackified(MI->getOperand(OpIdx2).getReg()))
*MI.getParent()->getParent()->getInfo<WebAssemblyFunctionInfo>();
if (MFI.isVRegStackified(MI.getOperand(OpIdx1).getReg()) ||
MFI.isVRegStackified(MI.getOperand(OpIdx2).getReg()))
return nullptr;
// Otherwise use the default implementation.


@ -34,13 +34,13 @@ public:
const WebAssemblyRegisterInfo &getRegisterInfo() const { return RI; }
bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const override;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
MachineInstr *commuteInstructionImpl(MachineInstr *MI, bool NewMI,
MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const override;


@ -217,10 +217,9 @@ static void Query(const MachineInstr *MI, AliasAnalysis &AA,
}
// Test whether Def is safe and profitable to rematerialize.
static bool ShouldRematerialize(const MachineInstr *Def, AliasAnalysis &AA,
static bool ShouldRematerialize(const MachineInstr &Def, AliasAnalysis &AA,
const WebAssemblyInstrInfo *TII) {
return Def->isAsCheapAsAMove() &&
TII->isTriviallyReMaterializable(Def, &AA);
return Def.isAsCheapAsAMove() && TII->isTriviallyReMaterializable(Def, &AA);
}
// Identify the definition for this register at this point. This is a
@ -475,19 +474,18 @@ static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand& Op,
/// A trivially cloneable instruction; clone it and nest the new copy with the
/// current instruction.
static MachineInstr *
RematerializeCheapDef(unsigned Reg, MachineOperand &Op, MachineInstr *Def,
MachineBasicBlock &MBB, MachineInstr *Insert,
LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI,
MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII,
const WebAssemblyRegisterInfo *TRI) {
DEBUG(dbgs() << "Rematerializing cheap def: "; Def->dump());
static MachineInstr *RematerializeCheapDef(
unsigned Reg, MachineOperand &Op, MachineInstr &Def, MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator Insert, LiveIntervals &LIS,
WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI,
const WebAssemblyInstrInfo *TII, const WebAssemblyRegisterInfo *TRI) {
DEBUG(dbgs() << "Rematerializing cheap def: "; Def.dump());
DEBUG(dbgs() << " - for use in "; Op.getParent()->dump());
unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
TII->reMaterialize(MBB, Insert, NewReg, 0, Def, *TRI);
Op.setReg(NewReg);
MachineInstr *Clone = &*std::prev(MachineBasicBlock::instr_iterator(Insert));
MachineInstr *Clone = &*std::prev(Insert);
LIS.InsertMachineInstrInMaps(*Clone);
LIS.createAndComputeVirtRegInterval(NewReg);
MFI.stackifyVReg(NewReg);
@ -500,17 +498,17 @@ RematerializeCheapDef(unsigned Reg, MachineOperand &Op, MachineInstr *Def,
if (!IsDead) {
LiveInterval &LI = LIS.getInterval(Reg);
ShrinkToUses(LI, LIS);
IsDead = !LI.liveAt(LIS.getInstructionIndex(*Def).getDeadSlot());
IsDead = !LI.liveAt(LIS.getInstructionIndex(Def).getDeadSlot());
}
// If that was the last use of the original, delete the original.
if (IsDead) {
DEBUG(dbgs() << " - Deleting original\n");
SlotIndex Idx = LIS.getInstructionIndex(*Def).getRegSlot();
SlotIndex Idx = LIS.getInstructionIndex(Def).getRegSlot();
LIS.removePhysRegDefAt(WebAssembly::ARGUMENTS, Idx);
LIS.removeInterval(Reg);
LIS.RemoveMachineInstrFromMaps(*Def);
Def->eraseFromParent();
LIS.RemoveMachineInstrFromMaps(Def);
Def.eraseFromParent();
}
return Clone;
@ -678,15 +676,15 @@ public:
assert(!Declined &&
"Don't decline commuting until you've finished trying it");
// Commuting didn't help. Revert it.
TII->commuteInstruction(Insert, /*NewMI=*/false, Operand0, Operand1);
TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
TentativelyCommuting = false;
Declined = true;
} else if (!Declined && TreeWalker.HasRemainingOperands(Insert)) {
Operand0 = TargetInstrInfo::CommuteAnyOperandIndex;
Operand1 = TargetInstrInfo::CommuteAnyOperandIndex;
if (TII->findCommutedOpIndices(Insert, Operand0, Operand1)) {
if (TII->findCommutedOpIndices(*Insert, Operand0, Operand1)) {
// Tentatively commute the operands and try again.
TII->commuteInstruction(Insert, /*NewMI=*/false, Operand0, Operand1);
TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
TreeWalker.ResetTopOperands(Insert);
TentativelyCommuting = true;
Declined = false;
@ -782,9 +780,10 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
!TreeWalker.IsOnStack(Reg);
if (CanMove && HasOneUse(Reg, Def, MRI, MDT, LIS)) {
Insert = MoveForSingleUse(Reg, Op, Def, MBB, Insert, LIS, MFI, MRI);
} else if (ShouldRematerialize(Def, AA, TII)) {
Insert = RematerializeCheapDef(Reg, Op, Def, MBB, Insert, LIS, MFI,
MRI, TII, TRI);
} else if (ShouldRematerialize(*Def, AA, TII)) {
Insert =
RematerializeCheapDef(Reg, Op, *Def, MBB, Insert->getIterator(),
LIS, MFI, MRI, TII, TRI);
} else if (CanMove &&
OneUseDominatesOtherUses(Reg, Op, MBB, MRI, MDT, LIS, MFI)) {
Insert = MoveAndTeeForMultiUse(Reg, Op, Def, MBB, Insert, LIS, MFI,


@ -277,7 +277,7 @@ static void printLeaMemReference(X86AsmPrinter &P, const MachineInstr *MI,
static void printMemReference(X86AsmPrinter &P, const MachineInstr *MI,
unsigned Op, raw_ostream &O,
const char *Modifier = nullptr) {
assert(isMem(MI, Op) && "Invalid memory reference!");
assert(isMem(*MI, Op) && "Invalid memory reference!");
const MachineOperand &Segment = MI->getOperand(Op+X86::AddrSegmentReg);
if (Segment.getReg()) {
printOperand(P, MI, Op+X86::AddrSegmentReg, O, Modifier);


@ -3738,7 +3738,7 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
AM.getFullAddress(AddrOps);
MachineInstr *Result = XII.foldMemoryOperandImpl(
*FuncInfo.MF, MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
*FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
/*AllowCommute=*/true);
if (!Result)
return false;


@ -110,22 +110,22 @@ char FixupLEAPass::ID = 0;
MachineInstr *
FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI) const {
MachineInstr *MI = MBBI;
MachineInstr *NewMI;
switch (MI->getOpcode()) {
MachineInstr &MI = *MBBI;
switch (MI.getOpcode()) {
case X86::MOV32rr:
case X86::MOV64rr: {
const MachineOperand &Src = MI->getOperand(1);
const MachineOperand &Dest = MI->getOperand(0);
NewMI = BuildMI(*MF, MI->getDebugLoc(),
TII->get(MI->getOpcode() == X86::MOV32rr ? X86::LEA32r
: X86::LEA64r))
.addOperand(Dest)
.addOperand(Src)
.addImm(1)
.addReg(0)
.addImm(0)
.addReg(0);
const MachineOperand &Src = MI.getOperand(1);
const MachineOperand &Dest = MI.getOperand(0);
MachineInstr *NewMI =
BuildMI(*MF, MI.getDebugLoc(),
TII->get(MI.getOpcode() == X86::MOV32rr ? X86::LEA32r
: X86::LEA64r))
.addOperand(Dest)
.addOperand(Src)
.addImm(1)
.addReg(0)
.addImm(0)
.addReg(0);
MFI->insert(MBBI, NewMI); // Insert the new inst
return NewMI;
}
@ -141,7 +141,7 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
if (!MI->getOperand(2).isImm()) {
if (!MI.getOperand(2).isImm()) {
// convertToThreeAddress will call getImm()
// which requires isImm() to be true
return nullptr;
@ -149,14 +149,14 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
break;
case X86::ADD16rr:
case X86::ADD16rr_DB:
if (MI->getOperand(1).getReg() != MI->getOperand(2).getReg()) {
if (MI.getOperand(1).getReg() != MI.getOperand(2).getReg()) {
// if src1 != src2, then convertToThreeAddress will
// need to create a Virtual register, which we cannot do
// after register allocation.
return nullptr;
}
}
return TII->convertToThreeAddress(MFI, MBBI, nullptr);
return TII->convertToThreeAddress(MFI, MI, nullptr);
}
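As a reading aid for the MOV32rr/MOV64rr case above: the five operands appended after Dest and Src form the usual x86 address quintuple, so the emitted instruction is, schematically (not exact MIR syntax):

    // %dst = LEA64r %src(base), 1(scale), %noreg(index), 0(disp), %noreg(segment)
    // i.e. %dst = %src + 0 -- a register copy expressed as an address
    // computation, which this pass prefers on cores where LEA runs in
    // the address-generation pipeline.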
FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }
@ -236,7 +236,7 @@ FixupLEAPass::searchBackwards(MachineOperand &p, MachineBasicBlock::iterator &I,
return CurInst;
}
InstrDistance += TII->getInstrLatency(
MF->getSubtarget().getInstrItineraryData(), CurInst);
MF->getSubtarget().getInstrItineraryData(), *CurInst);
Found = getPreviousInstr(CurInst, MFI);
}
return nullptr;


@ -1281,7 +1281,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
const MachineInstr *FrameInstr = &*MBBI;
const MachineInstr &FrameInstr = *MBBI;
++MBBI;
if (NeedsWinCFI) {


@ -3525,7 +3525,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
if (!Def)
return false;
if (!Flags.isByVal()) {
if (!TII->isLoadFromStackSlot(Def, FI))
if (!TII->isLoadFromStackSlot(*Def, FI))
return false;
} else {
unsigned Opcode = Def->getOpcode();

File diff suppressed because it is too large.


@ -118,23 +118,24 @@ inline static bool isScale(const MachineOperand &MO) {
MO.getImm() == 4 || MO.getImm() == 8);
}
inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
if (MI->getOperand(Op).isFI()) return true;
return Op+X86::AddrSegmentReg <= MI->getNumOperands() &&
MI->getOperand(Op+X86::AddrBaseReg).isReg() &&
isScale(MI->getOperand(Op+X86::AddrScaleAmt)) &&
MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
(MI->getOperand(Op+X86::AddrDisp).isImm() ||
MI->getOperand(Op+X86::AddrDisp).isGlobal() ||
MI->getOperand(Op+X86::AddrDisp).isCPI() ||
MI->getOperand(Op+X86::AddrDisp).isJTI());
inline static bool isLeaMem(const MachineInstr &MI, unsigned Op) {
if (MI.getOperand(Op).isFI())
return true;
return Op + X86::AddrSegmentReg <= MI.getNumOperands() &&
MI.getOperand(Op + X86::AddrBaseReg).isReg() &&
isScale(MI.getOperand(Op + X86::AddrScaleAmt)) &&
MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
(MI.getOperand(Op + X86::AddrDisp).isImm() ||
MI.getOperand(Op + X86::AddrDisp).isGlobal() ||
MI.getOperand(Op + X86::AddrDisp).isCPI() ||
MI.getOperand(Op + X86::AddrDisp).isJTI());
}
inline static bool isMem(const MachineInstr *MI, unsigned Op) {
if (MI->getOperand(Op).isFI()) return true;
return Op+X86::AddrNumOperands <= MI->getNumOperands() &&
MI->getOperand(Op+X86::AddrSegmentReg).isReg() &&
isLeaMem(MI, Op);
inline static bool isMem(const MachineInstr &MI, unsigned Op) {
if (MI.getOperand(Op).isFI())
return true;
return Op + X86::AddrNumOperands <= MI.getNumOperands() &&
MI.getOperand(Op + X86::AddrSegmentReg).isReg() && isLeaMem(MI, Op);
}
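A short usage sketch of these predicates; the helper and the operand index are hypothetical, but the X86::Addr* offsets are the real layout of a memory reference:

    // Decompose a memory reference that starts at operand Op, once
    // isMem(MI, Op) has vetted it.
    static void inspectMemOperand(const MachineInstr &MI, unsigned Op) {
      if (!isMem(MI, Op))
        return;
      unsigned BaseReg  = MI.getOperand(Op + X86::AddrBaseReg).getReg();
      int64_t  Scale    = MI.getOperand(Op + X86::AddrScaleAmt).getImm();
      unsigned IndexReg = MI.getOperand(Op + X86::AddrIndexReg).getReg();
      // The displacement may be an immediate, global, CPI, or JTI.
      const MachineOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
      unsigned SegReg   = MI.getOperand(Op + X86::AddrSegmentReg).getReg();
      (void)BaseReg; (void)Scale; (void)IndexReg; (void)Disp; (void)SegReg;
    }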
class X86InstrInfo final : public X86GenInstrInfo {
@ -183,7 +184,7 @@ public:
/// getSPAdjust - This returns the stack pointer adjustment made by
/// this instruction. For x86, we need to handle more complex call
/// sequences involving PUSHes.
int getSPAdjust(const MachineInstr *MI) const override;
int getSPAdjust(const MachineInstr &MI) const override;
/// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
/// extension instruction. That is, it's like a copy where it's legal for the
@ -195,27 +196,27 @@ public:
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const override;
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
/// stack locations as well. This uses a heuristic so it isn't
/// reliable for correctness.
unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
/// stack locations as well. This uses a heuristic so it isn't
/// reliable for correctness.
unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const override;
/// Given an operand within a MachineInstr, insert preceding code to put it
@ -226,10 +227,10 @@ public:
///
/// Reference parameters are set to indicate how caller should add this
/// operand to the LEA instruction.
bool classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
unsigned LEAOpcode, bool AllowSP,
unsigned &NewSrc, bool &isKill,
bool &isUndef, MachineOperand &ImplicitOp) const;
bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
unsigned LEAOpcode, bool AllowSP, unsigned &NewSrc,
bool &isKill, bool &isUndef,
MachineOperand &ImplicitOp) const;
/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
@ -242,7 +243,7 @@ public:
/// performed, otherwise it returns the new instruction.
///
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
MachineInstr &MI,
LiveVariables *LV) const override;
/// Returns true iff the routine could find two commutable operands in the
@ -260,7 +261,7 @@ public:
/// findCommutedOpIndices(MI, Op1, Op2);
/// can be interpreted as a query asking to find an operand that would be
/// commutable with the operand#1.
bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const override;
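A sketch of the query pattern this comment describes, mirroring the WebAssemblyRegStackify change earlier in this commit (MI and TII assumed in scope):

    unsigned Idx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned Idx2 = TargetInstrInfo::CommuteAnyOperandIndex;
    if (TII->findCommutedOpIndices(MI, Idx1, Idx2))
      // Commute in place (NewMI = false) using the discovered pair.
      TII->commuteInstruction(MI, /*NewMI=*/false, Idx1, Idx2);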
/// Returns true if the routine could find two commutable operands
@ -285,8 +286,7 @@ public:
/// FMA213 #1, #2, #3
/// results into instruction with adjusted opcode:
/// FMA231 #3, #2, #1
bool findFMA3CommutedOpIndices(MachineInstr *MI,
unsigned &SrcOpIdx1,
bool findFMA3CommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
/// Returns an adjusted FMA opcode that must be used in FMA instruction that
@ -299,8 +299,7 @@ public:
/// FMA213 #1, #2, #3
/// results into instruction with adjusted opcode:
/// FMA231 #3, #2, #1
unsigned getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
unsigned SrcOpIdx1,
unsigned getFMA3OpcodeToCommuteOperands(MachineInstr &MI, unsigned SrcOpIdx1,
unsigned SrcOpIdx2) const;
// Branch analysis.
@ -310,7 +309,7 @@ public:
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
bool AnalyzeBranchPredicate(MachineBasicBlock &MBB,
@ -356,7 +355,7 @@ public:
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
/// foldMemoryOperand - If this target supports it, fold a load or store of
/// the specified stack slot into the specified machine instruction for the
@ -364,27 +363,27 @@ public:
/// folding and return true, otherwise it should return false. If it folds
/// the instruction, it is likely that the MachineInstruction the iterator
/// references has been changed.
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
int FrameIndex,
LiveIntervals *LIS = nullptr) const override;
MachineInstr *
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS = nullptr) const override;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
MachineInstr *LoadMI,
LiveIntervals *LIS = nullptr) const override;
MachineInstr *foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
LiveIntervals *LIS = nullptr) const override;
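
A hedged sketch of a spiller-style caller going through the public foldMemoryOperand wrapper, which dispatches to the Impl overloads above; UseOpIdx and FrameIndex are assumed to be known to the pass:

SmallVector<unsigned, 1> Ops = {UseOpIdx};
if (MachineInstr *Folded = TII->foldMemoryOperand(MI, Ops, FrameIndex)) {
  // The folded instruction is already in place; the caller retires MI.
  MI.eraseFromParent();
}
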
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr*> &NewMIs) const override;
bool
unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr *> &NewMIs) const override;
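
A sketch of the unfold direction, assuming MF, MI, Reg, and TII are in scope; the hook creates the replacement instructions but leaves insertion to the caller:

SmallVector<MachineInstr *, 4> NewMIs;
if (TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad=*/true,
                             /*UnfoldStore=*/false, NewMIs)) {
  MachineBasicBlock &MBB = *MI.getParent();
  for (MachineInstr *NewMI : NewMIs)
    MBB.insert(MI.getIterator(), NewMI); // keep the original program order
  MI.eraseFromParent();
}
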
bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const override;
@@ -419,8 +418,8 @@ public:
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const override;
bool shouldScheduleAdjacent(MachineInstr* First,
MachineInstr *Second) const override;
bool shouldScheduleAdjacent(MachineInstr &First,
MachineInstr &Second) const override;
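
Illustrative use: this is the macro-fusion query (e.g. keeping an x86 CMP back-to-back with its conditional branch). Cmp, Branch, and the scheduling units below are assumed names; the reference parameters now encode that both instructions must be valid:

if (TII->shouldScheduleAdjacent(Cmp, Branch))
  // Pin the pair together with an artificial cluster edge (hypothetical
  // SUnits CmpSU/BranchSU on a ScheduleDAGMI named DAG).
  DAG->addEdge(&BranchSU, SDep(&CmpSU, SDep::Cluster));
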
void getNoopForMachoTarget(MCInst &NopInst) const override;
@@ -440,7 +439,7 @@ public:
/// True if MI has a condition code def, e.g. EFLAGS, that is
/// not marked dead.
bool hasLiveCondCodeDef(MachineInstr *MI) const;
bool hasLiveCondCodeDef(MachineInstr &MI) const;
/// getGlobalBaseReg - Return a virtual register initialized with the
/// global base register value. Output instructions required to
@@ -449,19 +448,19 @@ public:
unsigned getGlobalBaseReg(MachineFunction *MF) const;
std::pair<uint16_t, uint16_t>
getExecutionDomain(const MachineInstr *MI) const override;
getExecutionDomain(const MachineInstr &MI) const override;
void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override;
unsigned
getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const override;
unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const override;
unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
const TargetRegisterInfo *TRI) const override;
void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const override;
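
These clearance hooks cooperate in the execution-dependency pass; a hedged sketch, where regClearance is a hypothetical stand-in for the pass's clearance bookkeeping:

unsigned OpNum = 0; // set by the query below
if (unsigned Pref = TII->getUndefRegClearance(MI, OpNum, TRI))
  if (regClearance(MI, OpNum) < Pref)               // hypothetical helper
    TII->breakPartialRegDependency(MI, OpNum, TRI); // e.g. xorps %x, %x
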
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
unsigned OpNum,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
@@ -480,10 +479,10 @@ public:
bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI,
const MachineInstr &DefMI, unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const override;
bool useMachineCombiner() const override {
return true;
}
@@ -501,14 +500,14 @@ public:
/// in SrcReg and SrcReg2 if it has two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const override;
/// optimizeCompareInstr - Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
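
The two hooks above are used as a pair; a simplified sketch of the peephole pattern, assuming CmpMI, TII, and MRI are in scope:

unsigned SrcReg = 0, SrcReg2 = 0;
int CmpMask = 0, CmpValue = 0;
if (TII->analyzeCompare(CmpMI, SrcReg, SrcReg2, CmpMask, CmpValue))
  (void)TII->optimizeCompareInstr(CmpMI, SrcReg, SrcReg2, CmpMask, CmpValue,
                                  MRI); // may erase CmpMI on success
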
@@ -519,7 +518,7 @@ public:
/// defined by the load we are trying to fold. DefMI returns the machine
/// instruction that defines FoldAsLoadDefReg, and the function returns
/// the machine instruction generated due to folding.
MachineInstr* optimizeLoadInstr(MachineInstr *MI,
MachineInstr *optimizeLoadInstr(MachineInstr &MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const override;
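
A sketch of the caller's side of this contract (peephole-style; FoldAsLoadDefReg is assumed to hold the candidate vreg the pass has been tracking):

MachineInstr *DefMI = nullptr;
if (MachineInstr *FoldMI =
        TII->optimizeLoadInstr(MI, MRI, FoldAsLoadDefReg, DefMI)) {
  // FoldMI replaces MI with the load folded in; both old instructions die.
  MI.eraseFromParent();
  DefMI->eraseFromParent();
}
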
@@ -542,19 +541,19 @@ protected:
/// non-commutable operands.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
MachineInstr *commuteInstructionImpl(MachineInstr *MI, bool NewMI,
MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned CommuteOpIdx1,
unsigned CommuteOpIdx2) const override;
private:
MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,
MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const;
MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc,
MachineFunction::iterator &MFI,
MachineInstr &MI,
LiveVariables *LV) const;
/// Handles memory folding for special-case instructions, for instance those
/// requiring custom manipulation of the address.
MachineInstr *foldMemoryOperandCustom(MachineFunction &MF, MachineInstr *MI,
MachineInstr *foldMemoryOperandCustom(MachineFunction &MF, MachineInstr &MI,
unsigned OpNum,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
@@ -562,7 +561,7 @@ private:
/// isFrameOperand - Return true and the FrameIndex if the specified
/// operand and following operands form a reference to the stack frame.
bool isFrameOperand(const MachineInstr *MI, unsigned int Op,
bool isFrameOperand(const MachineInstr &MI, unsigned int Op,
int &FrameIndex) const;
/// Expand the MOVImmSExti8 pseudo-instructions.


@@ -199,7 +199,7 @@ bool PadShortFunc::cyclesUntilReturn(MachineBasicBlock *MBB,
return true;
}
CyclesToEnd += TII->getInstrLatency(STI->getInstrItineraryData(), MI);
CyclesToEnd += TII->getInstrLatency(STI->getInstrItineraryData(), *MI);
}
VisitedBBs[MBB] = VisitedBBInfo(false, CyclesToEnd);


@@ -60,17 +60,16 @@ static bool isZeroImm(const MachineOperand &op) {
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned
XCoreInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const{
int Opcode = MI->getOpcode();
unsigned XCoreInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
int Opcode = MI.getOpcode();
if (Opcode == XCore::LDWFI)
{
if ((MI->getOperand(1).isFI()) && // is a stack slot
(MI->getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI->getOperand(2))))
{
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
if ((MI.getOperand(1).isFI()) && // is a stack slot
(MI.getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI.getOperand(2)))) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
}
return 0;
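
A sketch of how a generic caller consumes this predicate, assuming TII, MI, and a MachineFrameInfo *MFI are in scope (recordReload is a hypothetical helper):

int FrameIndex;
if (unsigned DstReg = TII->isLoadFromStackSlot(MI, FrameIndex))
  if (MFI->isSpillSlotObjectIndex(FrameIndex)) // only genuine spill slots
    recordReload(DstReg, FrameIndex);
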
@@ -81,18 +80,16 @@ XCoreInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) con
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned
XCoreInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
int Opcode = MI->getOpcode();
unsigned XCoreInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
int Opcode = MI.getOpcode();
if (Opcode == XCore::STWFI)
{
if ((MI->getOperand(1).isFI()) && // is a stack slot
(MI->getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI->getOperand(2))))
{
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
if ((MI.getOperand(1).isFI()) && // is a stack slot
(MI.getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI.getOperand(2)))) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
}
return 0;


@@ -39,7 +39,7 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned isLoadFromStackSlot(const MachineInstr *MI,
unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
@@ -47,7 +47,7 @@ public:
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned isStoreToStackSlot(const MachineInstr *MI,
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,