Pass DebugLoc and SDLoc by const ref.

This used to be free, but copying and moving DebugLocs became expensive
after the metadata rewrite. Passing by const reference eliminates a large
number of track/untrack operations. No functionality change intended.
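
To make the cost concrete, here is a minimal stand-alone sketch. It is not code from this patch and uses a stand-in type rather than the real DebugLoc: any type whose copy constructor has to register itself with tracking machinery pays a track/untrack pair for every by-value parameter, while a const-reference parameter borrows the caller's object for free.

#include <cstdio>

// Stand-in for a handle whose copies must be registered with a tracking
// list -- roughly the situation DebugLoc is in after the metadata rewrite.
struct TrackedLoc {
  TrackedLoc() { std::puts("track (construct)"); }
  TrackedLoc(const TrackedLoc &) { std::puts("track (copy)"); }
  ~TrackedLoc() { std::puts("untrack"); }
};

// By-value parameter: the argument is copied on every call.
void emitByValue(TrackedLoc Loc) { (void)Loc; }

// Const-reference parameter: no copy, so no track/untrack pair per call.
void emitByConstRef(const TrackedLoc &Loc) { (void)Loc; }

int main() {
  TrackedLoc Loc;
  emitByValue(Loc);    // prints an extra "track (copy)" and "untrack"
  emitByConstRef(Loc); // prints nothing extra
  return 0;
}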

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@272512 91177308-0d34-0410-b5e6-96231b3b80d8
Benjamin Kramer 2016-06-12 15:39:02 +00:00
parent 9a476793c5
commit af18e017d2
155 changed files with 2163 additions and 2415 deletions


@ -452,7 +452,7 @@ protected:
/// \brief Emit an unconditional branch to the given block, unless it is the
/// immediate (fall-through) successor, and update the CFG.
void fastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);
void fastEmitBranch(MachineBasicBlock *MBB, const DebugLoc &DL);
/// Emit an unconditional branch to \p FalseMBB, obtains the branch weight
/// and adds TrueMBB and FalseMBB to the successor list.


@ -121,7 +121,7 @@ public:
/// addSafePoint - Notes the existence of a safe point. Num is the ID of the
/// label just prior to the safe point (if the code generator is using
/// MachineModuleInfo).
void addSafePoint(GC::PointKind Kind, MCSymbol *Label, DebugLoc DL) {
void addSafePoint(GC::PointKind Kind, MCSymbol *Label, const DebugLoc &DL) {
SafePoints.emplace_back(Kind, Label, DL);
}


@ -502,8 +502,7 @@ public:
/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
/// of `new MachineInstr'.
///
MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID,
DebugLoc DL,
MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL,
bool NoImp = false);
/// CloneMachineInstr - Create a new MachineInstr which is a copy of the


@ -237,18 +237,15 @@ public:
};
/// Builder interface. Specify how to create the initial instruction itself.
inline MachineInstrBuilder BuildMI(MachineFunction &MF,
DebugLoc DL,
inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
const MCInstrDesc &MCID) {
return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL));
}
/// This version of the builder sets up the first operand as a
/// destination virtual register.
inline MachineInstrBuilder BuildMI(MachineFunction &MF,
DebugLoc DL,
const MCInstrDesc &MCID,
unsigned DestReg) {
inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
const MCInstrDesc &MCID, unsigned DestReg) {
return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL))
.addReg(DestReg, RegState::Define);
}
@ -258,8 +255,7 @@ inline MachineInstrBuilder BuildMI(MachineFunction &MF,
/// operand as a destination virtual register.
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
DebugLoc DL,
const MCInstrDesc &MCID,
const DebugLoc &DL, const MCInstrDesc &MCID,
unsigned DestReg) {
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
@ -269,8 +265,7 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::instr_iterator I,
DebugLoc DL,
const MCInstrDesc &MCID,
const DebugLoc &DL, const MCInstrDesc &MCID,
unsigned DestReg) {
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
@ -278,10 +273,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineInstr *I,
DebugLoc DL,
const MCInstrDesc &MCID,
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
const DebugLoc &DL, const MCInstrDesc &MCID,
unsigned DestReg) {
if (I->isInsideBundle()) {
MachineBasicBlock::instr_iterator MII(I);
@ -297,7 +290,7 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
/// destination register.
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
DebugLoc DL,
const DebugLoc &DL,
const MCInstrDesc &MCID) {
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
@ -307,7 +300,7 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::instr_iterator I,
DebugLoc DL,
const DebugLoc &DL,
const MCInstrDesc &MCID) {
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
@ -315,9 +308,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
return MachineInstrBuilder(MF, MI);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineInstr *I,
DebugLoc DL,
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
const DebugLoc &DL,
const MCInstrDesc &MCID) {
if (I->isInsideBundle()) {
MachineBasicBlock::instr_iterator MII(I);
@ -330,8 +322,7 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
/// This version of the builder inserts the newly-built instruction at the end
/// of the given MachineBasicBlock, and does NOT take a destination register.
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
DebugLoc DL,
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
const MCInstrDesc &MCID) {
return BuildMI(*BB, BB->end(), DL, MCID);
}
@ -339,10 +330,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
/// This version of the builder inserts the newly-built instruction at the
/// end of the given MachineBasicBlock, and sets up the first operand as a
/// destination virtual register.
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
DebugLoc DL,
const MCInstrDesc &MCID,
unsigned DestReg) {
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
const MCInstrDesc &MCID, unsigned DestReg) {
return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
}
@ -350,7 +339,7 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
/// for either a value in a register or a register-indirect+offset
/// address. The convention is that a DBG_VALUE is indirect iff the
/// second operand is an immediate.
MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL,
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
const MCInstrDesc &MCID, bool IsIndirect,
unsigned Reg, unsigned Offset,
const MDNode *Variable, const MDNode *Expr);
@ -359,7 +348,7 @@ MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL,
/// for either a value in a register or a register-indirect+offset
/// address and inserts it at position I.
MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I, DebugLoc DL,
MachineBasicBlock::iterator I, const DebugLoc &DL,
const MCInstrDesc &MCID, bool IsIndirect,
unsigned Reg, unsigned Offset,
const MDNode *Variable, const MDNode *Expr);
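
Call sites are unaffected by these signature changes: an existing DebugLoc lvalue (or a temporary) binds directly to the new const-reference parameter. A sketch of a typical call, assuming a MachineFunction MF, an MCInstrDesc Desc, a DebugLoc DL, and a register DestReg are already in scope:

// Sketch only; MF, Desc, DL, and DestReg are assumed to exist in the caller.
MachineInstrBuilder MIB =
    BuildMI(MF, DL, Desc).addReg(DestReg, RegState::Define);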


@ -474,22 +474,23 @@ public:
/// If only legal types can be produced, this does the necessary
/// transformations (e.g., if the vector element type is illegal).
/// @{
SDValue getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isTarget = false,
bool isOpaque = false);
SDValue getConstant(const APInt &Val, SDLoc DL, EVT VT, bool isTarget = false,
bool isOpaque = false);
SDValue getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
bool isTarget = false, bool isOpaque = false);
SDValue getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget = false);
SDValue getTargetConstant(uint64_t Val, SDLoc DL, EVT VT,
SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
bool isTarget = false, bool isOpaque = false);
SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
bool isTarget = false, bool isOpaque = false);
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
bool isTarget = false);
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
bool isOpaque = false) {
return getConstant(Val, DL, VT, true, isOpaque);
}
SDValue getTargetConstant(const APInt &Val, SDLoc DL, EVT VT,
SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
bool isOpaque = false) {
return getConstant(Val, DL, VT, true, isOpaque);
}
SDValue getTargetConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
bool isOpaque = false) {
return getConstant(Val, DL, VT, true, isOpaque);
}
@ -503,26 +504,27 @@ public:
/// The forms that take a double should only be used for simple constants
/// that can be exactly represented in VT. No checks are made.
/// @{
SDValue getConstantFP(double Val, SDLoc DL, EVT VT, bool isTarget = false);
SDValue getConstantFP(const APFloat& Val, SDLoc DL, EVT VT,
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
bool isTarget = false);
SDValue getConstantFP(const ConstantFP &CF, SDLoc DL, EVT VT,
SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
bool isTarget = false);
SDValue getTargetConstantFP(double Val, SDLoc DL, EVT VT) {
SDValue getConstantFP(const ConstantFP &CF, const SDLoc &DL, EVT VT,
bool isTarget = false);
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
return getConstantFP(Val, DL, VT, true);
}
SDValue getTargetConstantFP(const APFloat& Val, SDLoc DL, EVT VT) {
SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
return getConstantFP(Val, DL, VT, true);
}
SDValue getTargetConstantFP(const ConstantFP &Val, SDLoc DL, EVT VT) {
SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
return getConstantFP(Val, DL, VT, true);
}
/// @}
SDValue getGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT,
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
int64_t offset = 0, bool isTargetGA = false,
unsigned char TargetFlags = 0);
SDValue getTargetGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT,
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
int64_t offset = 0,
unsigned char TargetFlags = 0) {
return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
@ -559,7 +561,7 @@ public:
SDValue getBasicBlock(MachineBasicBlock *MBB);
SDValue getBasicBlock(MachineBasicBlock *MBB, SDLoc dl);
SDValue getExternalSymbol(const char *Sym, EVT VT);
SDValue getExternalSymbol(const char *Sym, SDLoc dl, EVT VT);
SDValue getExternalSymbol(const char *Sym, const SDLoc &dl, EVT VT);
SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
unsigned char TargetFlags = 0);
SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
@ -567,7 +569,7 @@ public:
SDValue getValueType(EVT);
SDValue getRegister(unsigned Reg, EVT VT);
SDValue getRegisterMask(const uint32_t *RegMask);
SDValue getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label);
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
SDValue getBlockAddress(const BlockAddress *BA, EVT VT,
int64_t Offset = 0, bool isTarget = false,
unsigned char TargetFlags = 0);
@ -577,7 +579,8 @@ public:
return getBlockAddress(BA, VT, Offset, true, TargetFlags);
}
SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N) {
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
SDValue N) {
return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
getRegister(Reg, N.getValueType()), N);
}
@ -585,7 +588,7 @@ public:
// This version of the getCopyToReg method takes an extra operand, which
// indicates that there is potentially an incoming glue value (if Glue is not
// null) and that there should be a glue result.
SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N,
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
SDValue Glue) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
@ -594,7 +597,7 @@ public:
}
// Similar to last getCopyToReg() except parameter Reg is a SDValue
SDValue getCopyToReg(SDValue Chain, SDLoc dl, SDValue Reg, SDValue N,
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
SDValue Glue) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Reg, N, Glue };
@ -602,7 +605,7 @@ public:
makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
}
SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT) {
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
SDVTList VTs = getVTList(VT, MVT::Other);
SDValue Ops[] = { Chain, getRegister(Reg, VT) };
return getNode(ISD::CopyFromReg, dl, VTs, Ops);
@ -611,7 +614,7 @@ public:
// This version of the getCopyFromReg method takes an extra operand, which
// indicates that there is potentially an incoming glue value (if Glue is not
// null) and that there should be a glue result.
SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT,
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
SDValue Glue) {
SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
@ -623,16 +626,16 @@ public:
/// Returns the ConvertRndSat Note: Avoid using this node because it may
/// disappear in the future and most targets don't support it.
SDValue getConvertRndSat(EVT VT, SDLoc dl, SDValue Val, SDValue DTy,
SDValue STy,
SDValue Rnd, SDValue Sat, ISD::CvtCode Code);
SDValue getConvertRndSat(EVT VT, const SDLoc &dl, SDValue Val, SDValue DTy,
SDValue STy, SDValue Rnd, SDValue Sat,
ISD::CvtCode Code);
/// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
/// which must be a vector type, must match the number of mask elements
/// NumElts. An integer mask element equal to -1 is treated as undefined.
SDValue getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, SDValue N2,
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
const int *MaskElts);
SDValue getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, SDValue N2,
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
ArrayRef<int> MaskElts) {
assert(VT.getVectorNumElements() == MaskElts.size() &&
"Must have the same number of vector elements as mask elements!");
@ -643,7 +646,7 @@ public:
/// which must be a vector type, must match the number of operands in Ops.
/// The operands must have the same type as (or, for integers, a type wider
/// than) VT's element type.
SDValue getBuildVector(EVT VT, SDLoc DL, ArrayRef<SDValue> Ops) {
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
// VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
@ -651,7 +654,7 @@ public:
/// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
/// elements. VT must be a vector type. Op's type must be the same as (or,
/// for integers, a type wider than) VT's element type.
SDValue getSplatBuildVector(EVT VT, SDLoc DL, SDValue Op) {
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
// VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
if (Op.getOpcode() == ISD::UNDEF) {
assert((VT.getVectorElementType() == Op.getValueType() ||
@ -679,52 +682,52 @@ public:
/// Convert Op, which must be of integer type, to the
/// integer type VT, by either any-extending or truncating it.
SDValue getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT);
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
/// Convert Op, which must be of integer type, to the
/// integer type VT, by either sign-extending or truncating it.
SDValue getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT);
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
/// Convert Op, which must be of integer type, to the
/// integer type VT, by either zero-extending or truncating it.
SDValue getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT);
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
/// Return the expression required to zero extend the Op
/// value assuming it was the smaller SrcTy value.
SDValue getZeroExtendInReg(SDValue Op, SDLoc DL, EVT SrcTy);
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT SrcTy);
/// Return an operation which will any-extend the low lanes of the operand
/// into the specified vector type. For example,
/// this can convert a v16i8 into a v4i32 by any-extending the low four
/// lanes of the operand from i8 to i32.
SDValue getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT);
SDValue getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
/// Return an operation which will sign extend the low lanes of the operand
/// into the specified vector type. For example,
/// this can convert a v16i8 into a v4i32 by sign extending the low four
/// lanes of the operand from i8 to i32.
SDValue getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT);
SDValue getSignExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
/// Return an operation which will zero extend the low lanes of the operand
/// into the specified vector type. For example,
/// this can convert a v16i8 into a v4i32 by zero extending the low four
/// lanes of the operand from i8 to i32.
SDValue getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT);
SDValue getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
/// Convert Op, which must be of integer type, to the integer type VT,
/// by using an extension appropriate for the target's
/// BooleanContent for type OpVT or truncating it.
SDValue getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT, EVT OpVT);
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);
/// Create a bitwise NOT operation as (XOR Val, -1).
SDValue getNOT(SDLoc DL, SDValue Val, EVT VT);
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);
/// \brief Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getLogicalNOT(SDLoc DL, SDValue Val, EVT VT);
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);
/// Return a new CALLSEQ_START node, which always must have a glue result
/// (to ensure it's not CSE'd). CALLSEQ_START does not have a useful SDLoc.
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, SDLoc DL) {
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, const SDLoc &DL) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Op };
return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
@ -734,7 +737,7 @@ public:
/// glue result (to ensure it's not CSE'd).
/// CALLSEQ_END does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
SDValue InGlue, SDLoc DL) {
SDValue InGlue, const SDLoc &DL) {
SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 4> Ops;
Ops.push_back(Chain);
@ -757,38 +760,38 @@ public:
/// Gets or creates the specified node.
///
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT,
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDUse> Ops);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT,
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDValue> Ops, const SDNodeFlags *Flags = nullptr);
SDValue getNode(unsigned Opcode, SDLoc DL, ArrayRef<EVT> ResultTys,
SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs,
ArrayRef<SDValue> Ops);
// Specialize based on number of operands.
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
const SDNodeFlags *Flags = nullptr);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
SDValue N3);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
SDValue N3, SDValue N4, SDValue N5);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, const SDNodeFlags *Flags = nullptr);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, SDValue N3);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, SDValue N3, SDValue N4, SDValue N5);
// Specialize again based on number of operands for nodes with a VTList
// rather than a single VT.
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
SDValue N2);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
SDValue N2, SDValue N3);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
SDValue N2, SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
SDValue N2, SDValue N3, SDValue N4, SDValue N5);
/// Compute a TokenFactor to force all the incoming stack arguments to be
@ -796,24 +799,24 @@ public:
/// stack arguments from being clobbered.
SDValue getStackArgumentTokenFactor(SDValue Chain);
SDValue getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src,
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVol, bool AlwaysInline,
bool isTailCall, MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo);
SDValue getMemmove(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src,
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo);
SDValue getMemset(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src,
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo);
/// Helper function to make it easier to build SetCC's if you just
/// have an ISD::CondCode instead of an SDValue.
///
SDValue getSetCC(SDLoc DL, EVT VT, SDValue LHS, SDValue RHS,
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
ISD::CondCode Cond) {
assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
"Cannot compare scalars to vectors");
@ -826,8 +829,8 @@ public:
/// Helper function to make it easier to build Select's if you just
/// have operands and don't want to check for vector.
SDValue getSelect(SDLoc DL, EVT VT, SDValue Cond,
SDValue LHS, SDValue RHS) {
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
SDValue RHS) {
assert(LHS.getValueType() == RHS.getValueType() &&
"Cannot use select on differing types");
assert(VT.isVector() == LHS.getValueType().isVector() &&
@ -839,142 +842,137 @@ public:
/// Helper function to make it easier to build SelectCC's if you
/// just have an ISD::CondCode instead of an SDValue.
///
SDValue getSelectCC(SDLoc DL, SDValue LHS, SDValue RHS,
SDValue True, SDValue False, ISD::CondCode Cond) {
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
SDValue False, ISD::CondCode Cond) {
return getNode(ISD::SELECT_CC, DL, True.getValueType(),
LHS, RHS, True, False, getCondCode(Cond));
}
/// VAArg produces a result and token chain, and takes a pointer
/// and a source value as input.
SDValue getVAArg(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
SDValue SV, unsigned Align);
/// Gets a node for an atomic cmpxchg op. There are two
/// valid Opcodes. ISD::ATOMIC_CMO_SWAP produces the value loaded and a
/// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
/// a success flag (initially i1), and a chain.
SDValue getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs,
SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp,
MachinePointerInfo PtrInfo, unsigned Alignment,
AtomicOrdering SuccessOrdering,
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDVTList VTs, SDValue Chain, SDValue Ptr,
SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
unsigned Alignment, AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope);
SDValue getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs,
SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp,
MachineMemOperand *MMO,
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDVTList VTs, SDValue Chain, SDValue Ptr,
SDValue Cmp, SDValue Swp, MachineMemOperand *MMO,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope);
/// Gets a node for an atomic op, produces result (if relevant)
/// and chain and takes 2 operands.
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDValue Chain,
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, const Value *PtrVal,
unsigned Alignment, AtomicOrdering Ordering,
SynchronizationScope SynchScope);
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDValue Chain,
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope);
AtomicOrdering Ordering, SynchronizationScope SynchScope);
/// Gets a node for an atomic op, produces result and chain and
/// takes 1 operand.
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, EVT VT,
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
SDValue Chain, SDValue Ptr, MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope);
AtomicOrdering Ordering, SynchronizationScope SynchScope);
/// Gets a node for an atomic op, produces result and chain and takes N
/// operands.
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTList,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
AtomicOrdering SuccessOrdering,
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDVTList VTList, ArrayRef<SDValue> Ops,
MachineMemOperand *MMO, AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope);
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTList,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
AtomicOrdering Ordering, SynchronizationScope SynchScope);
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDVTList VTList, ArrayRef<SDValue> Ops,
MachineMemOperand *MMO, AtomicOrdering Ordering,
SynchronizationScope SynchScope);
/// Creates a MemIntrinsicNode that may produce a
/// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
/// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
/// less than FIRST_TARGET_MEMORY_OPCODE.
SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
ArrayRef<SDValue> Ops,
EVT MemVT, MachinePointerInfo PtrInfo,
unsigned Align = 0, bool Vol = false,
bool ReadMem = true, bool WriteMem = true,
unsigned Size = 0);
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
ArrayRef<SDValue> Ops, EVT MemVT,
MachinePointerInfo PtrInfo, unsigned Align = 0,
bool Vol = false, bool ReadMem = true,
bool WriteMem = true, unsigned Size = 0);
SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
ArrayRef<SDValue> Ops,
EVT MemVT, MachineMemOperand *MMO);
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
ArrayRef<SDValue> Ops, EVT MemVT,
MachineMemOperand *MMO);
/// Create a MERGE_VALUES node from the given operands.
SDValue getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl);
SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);
/// Loads are not normal binary operators: their result type is not
/// determined by their operands, and they produce a value AND a token chain.
///
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
MachinePointerInfo PtrInfo, bool isVolatile,
bool isNonTemporal, bool isInvariant, unsigned Alignment,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr);
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
MachineMemOperand *MMO);
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo,
EVT MemVT, bool isVolatile,
bool isNonTemporal, bool isInvariant, unsigned Alignment,
EVT MemVT, bool isVolatile, bool isNonTemporal,
bool isInvariant, unsigned Alignment,
const AAMDNodes &AAInfo = AAMDNodes());
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
SDValue Chain, SDValue Ptr, EVT MemVT,
MachineMemOperand *MMO);
SDValue getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, SDLoc dl,
SDValue Chain, SDValue Ptr, SDValue Offset,
MachinePointerInfo PtrInfo, EVT MemVT,
bool isVolatile, bool isNonTemporal, bool isInvariant,
unsigned Alignment, const AAMDNodes &AAInfo = AAMDNodes(),
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
MachinePointerInfo PtrInfo, EVT MemVT, bool isVolatile,
bool isNonTemporal, bool isInvariant, unsigned Alignment,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr);
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, SDLoc dl,
SDValue Chain, SDValue Ptr, SDValue Offset,
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
EVT MemVT, MachineMemOperand *MMO);
/// Helper function to build ISD::STORE nodes.
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, bool isVolatile,
bool isNonTemporal, unsigned Alignment,
const AAMDNodes &AAInfo = AAMDNodes());
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
MachineMemOperand *MMO);
SDValue getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, EVT TVT,
bool isNonTemporal, bool isVolatile,
unsigned Alignment,
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
SDValue Ptr, MachinePointerInfo PtrInfo, EVT TVT,
bool isNonTemporal, bool isVolatile, unsigned Alignment,
const AAMDNodes &AAInfo = AAMDNodes());
SDValue getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
EVT TVT, MachineMemOperand *MMO);
SDValue getIndexedStore(SDValue OrigStoe, SDLoc dl, SDValue Base,
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
SDValue Ptr, EVT TVT, MachineMemOperand *MMO);
SDValue getIndexedStore(SDValue OrigStoe, const SDLoc &dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
/// Returns sum of the base pointer and offset.
SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc DL);
SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, const SDLoc &DL);
SDValue getMaskedLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
SDValue Mask, SDValue Src0, EVT MemVT,
MachineMemOperand *MMO, ISD::LoadExtType);
SDValue getMaskedStore(SDValue Chain, SDLoc dl, SDValue Val,
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
SDValue Ptr, SDValue Mask, EVT MemVT,
MachineMemOperand *MMO, bool IsTrunc);
SDValue getMaskedGather(SDVTList VTs, EVT VT, SDLoc dl,
SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
SDValue getMaskedScatter(SDVTList VTs, EVT VT, SDLoc dl,
SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
/// Construct a node to track a Value* through the backend.
SDValue getSrcValue(const Value *v);
@ -987,8 +985,8 @@ public:
SDValue getBitcast(EVT VT, SDValue V);
/// Return an AddrSpaceCastSDNode.
SDValue getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
unsigned SrcAS, unsigned DestAS);
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
unsigned DestAS);
/// Return the specified value casted to
/// the target's desired shift amount type.
@ -1057,45 +1055,46 @@ public:
/// Note that getMachineNode returns the resultant node. If there is already
/// a node of the specified opcode and operands, it returns that node instead
/// of the current one.
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
SDValue Op1);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
SDValue Op1);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
EVT VT3, SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
EVT VT3, SDValue Op1, SDValue Op2,
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, SDValue Op1);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
EVT VT3, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
EVT VT3, EVT VT4, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl,
ArrayRef<EVT> ResultTys,
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, EVT VT3, EVT VT4,
ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, SDVTList VTs,
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
ArrayRef<SDValue> Ops);
/// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
SDValue Operand);
/// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
SDValue Operand, SDValue Subreg);
/// Get the specified node if it's already available, or else return NULL.
@ -1104,16 +1103,17 @@ public:
/// Creates a SDDbgValue node.
SDDbgValue *getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N, unsigned R,
bool IsIndirect, uint64_t Off, DebugLoc DL,
bool IsIndirect, uint64_t Off, const DebugLoc &DL,
unsigned O);
/// Constant
SDDbgValue *getConstantDbgValue(MDNode *Var, MDNode *Expr, const Value *C,
uint64_t Off, DebugLoc DL, unsigned O);
uint64_t Off, const DebugLoc &DL, unsigned O);
/// FrameIndex
SDDbgValue *getFrameIndexDbgValue(MDNode *Var, MDNode *Expr, unsigned FI,
uint64_t Off, DebugLoc DL, unsigned O);
uint64_t Off, const DebugLoc &DL,
unsigned O);
/// Remove the specified node from the system. If any of its
/// operands then becomes dead, remove them as well. Inform UpdateListener
@ -1252,20 +1252,20 @@ public:
const GlobalAddressSDNode *GA,
const SDNode *N2);
SDValue FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
SDNode *Cst1, SDNode *Cst2);
SDValue FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
const ConstantSDNode *Cst1,
const ConstantSDNode *Cst2);
SDValue FoldConstantVectorArithmetic(unsigned Opcode, SDLoc DL,
EVT VT, ArrayRef<SDValue> Ops,
SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDValue> Ops,
const SDNodeFlags *Flags = nullptr);
/// Constant fold a setcc to true or false.
SDValue FoldSetCC(EVT VT, SDValue N1,
SDValue N2, ISD::CondCode Cond, SDLoc dl);
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
const SDLoc &dl);
/// Return true if the sign bit of Op is known to be zero.
/// We use this predicate to simplify operations downstream.
@ -1389,8 +1389,9 @@ private:
void allnodes_clear();
SDNode *GetBinarySDNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
SDValue N2, const SDNodeFlags *Flags = nullptr);
SDNode *GetBinarySDNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs,
SDValue N1, SDValue N2,
const SDNodeFlags *Flags = nullptr);
/// Look up the node specified by ID in CSEMap. If it exists, return it. If
/// not, return the insertion token that will make insertion faster. This
@ -1401,7 +1402,7 @@ private:
/// Look up the node specified by ID in CSEMap. If it exists, return it. If
/// not, return the insertion token that will make insertion faster. Performs
/// additional processing for constant nodes.
SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, SDLoc DL,
SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
void *&InsertPos);
/// List of non-single value types.


@ -210,8 +210,8 @@ protected:
/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen. Others should not call it.
void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops, SDLoc DL);
void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
const SDLoc &DL);
public:
// Calls to these predicates are generated by tblgen.


@ -847,8 +847,8 @@ public:
if (I)
DL = I->getDebugLoc();
}
unsigned getIROrder() { return IROrder; }
DebugLoc getDebugLoc() { return DL; }
unsigned getIROrder() const { return IROrder; }
const DebugLoc &getDebugLoc() const { return DL; }
};
@ -952,8 +952,8 @@ static bool isBinOpWithFlags(unsigned Opcode) {
class BinaryWithFlagsSDNode : public SDNode {
public:
SDNodeFlags Flags;
BinaryWithFlagsSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
const SDNodeFlags &NodeFlags)
BinaryWithFlagsSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
SDVTList VTs, const SDNodeFlags &NodeFlags)
: SDNode(Opc, Order, dl, VTs), Flags(NodeFlags) {}
static bool classof(const SDNode *N) {
return isBinOpWithFlags(N->getOpcode());
@ -992,8 +992,8 @@ private:
unsigned DestAddrSpace;
public:
AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT, unsigned SrcAS,
unsigned DestAS);
AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
unsigned SrcAS, unsigned DestAS);
unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
unsigned getDestAddressSpace() const { return DestAddrSpace; }
@ -1014,7 +1014,7 @@ protected:
MachineMemOperand *MMO;
public:
MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
EVT MemoryVT, MachineMemOperand *MMO);
bool readMem() const { return MMO->isLoad(); }
@ -1146,7 +1146,7 @@ class AtomicSDNode : public MemSDNode {
}
public:
AtomicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTL,
AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
EVT MemVT, MachineMemOperand *MMO,
AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope)
@ -1198,8 +1198,8 @@ public:
/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
class MemIntrinsicSDNode : public MemSDNode {
public:
MemIntrinsicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
EVT MemoryVT, MachineMemOperand *MMO)
MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
: MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
SubclassData |= 1u << 13;
}
@ -1228,7 +1228,7 @@ class ShuffleVectorSDNode : public SDNode {
const int *Mask;
protected:
friend class SelectionDAG;
ShuffleVectorSDNode(EVT VT, unsigned Order, DebugLoc dl, const int *M)
ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
: SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
public:
@ -1277,9 +1277,10 @@ class ConstantSDNode : public SDNode {
const ConstantInt *Value;
friend class SelectionDAG;
ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val,
DebugLoc DL, EVT VT)
: SDNode(isTarget ? ISD::TargetConstant : ISD::Constant,
0, DL, getSDVTList(VT)), Value(val) {
const DebugLoc &DL, EVT VT)
: SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DL,
getSDVTList(VT)),
Value(val) {
SubclassData |= (uint16_t)isOpaque;
}
public:
@ -1304,10 +1305,12 @@ public:
class ConstantFPSDNode : public SDNode {
const ConstantFP *Value;
friend class SelectionDAG;
ConstantFPSDNode(bool isTarget, const ConstantFP *val, DebugLoc DL, EVT VT)
: SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP,
0, DL, getSDVTList(VT)), Value(val) {
}
ConstantFPSDNode(bool isTarget, const ConstantFP *val, const DebugLoc &DL,
EVT VT)
: SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0, DL,
getSDVTList(VT)),
Value(val) {}
public:
const APFloat& getValueAPF() const { return Value->getValueAPF(); }
@ -1367,9 +1370,10 @@ class GlobalAddressSDNode : public SDNode {
int64_t Offset;
unsigned char TargetFlags;
friend class SelectionDAG;
GlobalAddressSDNode(unsigned Opc, unsigned Order, DebugLoc DL,
GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
const GlobalValue *GA, EVT VT, int64_t o,
unsigned char TargetFlags);
public:
const GlobalValue *getGlobal() const { return TheGlobal; }
@ -1667,7 +1671,7 @@ public:
class EHLabelSDNode : public SDNode {
MCSymbol *Label;
friend class SelectionDAG;
EHLabelSDNode(unsigned Order, DebugLoc dl, MCSymbol *L)
EHLabelSDNode(unsigned Order, const DebugLoc &dl, MCSymbol *L)
: SDNode(ISD::EH_LABEL, Order, dl, getSDVTList(MVT::Other)), Label(L) {}
public:
@ -1734,7 +1738,7 @@ public:
class CvtRndSatSDNode : public SDNode {
ISD::CvtCode CvtCode;
friend class SelectionDAG;
explicit CvtRndSatSDNode(EVT VT, unsigned Order, DebugLoc dl,
explicit CvtRndSatSDNode(EVT VT, unsigned Order, const DebugLoc &dl,
ISD::CvtCode Code)
: SDNode(ISD::CONVERT_RNDSAT, Order, dl, getSDVTList(VT)), CvtCode(Code) {
}
@ -1768,7 +1772,7 @@ public:
/// Base class for LoadSDNode and StoreSDNode
class LSBaseSDNode : public MemSDNode {
public:
LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, DebugLoc dl,
LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
MachineMemOperand *MMO)
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
@ -1801,8 +1805,9 @@ public:
/// This class is used to represent ISD::LOAD nodes.
class LoadSDNode : public LSBaseSDNode {
friend class SelectionDAG;
LoadSDNode(unsigned Order, DebugLoc dl, SDVTList VTs, ISD::MemIndexedMode AM,
ISD::LoadExtType ETy, EVT MemVT, MachineMemOperand *MMO)
LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
MachineMemOperand *MMO)
: LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
SubclassData |= (unsigned short)ETy;
assert(getExtensionType() == ETy && "LoadExtType encoding error!");
@ -1828,8 +1833,9 @@ public:
/// This class is used to represent ISD::STORE nodes.
class StoreSDNode : public LSBaseSDNode {
friend class SelectionDAG;
StoreSDNode(unsigned Order, DebugLoc dl, SDVTList VTs, ISD::MemIndexedMode AM,
bool isTrunc, EVT MemVT, MachineMemOperand *MMO)
StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
MachineMemOperand *MMO)
: LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
SubclassData |= (unsigned short)isTrunc;
assert(isTruncatingStore() == isTrunc && "isTrunc encoding error!");
@ -1856,8 +1862,9 @@ public:
class MaskedLoadStoreSDNode : public MemSDNode {
public:
friend class SelectionDAG;
MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, DebugLoc dl,
SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
const DebugLoc &dl, SDVTList VTs, EVT MemVT,
MachineMemOperand *MMO)
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {}
// In the both nodes address is Op1, mask is Op2:
@ -1877,7 +1884,7 @@ public:
class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
public:
friend class SelectionDAG;
MaskedLoadSDNode(unsigned Order, DebugLoc dl, SDVTList VTs,
MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
ISD::LoadExtType ETy, EVT MemVT, MachineMemOperand *MMO)
: MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, MemVT, MMO) {
SubclassData |= (unsigned short)ETy;
@ -1897,8 +1904,8 @@ class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
public:
friend class SelectionDAG;
MaskedStoreSDNode(unsigned Order, DebugLoc dl, SDVTList VTs, bool isTrunc,
EVT MemVT, MachineMemOperand *MMO)
MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
bool isTrunc, EVT MemVT, MachineMemOperand *MMO)
: MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, MemVT, MMO) {
SubclassData |= (unsigned short)isTrunc;
}
@ -1920,8 +1927,9 @@ public:
class MaskedGatherScatterSDNode : public MemSDNode {
public:
friend class SelectionDAG;
MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order, DebugLoc dl,
SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
const DebugLoc &dl, SDVTList VTs, EVT MemVT,
MachineMemOperand *MMO)
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {}
// In the both nodes address is Op1, mask is Op2:
@ -1944,8 +1952,8 @@ public:
class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
public:
friend class SelectionDAG;
MaskedGatherSDNode(unsigned Order, DebugLoc dl, SDVTList VTs, EVT MemVT,
MachineMemOperand *MMO)
MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
EVT MemVT, MachineMemOperand *MMO)
: MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO) {}
static bool classof(const SDNode *N) {
@ -1959,8 +1967,8 @@ class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
public:
friend class SelectionDAG;
MaskedScatterSDNode(unsigned Order, DebugLoc dl, SDVTList VTs, EVT MemVT,
MachineMemOperand *MMO)
MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
EVT MemVT, MachineMemOperand *MMO)
: MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO) {}
static bool classof(const SDNode *N) {
@ -1977,7 +1985,7 @@ public:
private:
friend class SelectionDAG;
MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc DL, SDVTList VTs)
MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
: SDNode(Opc, Order, DL, VTs), MemRefs(nullptr), MemRefsEnd(nullptr) {}
/// Memory reference descriptions for this instruction.


@ -46,10 +46,13 @@ public:
/// expanded in a place where calls are not feasible (e.g. within the prologue
/// for another call). If the target chooses to decline an AlwaysInline
/// request here, legalize will resort to using simple loads and stores.
virtual SDValue EmitTargetCodeForMemcpy(
SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
virtual SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
SDValue Chain, SDValue Op1,
SDValue Op2, SDValue Op3,
unsigned Align, bool isVolatile,
bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const {
return SDValue();
}
@ -60,8 +63,8 @@ public:
/// SDValue if the target declines to use custom code and a different
/// lowering strategy should be used.
virtual SDValue EmitTargetCodeForMemmove(
SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1,
SDValue Op2, SDValue Op3, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
return SDValue();
}
@ -72,7 +75,7 @@ public:
/// efficient than using a library call. This function can return a null
/// SDValue if the target declines to use custom code and a different
/// lowering strategy should be used.
virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
SDValue Chain, SDValue Op1,
SDValue Op2, SDValue Op3,
unsigned Align, bool isVolatile,
@ -85,7 +88,7 @@ public:
/// memcmp and the second is the chain. Both SDValues can be null if a normal
/// libcall should be used.
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
SDValue Op1, SDValue Op2, SDValue Op3,
MachinePointerInfo Op1PtrInfo,
MachinePointerInfo Op2PtrInfo) const {
@ -97,7 +100,7 @@ public:
/// memchr and the second is the chain. Both SDValues can be null if a normal
/// libcall should be used.
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
SDValue Src, SDValue Char, SDValue Length,
MachinePointerInfo SrcPtrInfo) const {
return std::make_pair(SDValue(), SDValue());
@ -110,7 +113,7 @@ public:
/// for stpcpy) and the second is the chain. Both SDValues can be null
/// if a normal libcall should be used.
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Dest, SDValue Src,
MachinePointerInfo DestPtrInfo,
MachinePointerInfo SrcPtrInfo, bool isStpcpy) const {
@ -121,20 +124,22 @@ public:
/// faster than a libcall.
/// The first returned SDValue is the result of the strcmp and the second is
/// the chain. Both SDValues can be null if a normal libcall should be used.
virtual std::pair<SDValue, SDValue> EmitTargetCodeForStrcmp(
SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Op1, SDValue Op2,
MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const {
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
SDValue Op1, SDValue Op2,
MachinePointerInfo Op1PtrInfo,
MachinePointerInfo Op2PtrInfo) const {
return std::make_pair(SDValue(), SDValue());
}
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Src, MachinePointerInfo SrcPtrInfo) const {
return std::make_pair(SDValue(), SDValue());
}
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Src, SDValue MaxLength,
MachinePointerInfo SrcPtrInfo) const {
return std::make_pair(SDValue(), SDValue());


@ -542,7 +542,7 @@ public:
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
const DebugLoc &DL) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::InsertBranch!");
}
@ -689,7 +689,7 @@ public:
/// @param TrueReg Virtual register to copy when Cond is true.
/// @param FalseReg Virtual register to copy when Cons is false.
virtual void insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
MachineBasicBlock::iterator I, const DebugLoc &DL,
unsigned DstReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
@ -752,7 +752,7 @@ public:
/// careful implementation when multiple copy instructions are required for
/// large registers. See for example the ARM target.
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
MachineBasicBlock::iterator MI, const DebugLoc &DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");


@ -2207,15 +2207,15 @@ public:
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
SDValue &Chain) const;
void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
SDValue &NewLHS, SDValue &NewRHS,
ISD::CondCode &CCCode, SDLoc DL) const;
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
SDValue &NewRHS, ISD::CondCode &CCCode,
const SDLoc &DL) const;
/// Returns a pair of (return value, chain).
/// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
EVT RetVT, ArrayRef<SDValue> Ops,
bool isSigned, SDLoc dl,
bool isSigned, const SDLoc &dl,
bool doesNotReturn = false,
bool isReturnValueUsed = true) const;
@ -2264,7 +2264,7 @@ public:
/// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
SDLoc dl);
const SDLoc &dl);
};
/// Look at Op. At this point, we know that only the DemandedMask bits of the
@ -2332,9 +2332,9 @@ public:
/// Try to simplify a setcc built with the specified operands and cc. If it is
/// unable to simplify it, return a null SDValue.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, bool foldBooleans,
DAGCombinerInfo &DCI, SDLoc dl) const;
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
bool foldBooleans, DAGCombinerInfo &DCI,
const SDLoc &dl) const;
/// Returns true (and the GlobalValue and the offset) if the node is a
/// GlobalAddress + offset.
@ -2434,12 +2434,10 @@ public:
/// should fill in the InVals array with legal-type argument values, and
/// return the resulting token chain value.
///
virtual SDValue
LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
bool /*isVarArg*/,
const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
SDLoc /*dl*/, SelectionDAG &/*DAG*/,
SmallVectorImpl<SDValue> &/*InVals*/) const {
virtual SDValue LowerFormalArguments(
SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
llvm_unreachable("Not Implemented");
}
@ -2506,7 +2504,7 @@ public:
CallConv(CallingConv::C), DAG(DAG), CS(nullptr), IsPatchPoint(false) {
}
CallLoweringInfo &setDebugLoc(SDLoc dl) {
CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
DL = dl;
return *this;
}
@ -2640,12 +2638,12 @@ public:
/// This hook must be implemented to lower outgoing return values, described
/// by the Outs array, into the specified DAG. The implementation should
/// return the resulting token chain value.
virtual SDValue
LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
bool /*isVarArg*/,
const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
const SmallVectorImpl<SDValue> & /*OutVals*/,
SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
const SDLoc & /*dl*/,
SelectionDAG & /*DAG*/) const {
llvm_unreachable("Not Implemented");
}
@ -2713,7 +2711,7 @@ public:
/// which allows a CPU to reuse the result of a previous load indefinitely,
/// even if a cache-coherent store is performed by another CPU. The default
/// implementation does nothing.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
SelectionDAG &DAG) const {
return Chain;
}
@ -3040,7 +3038,7 @@ public:
private:
SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, DAGCombinerInfo &DCI,
SDLoc DL) const;
const SDLoc &DL) const;
};
/// Given an LLVM IR type and return type attributes, compute the return value
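
As a usage sketch (the helper name is illustrative, the rest is the existing API): an SDLoc is typically constructed from the node being visited and then handed down by const reference, for example into SimplifySetCC with the new signature:

SDValue foldSetCCNode(const TargetLowering &TLI, SDNode *N,
                      TargetLowering::DAGCombinerInfo &DCI) {
  assert(N->getOpcode() == ISD::SETCC && "expects a setcc node");
  SDLoc DL(N); // cheap wrapper around the node's debug location and IR order
  return TLI.SimplifySetCC(N->getValueType(0), N->getOperand(0),
                           N->getOperand(1),
                           cast<CondCodeSDNode>(N->getOperand(2))->get(),
                           /*foldBooleans=*/true, DCI, DL); // DL by const ref
}
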

View File

@ -169,7 +169,7 @@ static void addLocIfNotPresent(SmallVectorImpl<const DILocation *> &Locs,
Locs.push_back(Loc);
}
void CodeViewDebug::maybeRecordLocation(DebugLoc DL,
void CodeViewDebug::maybeRecordLocation(const DebugLoc &DL,
const MachineFunction *MF) {
// Skip this instruction if it has the same location as the previous one.
if (DL == CurFn->LastLoc)

View File

@ -150,7 +150,7 @@ class LLVM_LIBRARY_VISIBILITY CodeViewDebug : public DebugHandlerBase {
unsigned maybeRecordFile(const DIFile *F);
void maybeRecordLocation(DebugLoc DL, const MachineFunction *MF);
void maybeRecordLocation(const DebugLoc &DL, const MachineFunction *MF);
void clear() {
assert(CurFn == nullptr);

View File

@ -64,7 +64,7 @@ class GCMachineCodeAnalysis : public MachineFunctionPass {
void FindSafePoints(MachineFunction &MF);
void VisitCallPoint(MachineBasicBlock::iterator MI);
MCSymbol *InsertLabel(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
DebugLoc DL) const;
const DebugLoc &DL) const;
void FindStackOffsets(MachineFunction &MF);
@ -270,7 +270,7 @@ void GCMachineCodeAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
MCSymbol *GCMachineCodeAnalysis::InsertLabel(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
DebugLoc DL) const {
const DebugLoc &DL) const {
MCSymbol *Label = MBB.getParent()->getContext().createTempSymbol();
BuildMI(MBB, MI, DL, TII->get(TargetOpcode::GC_LABEL)).addSym(Label);
return Label;

View File

@ -305,7 +305,7 @@ class LDVImpl {
/// getUserValue - Find or create a UserValue.
UserValue *getUserValue(const MDNode *Var, const MDNode *Expr,
unsigned Offset, bool IsIndirect, DebugLoc DL);
unsigned Offset, bool IsIndirect, const DebugLoc &DL);
/// lookupVirtReg - Find the EC leader for VirtReg or null.
UserValue *lookupVirtReg(unsigned VirtReg);
@ -358,7 +358,7 @@ public:
};
} // namespace
static void printDebugLoc(DebugLoc DL, raw_ostream &CommentOS,
static void printDebugLoc(const DebugLoc &DL, raw_ostream &CommentOS,
const LLVMContext &Ctx) {
if (!DL)
return;
@ -459,7 +459,7 @@ void UserValue::mapVirtRegs(LDVImpl *LDV) {
UserValue *LDVImpl::getUserValue(const MDNode *Var, const MDNode *Expr,
unsigned Offset, bool IsIndirect,
DebugLoc DL) {
const DebugLoc &DL) {
UserValue *&Leader = userVarMap[Var];
if (Leader) {
UserValue *UV = Leader->getLeader();

View File

@ -247,9 +247,9 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
}
/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
MachineInstr *
MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
DebugLoc DL, bool NoImp) {
MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
const DebugLoc &DL,
bool NoImp) {
return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
MachineInstr(*this, MCID, DL, NoImp);
}

View File

@ -2168,7 +2168,7 @@ void MachineInstr::emitError(StringRef Msg) const {
report_fatal_error(Msg);
}
MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, DebugLoc DL,
MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
const MCInstrDesc &MCID, bool IsIndirect,
unsigned Reg, unsigned Offset,
const MDNode *Variable, const MDNode *Expr) {
@ -2193,10 +2193,11 @@ MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, DebugLoc DL,
}
MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I, DebugLoc DL,
const MCInstrDesc &MCID, bool IsIndirect,
unsigned Reg, unsigned Offset,
const MDNode *Variable, const MDNode *Expr) {
MachineBasicBlock::iterator I,
const DebugLoc &DL, const MCInstrDesc &MCID,
bool IsIndirect, unsigned Reg,
unsigned Offset, const MDNode *Variable,
const MDNode *Expr) {
assert(isa<DILocalVariable>(Variable) && "not a variable");
assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
MachineFunction &MF = *BB.getParent();
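
Call sites are unaffected by the change; a DebugLoc lvalue binds directly to the const reference parameter. A minimal sketch (the helper name is hypothetical, the BuildMI pattern is the existing one):

void emitCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
              const DebugLoc &DL, const TargetInstrInfo *TII,
              unsigned DestReg, unsigned SrcReg) {
  // DestReg = COPY SrcReg; the caller's DebugLoc is reused without a copy.
  BuildMI(MBB, I, DL, TII->get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
}
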

View File

@ -212,8 +212,8 @@ namespace {
SDValue PromoteExtend(SDValue Op);
bool PromoteLoad(SDValue Op);
void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
SDValue Trunc, SDValue ExtLoad, SDLoc DL,
void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs, SDValue Trunc,
SDValue ExtLoad, const SDLoc &DL,
ISD::NodeType ExtType);
/// Call the node-specific routine that knows how to fold each
@ -329,18 +329,19 @@ namespace {
SDValue visitFMULForFMACombine(SDNode *N);
SDValue XformToShuffleWithZero(SDNode *N);
SDValue ReassociateOps(unsigned Opc, SDLoc DL, SDValue LHS, SDValue RHS);
SDValue ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue LHS,
SDValue RHS);
SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);
bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
SDValue SimplifySelect(SDLoc DL, SDValue N0, SDValue N1, SDValue N2);
SDValue SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1, SDValue N2,
SDValue N3, ISD::CondCode CC,
SDValue SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2);
SDValue SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
SDValue N2, SDValue N3, ISD::CondCode CC,
bool NotExtCompare = false);
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
SDLoc DL, bool foldBooleans = true);
const SDLoc &DL, bool foldBooleans = true);
bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
SDValue &CC) const;
@ -367,8 +368,8 @@ namespace {
SDNode *MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
SDValue InnerPos, SDValue InnerNeg,
unsigned PosOpcode, unsigned NegOpcode,
SDLoc DL);
SDNode *MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL);
const SDLoc &DL);
SDNode *MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL);
SDValue ReduceLoadWidth(SDNode *N);
SDValue ReduceLoadOpStoreWidth(SDNode *N);
SDValue TransformFPLoadStorePair(SDNode *N);
@ -420,8 +421,7 @@ namespace {
/// This is a helper function for MergeStoresOfConstantsOrVecElts. Returns a
/// constant build_vector of the stored constant values in Stores.
SDValue getMergedConstantVectorStore(SelectionDAG &DAG,
SDLoc SL,
SDValue getMergedConstantVectorStore(SelectionDAG &DAG, const SDLoc &SL,
ArrayRef<MemOpLink> Stores,
SmallVectorImpl<SDValue> &Chains,
EVT Ty) const;
@ -823,8 +823,8 @@ static ConstantFPSDNode *isConstOrConstSplatFP(SDValue N) {
return nullptr;
}
SDValue DAGCombiner::ReassociateOps(unsigned Opc, SDLoc DL,
SDValue N0, SDValue N1) {
SDValue DAGCombiner::ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
SDValue N1) {
EVT VT = N0.getValueType();
if (N0.getOpcode() == Opc) {
if (SDNode *L = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
@ -1851,9 +1851,9 @@ SDValue DAGCombiner::visitADDE(SDNode *N) {
// Since it may not be valid to emit a fold to zero for vector initializers
// check if we can before folding.
static SDValue tryFoldToZero(SDLoc DL, const TargetLowering &TLI, EVT VT,
SelectionDAG &DAG,
bool LegalOperations, bool LegalTypes) {
static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT,
SelectionDAG &DAG, bool LegalOperations,
bool LegalTypes) {
if (!VT.isVector())
return DAG.getConstant(0, DL, VT);
if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
@ -4000,7 +4000,7 @@ static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize) {
SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
SDValue Neg, SDValue InnerPos,
SDValue InnerNeg, unsigned PosOpcode,
unsigned NegOpcode, SDLoc DL) {
unsigned NegOpcode, const SDLoc &DL) {
// fold (or (shl x, (*ext y)),
// (srl x, (*ext (sub 32, y)))) ->
// (rotl x, y) or (rotr x, (sub 32, y))
@ -4021,7 +4021,7 @@ SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
// MatchRotate - Handle an 'or' of two operands. If this is one of the many
// idioms for rotate, and if the target supports rotation instructions, generate
// a rot[lr].
SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) {
// Must be a legal type. Expanded 'n promoted things won't work with rotates.
EVT VT = LHS.getValueType();
if (!TLI.isTypeLegal(VT)) return nullptr;
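
The idiom being matched is the usual shift/or rotate pattern; a standalone C++ sketch of the source form that MatchRotate turns into a single rotl/rotr node on targets with rotate instructions:

#include <cstdint>

uint32_t rotl32(uint32_t X, unsigned N) {
  N &= 31; // keep both shift amounts in range and avoid undefined behavior
  return (X << N) | (X >> ((32 - N) & 31));
}
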
@ -5037,8 +5037,8 @@ SDValue DAGCombiner::visitCTPOP(SDNode *N) {
/// \brief Generate Min/Max node
static SDValue combineMinNumMaxNum(SDLoc DL, EVT VT, SDValue LHS, SDValue RHS,
SDValue True, SDValue False,
static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS,
SDValue RHS, SDValue True, SDValue False,
ISD::CondCode CC, const TargetLowering &TLI,
SelectionDAG &DAG) {
if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
@ -5900,8 +5900,8 @@ static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
}
void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
SDValue Trunc, SDValue ExtLoad, SDLoc DL,
ISD::NodeType ExtType) {
SDValue Trunc, SDValue ExtLoad,
const SDLoc &DL, ISD::NodeType ExtType) {
// Extend SetCC uses if necessary.
for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
SDNode *SetCC = SetCCs[i];
@ -11241,11 +11241,9 @@ bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode,
return false;
}
SDValue DAGCombiner::getMergedConstantVectorStore(SelectionDAG &DAG,
SDLoc SL,
ArrayRef<MemOpLink> Stores,
SmallVectorImpl<SDValue> &Chains,
EVT Ty) const {
SDValue DAGCombiner::getMergedConstantVectorStore(
SelectionDAG &DAG, const SDLoc &SL, ArrayRef<MemOpLink> Stores,
SmallVectorImpl<SDValue> &Chains, EVT Ty) const {
SmallVector<SDValue, 8> BuildVector;
for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I) {
@ -14003,8 +14001,8 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
return SDValue();
}
SDValue DAGCombiner::SimplifySelect(SDLoc DL, SDValue N0,
SDValue N1, SDValue N2){
SDValue DAGCombiner::SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1,
SDValue N2) {
assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!");
SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
@ -14187,9 +14185,9 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
/// Simplify an expression of the form (N0 cond N1) ? N2 : N3
/// where 'cond' is the comparison specified by CC.
SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
SDValue N2, SDValue N3,
ISD::CondCode CC, bool NotExtCompare) {
SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
SDValue N2, SDValue N3, ISD::CondCode CC,
bool NotExtCompare) {
// (x ? y : y) -> y.
if (N2 == N3) return N2;
@ -14469,9 +14467,9 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
}
/// This is a stub for TargetLowering::SimplifySetCC.
SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0,
SDValue N1, ISD::CondCode Cond,
SDLoc DL, bool foldBooleans) {
SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, const SDLoc &DL,
bool foldBooleans) {
TargetLowering::DAGCombinerInfo
DagCombineInfo(DAG, Level, false, this);
return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);

View File

@ -1435,7 +1435,8 @@ bool FastISel::selectInstruction(const Instruction *I) {
/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
const DebugLoc &DbgLoc) {
if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
// For more accurate line information if this is the only instruction

View File

@ -441,7 +441,7 @@ void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
}
unsigned InstrEmitter::ConstrainForSubReg(unsigned VReg, unsigned SubIdx,
MVT VT, DebugLoc DL) {
MVT VT, const DebugLoc &DL) {
const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);

View File

@ -83,8 +83,8 @@ class LLVM_LIBRARY_VISIBILITY InstrEmitter {
/// ConstrainForSubReg - Try to constrain VReg to a register class that
/// supports SubIdx sub-registers. Emit a copy if that isn't possible.
/// Return the virtual register to use.
unsigned ConstrainForSubReg(unsigned VReg, unsigned SubIdx,
MVT VT, DebugLoc DL);
unsigned ConstrainForSubReg(unsigned VReg, unsigned SubIdx, MVT VT,
const DebugLoc &DL);
/// EmitSubregNode - Generate machine code for subreg nodes.
///

View File

@ -94,25 +94,25 @@ private:
/// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
SDValue Idx, SDLoc dl);
SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
SDValue Idx, SDLoc dl);
SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
const SDLoc &dl);
SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx,
const SDLoc &dl);
/// Return a vector shuffle operation which
/// performs the same shuffle in terms of order or result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, SDLoc dl,
SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, const SDLoc &dl,
SDValue N1, SDValue N2,
ArrayRef<int> Mask) const;
bool LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
bool &NeedInvert, SDLoc dl);
bool &NeedInvert, const SDLoc &dl);
SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops,
unsigned NumOps, bool isSigned, SDLoc dl);
unsigned NumOps, bool isSigned, const SDLoc &dl);
std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
SDNode *Node, bool isSigned);
@ -129,26 +129,28 @@ private:
void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
void ExpandSinCosLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, SDLoc dl);
SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT,
const SDLoc &dl);
SDValue ExpandBUILD_VECTOR(SDNode *Node);
SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
SmallVectorImpl<SDValue> &Results);
void getSignAsIntValue(FloatSignAsInt &State, SDLoc DL, SDValue Value) const;
SDValue modifySignAsInt(const FloatSignAsInt &State, SDLoc DL,
void getSignAsIntValue(FloatSignAsInt &State, const SDLoc &DL,
SDValue Value) const;
SDValue modifySignAsInt(const FloatSignAsInt &State, const SDLoc &DL,
SDValue NewIntValue) const;
SDValue ExpandFCOPYSIGN(SDNode *Node) const;
SDValue ExpandFABS(SDNode *Node) const;
SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
SDLoc dl);
const SDLoc &dl);
SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
SDLoc dl);
const SDLoc &dl);
SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
SDLoc dl);
const SDLoc &dl);
SDValue ExpandBITREVERSE(SDValue Op, SDLoc dl);
SDValue ExpandBSWAP(SDValue Op, SDLoc dl);
SDValue ExpandBitCount(unsigned Opc, SDValue Op, SDLoc dl);
SDValue ExpandBITREVERSE(SDValue Op, const SDLoc &dl);
SDValue ExpandBSWAP(SDValue Op, const SDLoc &dl);
SDValue ExpandBitCount(unsigned Opc, SDValue Op, const SDLoc &dl);
SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
SDValue ExpandInsertToVectorThroughStack(SDValue Op);
@ -214,9 +216,8 @@ public:
/// performs the same shuffle in terms of order or result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue
SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, SDLoc dl,
SDValue N1, SDValue N2,
SDValue SelectionDAGLegalize::ShuffleWithNarrowerEltType(
EVT NVT, EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
ArrayRef<int> Mask) const {
unsigned NumMaskElts = VT.getVectorNumElements();
unsigned NumDestElts = NVT.getVectorNumElements();
@ -314,9 +315,10 @@ SDValue SelectionDAGLegalize::ExpandConstant(ConstantSDNode *CP) {
/// INSERT_VECTOR_ELT instruction. In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue SelectionDAGLegalize::
PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
SDLoc dl) {
SDValue SelectionDAGLegalize::PerformInsertVectorEltInMemory(SDValue Vec,
SDValue Val,
SDValue Idx,
const SDLoc &dl) {
SDValue Tmp1 = Vec;
SDValue Tmp2 = Val;
SDValue Tmp3 = Idx;
@ -357,9 +359,9 @@ PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
false, false, false, 0);
}
SDValue SelectionDAGLegalize::
ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, SDLoc dl) {
SDValue SelectionDAGLegalize::ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
SDValue Idx,
const SDLoc &dl) {
if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
// SCALAR_TO_VECTOR requires that the type of the value being inserted
// match the element type of the vector being created, except for
@ -1368,7 +1370,8 @@ struct FloatSignAsInt {
/// containing the sign bit if the target has no integer value capable of
/// holding all bits of the floating-point value.
void SelectionDAGLegalize::getSignAsIntValue(FloatSignAsInt &State,
SDLoc DL, SDValue Value) const {
const SDLoc &DL,
SDValue Value) const {
EVT FloatVT = Value.getValueType();
unsigned NumBits = FloatVT.getSizeInBits();
State.FloatVT = FloatVT;
@ -1420,7 +1423,8 @@ void SelectionDAGLegalize::getSignAsIntValue(FloatSignAsInt &State,
/// Replace the integer value produced by getSignAsIntValue() with a new value
/// and cast the result back to a floating-point type.
SDValue SelectionDAGLegalize::modifySignAsInt(const FloatSignAsInt &State,
SDLoc DL, SDValue NewIntValue) const {
const SDLoc &DL,
SDValue NewIntValue) const {
if (!State.Chain)
return DAG.getNode(ISD::BITCAST, DL, State.FloatVT, NewIntValue);
@ -1566,11 +1570,10 @@ void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
/// of a true/false result.
///
/// \returns true if the SetCC has been legalized, false if it hasn't.
bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
SDValue &LHS, SDValue &RHS,
SDValue &CC,
bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, SDValue &LHS,
SDValue &RHS, SDValue &CC,
bool &NeedInvert,
SDLoc dl) {
const SDLoc &dl) {
MVT OpVT = LHS.getSimpleValueType();
ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
NeedInvert = false;
@ -1667,10 +1670,8 @@ bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does
/// a load from the stack slot to DestVT, extending it if needed.
/// The resultant code need not be legal.
SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
EVT SlotVT,
EVT DestVT,
SDLoc dl) {
SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,
EVT DestVT, const SDLoc &dl) {
// Create the stack frame object.
unsigned SrcAlign = DAG.getDataLayout().getPrefTypeAlignment(
SrcOp.getValueType().getTypeForEVT(*DAG.getContext()));
@ -1993,7 +1994,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
/// and returning a result of type RetVT.
SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
const SDValue *Ops, unsigned NumOps,
bool isSigned, SDLoc dl) {
bool isSigned, const SDLoc &dl) {
TargetLowering::ArgListTy Args;
Args.reserve(NumOps);
@ -2268,10 +2269,9 @@ SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
/// INT_TO_FP operation of the specified operand when the target requests that
/// we expand it. At this point, we know that the result and operand types are
/// legal for the target.
SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue Op0,
SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, SDValue Op0,
EVT DestVT,
SDLoc dl) {
const SDLoc &dl) {
// TODO: Should any fast-math-flags be set for the created nodes?
if (Op0.getValueType() == MVT::i32 && TLI.isTypeLegal(MVT::f64)) {
@ -2479,10 +2479,9 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
/// we promote it. At this point, we know that the result and operand types are
/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
/// operation that takes a larger input.
SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp,
EVT DestVT,
SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT,
bool isSigned,
SDLoc dl) {
const SDLoc &dl) {
// First step, figure out the appropriate *INT_TO_FP operation to use.
EVT NewInTy = LegalOp.getValueType();
@ -2521,10 +2520,9 @@ SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp,
/// we promote it. At this point, we know that the result and operand types are
/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
/// operation that returns a larger result.
SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
EVT DestVT,
SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT,
bool isSigned,
SDLoc dl) {
const SDLoc &dl) {
// First step, figure out the appropriate FP_TO*INT operation to use.
EVT NewOutTy = DestVT;
@ -2561,7 +2559,7 @@ SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
}
/// Open code the operations for BITREVERSE.
SDValue SelectionDAGLegalize::ExpandBITREVERSE(SDValue Op, SDLoc dl) {
SDValue SelectionDAGLegalize::ExpandBITREVERSE(SDValue Op, const SDLoc &dl) {
EVT VT = Op.getValueType();
EVT SHVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
unsigned Sz = VT.getScalarSizeInBits();
@ -2586,7 +2584,7 @@ SDValue SelectionDAGLegalize::ExpandBITREVERSE(SDValue Op, SDLoc dl) {
}
/// Open code the operations for BSWAP of the specified operation.
SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, SDLoc dl) {
SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, const SDLoc &dl) {
EVT VT = Op.getValueType();
EVT SHVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
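
When the target has no native byte swap, ExpandBSWAP emits the shift-and-mask form; the i32 case corresponds to this standalone C++ sketch:

#include <cstdint>

uint32_t bswap32(uint32_t X) {
  return ((X & 0x000000FFu) << 24) | ((X & 0x0000FF00u) << 8) |
         ((X & 0x00FF0000u) >> 8) | ((X & 0xFF000000u) >> 24);
}
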
@ -2640,7 +2638,7 @@ SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, SDLoc dl) {
/// Expand the specified bitcount instruction into operations.
SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
SDLoc dl) {
const SDLoc &dl) {
switch (Opc) {
default: llvm_unreachable("Cannot expand this yet!");
case ISD::CTPOP: {

View File

@ -1516,7 +1516,7 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
void DAGTypeLegalizer::FloatExpandSetCCOperands(SDValue &NewLHS,
SDValue &NewRHS,
ISD::CondCode &CCCode,
SDLoc dl) {
const SDLoc &dl) {
SDValue LHSLo, LHSHi, RHSLo, RHSHi;
GetExpandedFloat(NewLHS, LHSLo, LHSHi);
GetExpandedFloat(NewRHS, RHSLo, RHSHi);

View File

@ -2786,7 +2786,7 @@ bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDValue &NewLHS,
SDValue &NewRHS,
ISD::CondCode &CCCode,
SDLoc dl) {
const SDLoc &dl) {
SDValue LHSLo, LHSHi, RHSLo, RHSHi;
GetExpandedInteger(NewLHS, LHSLo, LHSHi);
GetExpandedInteger(NewRHS, RHSLo, RHSHi);

View File

@ -389,7 +389,7 @@ private:
SDValue ExpandIntOp_ATOMIC_STORE(SDNode *N);
void IntegerExpandSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
ISD::CondCode &CCCode, SDLoc dl);
ISD::CondCode &CCCode, const SDLoc &dl);
//===--------------------------------------------------------------------===//
// Float to Integer Conversion Support: LegalizeFloatTypes.cpp
@ -543,8 +543,7 @@ private:
SDValue ExpandFloatOp_STORE(SDNode *N, unsigned OpNo);
void FloatExpandSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
ISD::CondCode &CCCode, SDLoc dl);
ISD::CondCode &CCCode, const SDLoc &dl);
//===--------------------------------------------------------------------===//
// Float promotion support: LegalizeFloatTypes.cpp

File diff suppressed because it is too large.

View File

@ -115,7 +115,7 @@ OptsizeJumpTableDensity("optsize-jump-table-density", cl::init(40), cl::Hidden,
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V);
@ -124,10 +124,9 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
const SDValue *Parts,
unsigned NumParts, MVT PartVT, EVT ValueVT,
const Value *V,
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V,
Optional<ISD::NodeType> AssertOp = None) {
if (ValueVT.isVector())
return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
@ -273,7 +272,7 @@ static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V) {
assert(ValueVT.isVector() && "Not a vector value");
@ -372,16 +371,16 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
}
static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc dl,
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
SDValue Val, SDValue *Parts, unsigned NumParts,
MVT PartVT, const Value *V);
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
MVT PartVT, const Value *V,
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
SDValue *Parts, unsigned NumParts, MVT PartVT,
const Value *V,
ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
EVT ValueVT = Val.getValueType();
@ -508,7 +507,7 @@ static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
MVT PartVT, const Value *V) {
EVT ValueVT = Val.getValueType();
@ -637,9 +636,8 @@ RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
/// If the Flag pointer is NULL, no flag is used.
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
FunctionLoweringInfo &FuncInfo,
SDLoc dl,
SDValue &Chain, SDValue *Flag,
const Value *V) const {
const SDLoc &dl, SDValue &Chain,
SDValue *Flag, const Value *V) const {
// A Value with type {} or [0 x %t] needs no registers.
if (ValueVTs.empty())
return SDValue();
@ -741,8 +739,9 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
/// specified value into the registers specified by this object. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
SDValue &Chain, SDValue *Flag, const Value *V,
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
const SDLoc &dl, SDValue &Chain, SDValue *Flag,
const Value *V,
ISD::NodeType PreferredExtendType) const {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
ISD::NodeType ExtendKind = PreferredExtendType;
@ -797,7 +796,7 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
/// operand list. This adds the code marker and includes the number of
/// values added into it.
void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
unsigned MatchingIdx, SDLoc dl,
unsigned MatchingIdx, const SDLoc &dl,
SelectionDAG &DAG,
std::vector<SDValue> &Ops) const {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@ -2000,7 +1999,8 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
/// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
/// variable if there exists one.
static SDValue getLoadStackGuard(SelectionDAG &DAG, SDLoc DL, SDValue &Chain) {
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
SDValue &Chain) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
MachineFunction &MF = DAG.getMachineFunction();
@ -4111,8 +4111,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
/// Op = (Op & 0x007fffff) | 0x3f800000;
///
/// where Op is the hexadecimal representation of floating point value.
static SDValue
GetSignificand(SelectionDAG &DAG, SDValue Op, SDLoc dl) {
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
DAG.getConstant(0x007fffff, dl, MVT::i32));
SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
@ -4125,9 +4124,8 @@ GetSignificand(SelectionDAG &DAG, SDValue Op, SDLoc dl) {
/// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
///
/// where Op is the hexadecimal representation of floating point value.
static SDValue
GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
SDLoc dl) {
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
const TargetLowering &TLI, const SDLoc &dl) {
SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
DAG.getConstant(0x7f800000, dl, MVT::i32));
SDValue t1 = DAG.getNode(
@ -4139,13 +4137,13 @@ GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
}
/// getF32Constant - Get 32-bit floating point constant.
static SDValue
getF32Constant(SelectionDAG &DAG, unsigned Flt, SDLoc dl) {
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
const SDLoc &dl) {
return DAG.getConstantFP(APFloat(APFloat::IEEEsingle, APInt(32, Flt)), dl,
MVT::f32);
}
static SDValue getLimitedPrecisionExp2(SDValue t0, SDLoc dl,
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
SelectionDAG &DAG) {
// TODO: What fast-math-flags should be set on the floating-point nodes?
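
GetSignificand and GetExponent above operate directly on the IEEE-754 single-precision bit pattern; a standalone C++ analogue of the two bit manipulations (helper names are illustrative):

#include <cstdint>
#include <cstring>

int exponentOf(uint32_t Bits) {
  // (float)(int)(((Op & 0x7f800000) >> 23) - 127)
  return (int)((Bits & 0x7f800000u) >> 23) - 127;
}

float significandOf(uint32_t Bits) {
  // Op = (Op & 0x007fffff) | 0x3f800000
  uint32_t M = (Bits & 0x007fffffu) | 0x3f800000u; // mantissa, exponent forced to 0
  float F;
  std::memcpy(&F, &M, sizeof F); // reinterpret as float; value lies in [1.0, 2.0)
  return F;
}
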
@ -4237,7 +4235,7 @@ static SDValue getLimitedPrecisionExp2(SDValue t0, SDLoc dl,
/// expandExp - Lower an exp intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp(SDLoc dl, SDValue Op, SelectionDAG &DAG,
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
const TargetLowering &TLI) {
if (Op.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
@ -4260,7 +4258,7 @@ static SDValue expandExp(SDLoc dl, SDValue Op, SelectionDAG &DAG,
/// expandLog - Lower a log intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog(SDLoc dl, SDValue Op, SelectionDAG &DAG,
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
const TargetLowering &TLI) {
// TODO: What fast-math-flags should be set on the floating-point nodes?
@ -4359,7 +4357,7 @@ static SDValue expandLog(SDLoc dl, SDValue Op, SelectionDAG &DAG,
/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
const TargetLowering &TLI) {
// TODO: What fast-math-flags should be set on the floating-point nodes?
@ -4457,7 +4455,7 @@ static SDValue expandLog2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog10(SDLoc dl, SDValue Op, SelectionDAG &DAG,
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
const TargetLowering &TLI) {
// TODO: What fast-math-flags should be set on the floating-point nodes?
@ -4548,7 +4546,7 @@ static SDValue expandLog10(SDLoc dl, SDValue Op, SelectionDAG &DAG,
/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
const TargetLowering &TLI) {
if (Op.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
@ -4560,7 +4558,7 @@ static SDValue expandExp2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
/// visitPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode with x == 10.0f.
static SDValue expandPow(SDLoc dl, SDValue LHS, SDValue RHS,
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
SelectionDAG &DAG, const TargetLowering &TLI) {
bool IsExp10 = false;
if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
@ -4589,7 +4587,7 @@ static SDValue expandPow(SDLoc dl, SDValue LHS, SDValue RHS,
/// ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue ExpandPowI(SDLoc DL, SDValue LHS, SDValue RHS,
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
SelectionDAG &DAG) {
// If RHS is a constant, we can expand this out to a multiplication tree,
// otherwise we end up lowering to a call to __powidf2 (for example). When
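
For a constant exponent, the multiplication tree amounts to square-and-multiply rather than a __powidf2 libcall; a standalone C++ sketch for a non-negative exponent:

double powiConst(double X, unsigned N) {
  double Result = 1.0;
  while (N) {
    if (N & 1)
      Result *= X; // take this factor when the current exponent bit is set
    X *= X;        // square for the next bit
    N >>= 1;
  }
  return Result;
}
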
@ -6450,9 +6448,8 @@ typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
///
/// OpInfo describes the operand.
///
static void GetRegistersForValue(SelectionDAG &DAG,
const TargetLowering &TLI,
SDLoc DL,
static void GetRegistersForValue(SelectionDAG &DAG, const TargetLowering &TLI,
const SDLoc &DL,
SDISelAsmOperandInfo &OpInfo) {
LLVMContext &Context = *DAG.getContext();
@ -7219,7 +7216,7 @@ void SelectionDAGBuilder::populateCallLoweringInfo(
/// only available in a register, then the runtime would need to trap when
/// execution reaches the StackMap in order to read the alloca's location.
static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
SDLoc DL, SmallVectorImpl<SDValue> &Ops,
const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
SelectionDAGBuilder &Builder) {
for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
SDValue OpVal = Builder.getValue(CS.getArgument(i));

View File

@ -995,8 +995,7 @@ struct RegsForValue {
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
SDLoc dl,
SDValue &Chain, SDValue *Flag,
const SDLoc &dl, SDValue &Chain, SDValue *Flag,
const Value *V = nullptr) const;
/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the specified
@ -1004,18 +1003,16 @@ struct RegsForValue {
/// as the input and updates them for the output Chain/Flag. If the Flag
/// pointer is nullptr, no flag is used. If V is not nullptr, then it is used
/// in printing better diagnostic messages on error.
void
getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl, SDValue &Chain,
SDValue *Flag, const Value *V = nullptr,
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
SDValue &Chain, SDValue *Flag, const Value *V = nullptr,
ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;
/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list. This adds the code marker, matching input operand index
/// (if applicable), and includes the number of values added into it.
void AddInlineAsmOperands(unsigned Kind,
bool HasMatching, unsigned MatchingIdx, SDLoc dl,
SelectionDAG &DAG,
std::vector<SDValue> &Ops) const;
void AddInlineAsmOperands(unsigned Kind, bool HasMatching,
unsigned MatchingIdx, const SDLoc &dl,
SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
};
} // end namespace llvm

View File

@ -1913,8 +1913,8 @@ bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen. Others should not call it.
void SelectionDAGISel::
SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops, SDLoc DL) {
void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
const SDLoc &DL) {
std::vector<SDValue> InOps;
std::swap(InOps, Ops);

View File

@ -111,11 +111,9 @@ void TargetLowering::ArgListEntry::setAttributes(ImmutableCallSite *CS,
/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG,
RTLIB::Libcall LC, EVT RetVT,
ArrayRef<SDValue> Ops,
bool isSigned, SDLoc dl,
bool doesNotReturn,
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
ArrayRef<SDValue> Ops, bool isSigned,
const SDLoc &dl, bool doesNotReturn,
bool isReturnValueUsed) const {
TargetLowering::ArgListTy Args;
Args.reserve(Ops.size());
@ -149,7 +147,7 @@ TargetLowering::makeLibCall(SelectionDAG &DAG,
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
SDValue &NewLHS, SDValue &NewRHS,
ISD::CondCode &CCCode,
SDLoc dl) const {
const SDLoc &dl) const {
assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
&& "Unsupported setcc type!");
@ -370,11 +368,10 @@ bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool
TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
unsigned BitWidth,
const APInt &Demanded,
SDLoc dl) {
const SDLoc &dl) {
assert(Op.getNumOperands() == 2 &&
"ShrinkDemandedOp only supports binary operators!");
assert(Op.getNode()->getNumValues() == 1 &&
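
The transformation described by the comment above has a simple standalone form: when only the low bits of the result are demanded, the arithmetic can be done in the narrow type and widened back for free. A C++ sketch for an i32 add of which only 16 bits are used:

#include <cstdint>

uint32_t addLow16(uint32_t X, uint32_t Y) {
  uint16_t Narrow = (uint16_t)((uint16_t)X + (uint16_t)Y); // i16 add of narrowed operands
  return (uint32_t)Narrow; // zero-extend back; low 16 bits match the i32 add
}
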
@ -1278,7 +1275,7 @@ bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
SDValue TargetLowering::simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond,
DAGCombinerInfo &DCI,
SDLoc DL) const {
const SDLoc &DL) const {
// Match these patterns in any of their permutations:
// (X & Y) == Y
// (X & Y) != Y
@ -1336,10 +1333,10 @@ SDValue TargetLowering::simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
/// Try to simplify a setcc built with the specified operands and cc. If it is
/// unable to simplify it, return a null SDValue.
SDValue
TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, bool foldBooleans,
DAGCombinerInfo &DCI, SDLoc dl) const {
DAGCombinerInfo &DCI,
const SDLoc &dl) const {
SelectionDAG &DAG = DCI.DAG;
// These setcc operations always fold.
@ -2782,7 +2779,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
/// \brief Given an exact SDIV by a constant, create a multiplication
/// with the multiplicative inverse of the constant.
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
SDLoc dl, SelectionDAG &DAG,
const SDLoc &dl, SelectionDAG &DAG,
std::vector<SDNode *> &Created) {
assert(d != 0 && "Division by zero!");
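
BuildExactSDIV relies on the fact that an exact division by an odd constant d can be rewritten as multiplication by d's multiplicative inverse modulo 2^N. The unsigned 32-bit analogue, with the inverse computed by Newton iteration, is a standalone sketch (names are illustrative):

#include <cstdint>

uint32_t inverseMod2_32(uint32_t D) { // D must be odd
  uint32_t X = D;                     // correct to 3 bits, since d*d == 1 (mod 8)
  for (int I = 0; I < 4; ++I)
    X *= 2 - D * X;                   // each Newton step doubles the correct bits
  return X;
}

uint32_t exactDivide(uint32_t X, uint32_t D) { // requires X % D == 0 and D odd
  return X * inverseMod2_32(D);
}
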

View File

@ -617,7 +617,7 @@ bool TailDuplicator::isSimpleBB(MachineBasicBlock *TailBB) {
}
static bool bothUsedInPHI(const MachineBasicBlock &A,
SmallPtrSet<MachineBasicBlock *, 8> SuccsB) {
const SmallPtrSet<MachineBasicBlock *, 8> &SuccsB) {
for (MachineBasicBlock *BB : A.successors())
if (SuccsB.count(BB) && !BB->empty() && BB->begin()->isPHI())
return true;

View File

@ -317,8 +317,8 @@ bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
// decrement/increment to allocate/deallocate the callee-save stack area by
// converting store/load to use pre/post increment version.
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
const TargetInstrInfo *TII, int CSStackSizeInc) {
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc) {
unsigned NewOpc;
bool NewIsUnscaled = false;

View File

@ -1214,7 +1214,7 @@ static bool isLegalArithImmed(uint64_t C) {
}
static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDLoc dl, SelectionDAG &DAG) {
const SDLoc &dl, SelectionDAG &DAG) {
EVT VT = LHS.getValueType();
if (VT.isFloatingPoint()) {
@ -1307,7 +1307,7 @@ static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
ISD::CondCode CC, SDValue CCOp,
AArch64CC::CondCode Predicate,
AArch64CC::CondCode OutCC,
SDLoc DL, SelectionDAG &DAG) {
const SDLoc &DL, SelectionDAG &DAG) {
unsigned Opcode = 0;
if (LHS.getValueType().isFloatingPoint()) {
assert(LHS.getValueType() != MVT::f128);
@ -1514,7 +1514,8 @@ static SDValue emitConjunctionDisjunctionTree(SelectionDAG &DAG, SDValue Val,
/// @}
static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &AArch64cc, SelectionDAG &DAG, SDLoc dl) {
SDValue &AArch64cc, SelectionDAG &DAG,
const SDLoc &dl) {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
EVT VT = RHS.getValueType();
uint64_t C = RHSC->getZExtValue();
@ -2479,8 +2480,8 @@ CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
SDValue AArch64TargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
@ -2677,7 +2678,8 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
}
void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
SelectionDAG &DAG, SDLoc DL,
SelectionDAG &DAG,
const SDLoc &DL,
SDValue &Chain) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
@ -2754,8 +2756,8 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
/// appropriate copies out of appropriate physical registers.
SDValue AArch64TargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
SDValue ThisVal) const {
CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
? RetCC_AArch64_WebKit_JS
@ -3331,7 +3333,7 @@ AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc DL, SelectionDAG &DAG) const {
const SDLoc &DL, SelectionDAG &DAG) const {
CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
? RetCC_AArch64_WebKit_JS
: RetCC_AArch64_AAPCS;
@ -3526,7 +3528,8 @@ AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
/// Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent the
/// above sequence, and expanded really late in the compilation flow, to ensure
/// the sequence is produced as per above.
SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
const SDLoc &DL,
SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy(DAG.getDataLayout());
@ -3967,7 +3970,7 @@ SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
SDValue RHS, SDValue TVal,
SDValue FVal, SDLoc dl,
SDValue FVal, const SDLoc &dl,
SelectionDAG &DAG) const {
// Handle f128 first, because it will result in a comparison of some RTLIB
// call result against zero.
@ -5444,7 +5447,7 @@ static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
SDValue RHS, SelectionDAG &DAG,
SDLoc dl) {
const SDLoc &dl) {
unsigned OpNum = (PFEntry >> 26) & 0x0F;
unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
@ -6716,7 +6719,7 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
AArch64CC::CondCode CC, bool NoNans, EVT VT,
SDLoc dl, SelectionDAG &DAG) {
const SDLoc &dl, SelectionDAG &DAG) {
EVT SrcVT = LHS.getValueType();
assert(VT.getSizeInBits() == SrcVT.getSizeInBits() &&
"function only supposed to emit natural comparisons");

View File

@ -425,10 +425,10 @@ private:
void addDRTypeForNEON(MVT VT);
void addQRTypeForNEON(MVT VT);
SDValue
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
SelectionDAG &DAG,
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCall(CallLoweringInfo & /*CLI*/,
@ -436,9 +436,10 @@ private:
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
bool isThisReturn, SDValue ThisVal) const;
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
SDValue ThisVal) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
@ -458,7 +459,7 @@ private:
bool IsTailCallConvention(CallingConv::ID CallCC) const;
void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
SDValue &Chain) const;
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
@ -468,21 +469,21 @@ private:
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const override;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
SDValue TVal, SDValue FVal, SDLoc dl,
SDValue TVal, SDValue FVal, const SDLoc &dl,
SelectionDAG &DAG) const;
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;

View File

@ -244,7 +244,7 @@ unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
}
void AArch64InstrInfo::instantiateCondBranch(
MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
ArrayRef<MachineOperand> Cond) const {
if (Cond[0].getImm() != -1) {
// Regular Bcc
@ -260,9 +260,11 @@ void AArch64InstrInfo::instantiateCondBranch(
}
}
unsigned AArch64InstrInfo::InsertBranch(
MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
unsigned AArch64InstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
@ -400,8 +402,8 @@ bool AArch64InstrInfo::canInsertSelect(
}
void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DstReg,
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DstReg,
ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
@ -1841,7 +1843,7 @@ bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
const MDNode *Expr, DebugLoc DL) const {
const MDNode *Expr, const DebugLoc &DL) const {
MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
.addFrameIndex(FrameIx)
.addImm(0)
@ -1871,7 +1873,7 @@ static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
}
void AArch64InstrInfo::copyPhysRegTuple(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL,
unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
llvm::ArrayRef<unsigned> Indices) const {
assert(Subtarget.hasNEON() &&
@ -1897,9 +1899,9 @@ void AArch64InstrInfo::copyPhysRegTuple(
}
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
if (AArch64::GPR32spRegClass.contains(DestReg) &&
(AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
const TargetRegisterInfo *TRI = &getRegisterInfo();
@ -2385,7 +2387,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
}
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, DebugLoc DL,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
unsigned DestReg, unsigned SrcReg, int Offset,
const TargetInstrInfo *TII,
MachineInstr::MIFlag Flag, bool SetNZCV) {

View File

@ -119,13 +119,14 @@ public:
MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
uint64_t Offset, const MDNode *Var,
const MDNode *Expr, DebugLoc DL) const;
const MDNode *Expr,
const DebugLoc &DL) const;
void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
DebugLoc DL, unsigned DestReg, unsigned SrcReg,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc, unsigned Opcode,
llvm::ArrayRef<unsigned> Indices) const;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
DebugLoc DL, unsigned DestReg, unsigned SrcReg,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@ -153,14 +154,15 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
const DebugLoc &DL) const override;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
unsigned, unsigned, int &, int &, int &) const override;
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
DebugLoc DL, unsigned DstReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const override;
const DebugLoc &DL, unsigned DstReg,
ArrayRef<MachineOperand> Cond, unsigned TrueReg,
unsigned FalseReg) const override;
void getNoopForMachoTarget(MCInst &NopInst) const override;
/// analyzeCompare - For a comparison instruction, return the source registers
@ -209,7 +211,7 @@ public:
getSerializableBitmaskMachineOperandTargetFlags() const override;
private:
void instantiateCondBranch(MachineBasicBlock &MBB, DebugLoc DL,
void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
MachineBasicBlock *TBB,
ArrayRef<MachineOperand> Cond) const;
bool substituteCmpToZero(MachineInstr *CmpInstr,
@ -221,8 +223,8 @@ private:
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
DebugLoc DL, unsigned DestReg, unsigned SrcReg, int Offset,
const TargetInstrInfo *TII,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
int Offset, const TargetInstrInfo *TII,
MachineInstr::MIFlag = MachineInstr::NoFlags,
bool SetNZCV = false);

View File

@ -17,7 +17,7 @@ using namespace llvm;
#define DEBUG_TYPE "aarch64-selectiondag-info"
SDValue AArch64SelectionDAGInfo::EmitTargetCodeForMemset(
SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
// Check to see if there is a specialized entry-point for memory zeroing.

View File

@ -20,10 +20,9 @@ namespace llvm {
class AArch64SelectionDAGInfo : public SelectionDAGTargetInfo {
public:
SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
SDValue Dst, SDValue Src, SDValue Size,
unsigned Align, bool isVolatile,
SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const override;
bool generateFMAsInMachineCombiner(CodeGenOpt::Level OptLevel) const override;
};

View File

@ -168,7 +168,7 @@ private:
void SelectADD_SUB_I64(SDNode *N);
void SelectDIV_SCALE(SDNode *N);
SDNode *getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
uint32_t Offset, uint32_t Width);
void SelectS_BFEFromShifts(SDNode *N);
void SelectS_BFE(SDNode *N);
@ -1364,8 +1364,9 @@ bool AMDGPUDAGToDAGISel::SelectSMRDBufferSgpr(SDValue Addr,
!isa<ConstantSDNode>(Offset);
}
SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
uint32_t Offset, uint32_t Width) {
SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
SDValue Val, uint32_t Offset,
uint32_t Width) {
// Transformation function, pack the offset and width of a BFE into
// the format expected by the S_BFE_I32 / S_BFE_U32. In the second
// source, bits [5:0] contain the offset and bits [22:16] the width.
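// Packing sketch derived from the comment above (PackedVal is a hypothetical
// local used only for illustration):
//   uint32_t PackedVal = ((Width & 0x7f) << 16) | (Offset & 0x3f);
//   SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
// i.e. the offset lands in bits [5:0] and the width in bits [22:16] of the
// second source operand.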


@ -644,13 +644,12 @@ void AMDGPUTargetLowering::AnalyzeReturn(CCState &State,
State.AnalyzeReturn(Outs, RetCC_SI);
}
SDValue AMDGPUTargetLowering::LowerReturn(
SDValue Chain,
CallingConv::ID CallConv,
SDValue
AMDGPUTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc DL, SelectionDAG &DAG) const {
const SDLoc &DL, SelectionDAG &DAG) const {
return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}
@ -949,12 +948,9 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
}
/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(SDLoc DL,
EVT VT,
SDValue LHS,
SDValue RHS,
SDValue True,
SDValue False,
SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(const SDLoc &DL, EVT VT,
SDValue LHS, SDValue RHS,
SDValue True, SDValue False,
SDValue CC,
DAGCombinerInfo &DCI) const {
if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
@ -1629,7 +1625,8 @@ SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
static SDValue extractF64Exponent(SDValue Hi, SDLoc SL, SelectionDAG &DAG) {
static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
SelectionDAG &DAG) {
const unsigned FractBits = 52;
const unsigned ExpBits = 11;
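// Background for the two constants above (IEEE-754 binary64 layout, stated
// here for reference rather than taken from this patch): bit 63 is the sign,
// bits [62:52] hold the 11-bit biased exponent (bias 1023), and bits [51:0]
// hold the 52-bit fraction.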
@ -2150,8 +2147,8 @@ static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
}
template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
uint32_t Offset, uint32_t Width, SDLoc DL) {
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
uint32_t Width, const SDLoc &DL) {
if (Width + Offset < 32) {
uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
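// Worked example of the constant fold above: extracting Width = 8 bits at
// Offset = 8 from Src0 = 0x0000FF00 gives
//   Shl    = 0x0000FF00 << 16 = 0xFF000000
//   Result = 0xFF000000 >> 24 = 0xFF        (unsigned IntTy)
// while a signed IntTy sign-extends the arithmetic shift to -1 (0xFFFFFFFF).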
@ -2405,8 +2402,8 @@ static bool isCtlzOpc(unsigned Opc) {
// type VT.
// Need to match pre-legalized type because the generic legalization inserts the
// add/sub between the select and compare.
static SDValue getFFBH_U32(const TargetLowering &TLI,
SelectionDAG &DAG, SDLoc SL, SDValue Op) {
static SDValue getFFBH_U32(const TargetLowering &TLI, SelectionDAG &DAG,
const SDLoc &SL, SDValue Op) {
EVT VT = Op.getValueType();
EVT LegalVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
if (LegalVT != MVT::i32)
@ -2429,10 +2426,8 @@ static SDValue getFFBH_U32(const TargetLowering &TLI,
// against the bitwidth.
//
// TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
SDValue AMDGPUTargetLowering::performCtlzCombine(SDLoc SL,
SDValue Cond,
SDValue LHS,
SDValue RHS,
SDValue AMDGPUTargetLowering::performCtlzCombine(const SDLoc &SL, SDValue Cond,
SDValue LHS, SDValue RHS,
DAGCombinerInfo &DCI) const {
ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
if (!CmpRhs || !CmpRhs->isNullValue())


@ -72,8 +72,8 @@ protected:
SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performCtlzCombine(SDLoc SL, SDValue Cond, SDValue LHS, SDValue RHS,
DAGCombinerInfo &DCI) const;
SDValue performCtlzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
SDValue RHS, DAGCombinerInfo &DCI) const;
SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;
static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);
@ -147,11 +147,10 @@ public:
bool isCheapToSpeculateCttz() const override;
bool isCheapToSpeculateCtlz() const override;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc DL, SelectionDAG &DAG) const override;
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
@ -164,14 +163,9 @@ public:
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const override;
SDValue CombineFMinMaxLegacy(SDLoc DL,
EVT VT,
SDValue LHS,
SDValue RHS,
SDValue True,
SDValue False,
SDValue CC,
DAGCombinerInfo &DCI) const;
SDValue CombineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
SDValue RHS, SDValue True, SDValue False,
SDValue CC, DAGCombinerInfo &DCI) const;
const char* getTargetNodeName(unsigned Opcode) const override;


@ -229,15 +229,15 @@ protected:
// Function originally from CFGStructTraits
void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
DebugLoc DL = DebugLoc());
const DebugLoc &DL = DebugLoc());
MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
DebugLoc DL = DebugLoc());
const DebugLoc &DL = DebugLoc());
MachineInstr *insertInstrBefore(MachineBasicBlock::iterator I, int NewOpcode);
void insertCondBranchBefore(MachineBasicBlock::iterator I, int NewOpcode,
DebugLoc DL);
const DebugLoc &DL);
void insertCondBranchBefore(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
DebugLoc DL);
MachineBasicBlock::iterator I, int NewOpcode,
int RegNum, const DebugLoc &DL);
void insertCondBranchEnd(MachineBasicBlock *MBB, int NewOpcode, int RegNum);
static int getBranchNzeroOpcode(int OldOpcode);
static int getBranchZeroOpcode(int OldOpcode);
@ -469,16 +469,17 @@ void AMDGPUCFGStructurizer::reversePredicateSetter(
}
void AMDGPUCFGStructurizer::insertInstrEnd(MachineBasicBlock *MBB,
int NewOpcode, DebugLoc DL) {
MachineInstr *MI = MBB->getParent()
->CreateMachineInstr(TII->get(NewOpcode), DL);
int NewOpcode, const DebugLoc &DL) {
MachineInstr *MI =
MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DL);
MBB->push_back(MI);
//assume the instruction doesn't take any reg operand ...
SHOWNEWINSTR(MI);
}
MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(MachineBasicBlock *MBB,
int NewOpcode, DebugLoc DL) {
int NewOpcode,
const DebugLoc &DL) {
MachineInstr *MI =
MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DL);
if (MBB->begin() != MBB->end())
@ -502,7 +503,7 @@ MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(
}
void AMDGPUCFGStructurizer::insertCondBranchBefore(
MachineBasicBlock::iterator I, int NewOpcode, DebugLoc DL) {
MachineBasicBlock::iterator I, int NewOpcode, const DebugLoc &DL) {
MachineInstr *OldMI = &(*I);
MachineBasicBlock *MBB = OldMI->getParent();
MachineFunction *MF = MBB->getParent();
@ -514,9 +515,9 @@ void AMDGPUCFGStructurizer::insertCondBranchBefore(
//erase later oldInstr->eraseFromParent();
}
void AMDGPUCFGStructurizer::insertCondBranchBefore(MachineBasicBlock *blk,
MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
DebugLoc DL) {
void AMDGPUCFGStructurizer::insertCondBranchBefore(
MachineBasicBlock *blk, MachineBasicBlock::iterator I, int NewOpcode,
int RegNum, const DebugLoc &DL) {
MachineFunction *MF = blk->getParent();
MachineInstr *NewInstr = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
//insert before


@ -1077,7 +1077,7 @@ SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
}
SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
SDLoc DL,
const SDLoc &DL,
unsigned DwordOffset) const {
unsigned ByteOffset = DwordOffset * 4;
PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
@ -1747,12 +1747,9 @@ SDValue R600TargetLowering::lowerFrameIndex(SDValue Op,
/// every function is a kernel function, but in the future we should use
/// separate calling conventions for kernel and non-kernel functions.
SDValue R600TargetLowering::LowerFormalArguments(
SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
@ -1931,10 +1928,9 @@ static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
NewBldVec);
}
SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector,
SDValue Swz[4], SelectionDAG &DAG,
SDLoc DL) const {
SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[4],
SelectionDAG &DAG,
const SDLoc &DL) const {
assert(BuildVector.getOpcode() == ISD::BUILD_VECTOR);
// Old -> New swizzle values
DenseMap<unsigned, unsigned> SwizzleRemap;


@ -31,12 +31,10 @@ public:
void ReplaceNodeResults(SDNode * N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const override;
SDValue LowerFormalArguments(
SDValue Chain,
CallingConv::ID CallConv,
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc DL, SelectionDAG &DAG,
const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
EVT VT) const override;
@ -51,13 +49,13 @@ private:
/// first nine dwords of a Vertex Buffer. These implicit parameters are
/// lowered to load instructions which retrieve the values from the Vertex
/// Buffer.
SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
SDLoc DL, unsigned DwordOffset) const;
SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT, const SDLoc &DL,
unsigned DwordOffset) const;
void lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB,
MachineRegisterInfo & MRI, unsigned dword_offset) const;
SDValue OptimizeSwizzle(SDValue BuildVector, SDValue Swz[], SelectionDAG &DAG,
SDLoc DL) const;
const SDLoc &DL) const;
SDValue vectorToVerticalVector(SelectionDAG &DAG, SDValue Vector) const;
SDValue lowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;


@ -43,11 +43,10 @@ bool R600InstrInfo::isVector(const MachineInstr &MI) const {
return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}
void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
unsigned VectorComponents = 0;
if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
@ -772,12 +771,11 @@ MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
return MBB.end();
}
unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
unsigned R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
const DebugLoc &DL) const {
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
if (!FBB) {


@ -58,9 +58,8 @@ namespace llvm {
explicit R600InstrInfo(const AMDGPUSubtarget &st);
const R600RegisterInfo &getRegisterInfo() const override;
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const override;
@ -161,7 +160,7 @@ namespace llvm {
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
const DebugLoc &DL) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;


@ -536,7 +536,7 @@ bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
}
SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
SDLoc SL, SDValue Chain,
const SDLoc &SL, SDValue Chain,
unsigned Offset, bool Signed) const {
const DataLayout &DL = DAG.getDataLayout();
MachineFunction &MF = DAG.getMachineFunction();
@ -572,8 +572,8 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
SDValue SITargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
const SIRegisterInfo *TRI =
static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
@ -916,12 +916,12 @@ SDValue SITargetLowering::LowerFormalArguments(
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}
SDValue SITargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv,
SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc DL, SelectionDAG &DAG) const {
const SDLoc &DL, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
@ -1416,8 +1416,8 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
return DAG.getUNDEF(ASC->getValueType(0));
}
SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
SDValue V) const {
SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
const SDLoc &DL, SDValue V) const {
// We can't use S_MOV_B32 directly, because there is no way to specify m0 as
// the destination register.
//
@ -2575,11 +2575,8 @@ static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
}
}
static SDValue performIntMed3ImmCombine(SelectionDAG &DAG,
SDLoc SL,
SDValue Op0,
SDValue Op1,
bool Signed) {
static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
SDValue Op0, SDValue Op1, bool Signed) {
ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
if (!K1)
return SDValue();
@ -2609,10 +2606,8 @@ static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
return DAG.isKnownNeverNaN(Op);
}
static SDValue performFPMed3ImmCombine(SelectionDAG &DAG,
SDLoc SL,
SDValue Op0,
SDValue Op1) {
static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
SDValue Op0, SDValue Op1) {
ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1);
if (!K1)
return SDValue();
@ -3166,13 +3161,14 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
}
}
static SDValue buildSMovImm32(SelectionDAG &DAG, SDLoc DL, uint64_t Val) {
static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
uint64_t Val) {
SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}
MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
SDLoc DL,
const SDLoc &DL,
SDValue Ptr) const {
const SIInstrInfo *TII =
static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
@ -3207,10 +3203,8 @@ MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
/// of the resource descriptor) to create an offset, which is added to
/// the resource pointer.
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG,
SDLoc DL,
SDValue Ptr,
uint32_t RsrcDword1,
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
SDValue Ptr, uint32_t RsrcDword1,
uint64_t RsrcDword2And3) const {
SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
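// Addressing sketch restating the comment above (illustration only):
//   effective address = resource pointer + TID * stride
// where the stride is the field in bits [61:48] of the 128-bit descriptor
// being assembled here.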


@ -21,7 +21,7 @@
namespace llvm {
class SITargetLowering final : public AMDGPUTargetLowering {
SDValue LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, SDLoc DL,
SDValue LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &DL,
SDValue Chain, unsigned Offset, bool Signed) const;
SDValue lowerImplicitZextParam(SelectionDAG &DAG, SDValue Op,
@ -102,15 +102,13 @@ public:
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc DL, SelectionDAG &DAG,
const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerReturn(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc DL, SelectionDAG &DAG) const override;
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const override;
unsigned getRegisterByName(const char* RegName, EVT VT,
SelectionDAG &DAG) const override;
@ -133,17 +131,16 @@ public:
unsigned Reg, EVT VT) const override;
void legalizeTargetIndependentNode(SDNode *Node, SelectionDAG &DAG) const;
MachineSDNode *wrapAddr64Rsrc(SelectionDAG &DAG, SDLoc DL, SDValue Ptr) const;
MachineSDNode *buildRSRC(SelectionDAG &DAG,
SDLoc DL,
SDValue Ptr,
uint32_t RsrcDword1,
uint64_t RsrcDword2And3) const;
MachineSDNode *wrapAddr64Rsrc(SelectionDAG &DAG, const SDLoc &DL,
SDValue Ptr) const;
MachineSDNode *buildRSRC(SelectionDAG &DAG, const SDLoc &DL, SDValue Ptr,
uint32_t RsrcDword1, uint64_t RsrcDword2And3) const;
std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const override;
ConstraintType getConstraintType(StringRef Constraint) const override;
SDValue copyToM0(SelectionDAG &DAG, SDValue Chain, SDLoc DL, SDValue V) const;
SDValue copyToM0(SelectionDAG &DAG, SDValue Chain, const SDLoc &DL,
SDValue V) const;
};
} // End namespace llvm


@ -339,11 +339,10 @@ bool SIInstrInfo::shouldClusterMemOps(MachineInstr *FirstLdSt,
return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
}
void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
// If we are trying to copy to or from SCC, there is a bug somewhere else in
// the backend. While it may be theoretically possible to do this, it should
@ -1173,7 +1172,7 @@ unsigned SIInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
const DebugLoc &DL) const {
if (!FBB && Cond.empty()) {
BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))


@ -112,9 +112,8 @@ public:
MachineInstr *SecondLdSt,
unsigned NumLoads) const final;
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
unsigned calculateLDSSpillAddress(MachineBasicBlock &MBB,
@ -159,7 +158,7 @@ public:
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
const DebugLoc &DL) const override;
bool ReverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const override;


@ -68,34 +68,31 @@ namespace {
//
unsigned createDupLane(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL,
unsigned Reg, unsigned Lane,
const DebugLoc &DL, unsigned Reg, unsigned Lane,
bool QPR = false);
unsigned createExtractSubreg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL,
unsigned DReg, unsigned Lane,
const TargetRegisterClass *TRC);
const DebugLoc &DL, unsigned DReg,
unsigned Lane, const TargetRegisterClass *TRC);
unsigned createVExt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL,
unsigned Ssub0, unsigned Ssub1);
const DebugLoc &DL, unsigned Ssub0, unsigned Ssub1);
unsigned createRegSequence(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL,
unsigned Reg1, unsigned Reg2);
const DebugLoc &DL, unsigned Reg1,
unsigned Reg2);
unsigned createInsertSubreg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL, unsigned DReg, unsigned Lane,
unsigned ToInsert);
const DebugLoc &DL, unsigned DReg,
unsigned Lane, unsigned ToInsert);
unsigned createImplicitDef(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL);
const DebugLoc &DL);
//
// Various property checkers
@ -426,11 +423,10 @@ SmallVector<unsigned, 8> A15SDOptimizer::getReadDPRs(MachineInstr *MI) {
}
// Creates a DPR register from an SPR one by using a VDUP.
unsigned
A15SDOptimizer::createDupLane(MachineBasicBlock &MBB,
unsigned A15SDOptimizer::createDupLane(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL,
unsigned Reg, unsigned Lane, bool QPR) {
const DebugLoc &DL, unsigned Reg,
unsigned Lane, bool QPR) {
unsigned Out = MRI->createVirtualRegister(QPR ? &ARM::QPRRegClass :
&ARM::DPRRegClass);
AddDefaultPred(BuildMI(MBB,
@ -445,11 +441,9 @@ A15SDOptimizer::createDupLane(MachineBasicBlock &MBB,
}
// Creates an SPR register from a DPR by copying the value in lane 0.
unsigned
A15SDOptimizer::createExtractSubreg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL,
unsigned DReg, unsigned Lane,
unsigned A15SDOptimizer::createExtractSubreg(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
const DebugLoc &DL, unsigned DReg, unsigned Lane,
const TargetRegisterClass *TRC) {
unsigned Out = MRI->createVirtualRegister(TRC);
BuildMI(MBB,
@ -462,11 +456,9 @@ A15SDOptimizer::createExtractSubreg(MachineBasicBlock &MBB,
}
// Takes two SPR registers and creates a DPR by using a REG_SEQUENCE.
unsigned
A15SDOptimizer::createRegSequence(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL,
unsigned Reg1, unsigned Reg2) {
unsigned A15SDOptimizer::createRegSequence(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
const DebugLoc &DL, unsigned Reg1, unsigned Reg2) {
unsigned Out = MRI->createVirtualRegister(&ARM::QPRRegClass);
BuildMI(MBB,
InsertBefore,
@ -481,11 +473,10 @@ A15SDOptimizer::createRegSequence(MachineBasicBlock &MBB,
// Takes two DPR registers that have previously been VDUPed (Ssub0 and Ssub1)
// and merges them into one DPR register.
unsigned
A15SDOptimizer::createVExt(MachineBasicBlock &MBB,
unsigned A15SDOptimizer::createVExt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL,
unsigned Ssub0, unsigned Ssub1) {
const DebugLoc &DL, unsigned Ssub0,
unsigned Ssub1) {
unsigned Out = MRI->createVirtualRegister(&ARM::DPRRegClass);
AddDefaultPred(BuildMI(MBB,
InsertBefore,
@ -497,11 +488,9 @@ A15SDOptimizer::createVExt(MachineBasicBlock &MBB,
return Out;
}
unsigned
A15SDOptimizer::createInsertSubreg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL, unsigned DReg, unsigned Lane,
unsigned ToInsert) {
unsigned A15SDOptimizer::createInsertSubreg(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
const DebugLoc &DL, unsigned DReg, unsigned Lane, unsigned ToInsert) {
unsigned Out = MRI->createVirtualRegister(&ARM::DPR_VFP2RegClass);
BuildMI(MBB,
InsertBefore,
@ -517,7 +506,7 @@ A15SDOptimizer::createInsertSubreg(MachineBasicBlock &MBB,
unsigned
A15SDOptimizer::createImplicitDef(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL) {
const DebugLoc &DL) {
unsigned Out = MRI->createVirtualRegister(&ARM::DPRRegClass);
BuildMI(MBB,
InsertBefore,


@ -390,11 +390,11 @@ unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 2;
}
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
unsigned ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
const DebugLoc &DL) const {
ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
int BOpc = !AFI->isThumbFunction()
? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
@ -701,9 +701,9 @@ void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
}
void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
bool GPRDest = ARM::GPRRegClass.contains(DestReg);
bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
@ -1995,10 +1995,12 @@ unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
}
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
unsigned DestReg, unsigned BaseReg, int NumBytes,
MachineBasicBlock::iterator &MBBI,
const DebugLoc &dl, unsigned DestReg,
unsigned BaseReg, int NumBytes,
ARMCC::CondCodes Pred, unsigned PredReg,
const ARMBaseInstrInfo &TII, unsigned MIFlags) {
const ARMBaseInstrInfo &TII,
unsigned MIFlags) {
if (NumBytes == 0 && DestReg != BaseReg) {
BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
.addReg(BaseReg, RegState::Kill)


@ -129,7 +129,7 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
const DebugLoc &DL) const override;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
@ -175,7 +175,7 @@ public:
const ARMSubtarget &Subtarget) const;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
DebugLoc DL, unsigned DestReg, unsigned SrcReg,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@ -466,20 +466,23 @@ unsigned convertAddSubFlagsOpcode(unsigned OldOpc);
/// instructions to materialize a destreg = basereg + immediate in ARM / Thumb2
/// code.
void emitARMRegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
unsigned DestReg, unsigned BaseReg, int NumBytes,
MachineBasicBlock::iterator &MBBI,
const DebugLoc &dl, unsigned DestReg,
unsigned BaseReg, int NumBytes,
ARMCC::CondCodes Pred, unsigned PredReg,
const ARMBaseInstrInfo &TII, unsigned MIFlags = 0);
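// Illustrative call only (the local names are assumed, not taken from this
// patch): add a small immediate to SP under AL predication with
//   emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::R4, ARM::SP, 16, ARMCC::AL,
//                           /*PredReg=*/0, TII);
// where dl is now a const DebugLoc reference, as declared above.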
void emitT2RegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
unsigned DestReg, unsigned BaseReg, int NumBytes,
MachineBasicBlock::iterator &MBBI,
const DebugLoc &dl, unsigned DestReg,
unsigned BaseReg, int NumBytes,
ARMCC::CondCodes Pred, unsigned PredReg,
const ARMBaseInstrInfo &TII, unsigned MIFlags = 0);
void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
unsigned DestReg, unsigned BaseReg,
int NumBytes, const TargetInstrInfo &TII,
MachineBasicBlock::iterator &MBBI,
const DebugLoc &dl, unsigned DestReg,
unsigned BaseReg, int NumBytes,
const TargetInstrInfo &TII,
const ARMBaseRegisterInfo &MRI,
unsigned MIFlags = 0);


@ -408,13 +408,10 @@ ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::
emitLoadConstPool(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
DebugLoc dl,
unsigned DestReg, unsigned SubIdx, int Val,
ARMCC::CondCodes Pred,
unsigned PredReg, unsigned MIFlags) const {
void ARMBaseRegisterInfo::emitLoadConstPool(
MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();


@ -166,9 +166,9 @@ public:
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
virtual void emitLoadConstPool(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
DebugLoc dl, unsigned DestReg, unsigned SubIdx,
virtual void
emitLoadConstPool(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
const DebugLoc &dl, unsigned DestReg, unsigned SubIdx,
int Val, ARMCC::CondCodes Pred = ARMCC::AL,
unsigned PredReg = 0,
unsigned MIFlags = MachineInstr::NoFlags) const;


@ -120,13 +120,11 @@ static bool isCSRestore(MachineInstr *MI,
return false;
}
static void emitRegPlusImmediate(bool isARM, MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
const ARMBaseInstrInfo &TII, unsigned DestReg,
unsigned SrcReg, int NumBytes,
unsigned MIFlags = MachineInstr::NoFlags,
ARMCC::CondCodes Pred = ARMCC::AL,
unsigned PredReg = 0) {
static void emitRegPlusImmediate(
bool isARM, MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
const DebugLoc &dl, const ARMBaseInstrInfo &TII, unsigned DestReg,
unsigned SrcReg, int NumBytes, unsigned MIFlags = MachineInstr::NoFlags,
ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
if (isARM)
emitARMRegPlusImmediate(MBB, MBBI, dl, DestReg, SrcReg, NumBytes,
Pred, PredReg, TII, MIFlags);
@ -136,7 +134,7 @@ static void emitRegPlusImmediate(bool isARM, MachineBasicBlock &MBB,
}
static void emitSPUpdate(bool isARM, MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
MachineBasicBlock::iterator &MBBI, const DebugLoc &dl,
const ARMBaseInstrInfo &TII, int NumBytes,
unsigned MIFlags = MachineInstr::NoFlags,
ARMCC::CondCodes Pred = ARMCC::AL,
@ -206,7 +204,8 @@ struct StackAdjustingInsts {
}
void emitDefCFAOffsets(MachineModuleInfo &MMI, MachineBasicBlock &MBB,
DebugLoc dl, const ARMBaseInstrInfo &TII, bool HasFP) {
const DebugLoc &dl, const ARMBaseInstrInfo &TII,
bool HasFP) {
unsigned CFAOffset = 0;
for (auto &Info : Insts) {
if (HasFP && !Info.BeforeFPSet)
@ -235,7 +234,7 @@ static void emitAligningInstructions(MachineFunction &MF, ARMFunctionInfo *AFI,
const TargetInstrInfo &TII,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
DebugLoc DL, const unsigned Reg,
const DebugLoc &DL, const unsigned Reg,
const unsigned Alignment,
const bool MustBeSingleInstruction) {
const ARMSubtarget &AST =


@ -83,7 +83,7 @@ public:
/// getI32Imm - Return a target constant of type i32 with the specified
/// value.
inline SDValue getI32Imm(unsigned Imm, SDLoc dl) {
inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
}
@ -270,7 +270,7 @@ private:
SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
// Get the alignment operand for a NEON VLD or VST instruction.
SDValue GetVLDSTAlign(SDValue Align, SDLoc dl, unsigned NumVecs,
SDValue GetVLDSTAlign(SDValue Align, const SDLoc &dl, unsigned NumVecs,
bool is64BitVector);
/// Returns the number of instructions required to materialize the given
@ -1473,7 +1473,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
//===--------------------------------------------------------------------===//
/// getAL - Returns an ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG, SDLoc dl) {
static inline SDValue getAL(SelectionDAG *CurDAG, const SDLoc &dl) {
return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, dl, MVT::i32);
}
@ -1693,7 +1693,7 @@ SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction. The supported values depend on the
/// number of registers being loaded.
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, SDLoc dl,
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, const SDLoc &dl,
unsigned NumVecs, bool is64BitVector) {
unsigned NumRegs = NumVecs;
if (!is64BitVector && NumVecs < 3)
@ -3675,7 +3675,8 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
// and obtain the integer operands from them, adding these operands to the
// provided vector.
static void getIntOperandsFromRegisterString(StringRef RegString,
SelectionDAG *CurDAG, SDLoc DL,
SelectionDAG *CurDAG,
const SDLoc &DL,
std::vector<SDValue> &Ops) {
SmallVector<StringRef, 5> Fields;
RegString.split(Fields, ':');


@ -1438,13 +1438,11 @@ CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
bool isThisReturn, SDValue ThisVal) const {
SDValue ARMTargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
SDValue ThisVal) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@ -1524,10 +1522,9 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
}
/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
SDValue StackPtr, SDValue Arg,
SDLoc dl, SelectionDAG &DAG,
SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
SDValue Arg, const SDLoc &dl,
SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const {
unsigned LocMemOffset = VA.getLocMemOffset();
@ -1540,7 +1537,7 @@ ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
false, false, 0);
}
void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
SDValue Chain, SDValue &Arg,
RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA,
@ -2234,7 +2231,7 @@ ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
}
static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
SDLoc DL, SelectionDAG &DAG) {
const SDLoc &DL, SelectionDAG &DAG) {
const MachineFunction &MF = DAG.getMachineFunction();
const Function *F = MF.getFunction();
@ -2267,11 +2264,11 @@ static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
}
SDValue
ARMTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc dl, SelectionDAG &DAG) const {
const SDLoc &dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location.
SmallVector<CCValAssign, 16> RVLocs;
@ -3088,10 +3085,11 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
MachinePointerInfo(SV), false, false, 0);
}
SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
SDValue &Root, SelectionDAG &DAG,
SDLoc dl) const {
SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
CCValAssign &NextVA,
SDValue &Root,
SelectionDAG &DAG,
const SDLoc &dl) const {
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@ -3133,13 +3131,11 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: The frame index the registers were stored into.
int
ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
SDLoc dl, SDValue &Chain,
int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
const SDLoc &dl, SDValue &Chain,
const Value *OrigArg,
unsigned InRegsParamRecordIdx,
int ArgOffset,
unsigned ArgSize) const {
int ArgOffset, unsigned ArgSize) const {
// Currently, two use-cases are possible:
// Case #1. Non-var-args function, and we meet first byval parameter.
// Setup first unallocated register as first byval register;
@ -3190,9 +3186,8 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
}
// Set up the stack frame that the va_list pointer will start from.
void
ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
SDLoc dl, SDValue &Chain,
void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
const SDLoc &dl, SDValue &Chain,
unsigned ArgOffset,
unsigned TotalArgRegsSaveSize,
bool ForceMutable) const {
@ -3210,14 +3205,10 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
AFI->setVarArgsFrameIndex(FrameIndex);
}
SDValue
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
SDValue ARMTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
@ -3442,10 +3433,9 @@ static bool isFloatingPointZero(SDValue Op) {
/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
SDValue
ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &ARMcc, SelectionDAG &DAG,
SDLoc dl) const {
const SDLoc &dl) const {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
unsigned C = RHSC->getZExtValue();
if (!isLegalICmpImmediate(C)) {
@ -3501,9 +3491,8 @@ ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
}
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
SDValue
ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
SDLoc dl) const {
SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
SelectionDAG &DAG, const SDLoc &dl) const {
assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
SDValue Cmp;
if (!isFloatingPointZero(RHS))
@ -3720,7 +3709,7 @@ static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
}
}
SDValue ARMTargetLowering::getCMOV(SDLoc dl, EVT VT, SDValue FalseVal,
SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
SDValue TrueVal, SDValue ARMcc, SDValue CCR,
SDValue Cmp, SelectionDAG &DAG) const {
if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
@ -4394,7 +4383,7 @@ static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed. Regardless, use a canonical VMOV to create the
/// zero vector.
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
assert(VT.isVector() && "Expected a vector type");
// The canonical modified immediate encoding of a zero vector is....0!
SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
@ -4927,7 +4916,7 @@ static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) {
/// operand (e.g., VMOV). If so, return the encoded value.
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
unsigned SplatBitSize, SelectionDAG &DAG,
SDLoc dl, EVT &VT, bool is128Bits,
const SDLoc &dl, EVT &VT, bool is128Bits,
NEONModImmType type) {
unsigned OpCmode, Imm;
@ -5517,7 +5506,7 @@ static bool isReverseMask(ArrayRef<int> M, EVT VT) {
// instruction, return an SDValue of such a constant (will become a MOV
// instruction). Otherwise return null.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
const ARMSubtarget *ST, SDLoc dl) {
const ARMSubtarget *ST, const SDLoc &dl) {
uint64_t Val;
if (!isa<ConstantSDNode>(N))
return SDValue();
@ -5991,7 +5980,7 @@ ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
SDValue RHS, SelectionDAG &DAG,
SDLoc dl) {
const SDLoc &dl) {
unsigned OpNum = (PFEntry >> 26) & 0x0F;
unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
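// Worked decode of the packed table entry above, for an illustrative value
// PFEntry = 0x0800A007:
//   OpNum = (0x0800A007 >> 26) & 0xF    = 2
//   LHSID = (0x0800A007 >> 13) & 0x1FFF = 5
//   RHSID =  0x0800A007        & 0x1FFF = 7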
@ -6598,8 +6587,8 @@ static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}
static SDValue
LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) {
static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
SelectionDAG &DAG) {
// TODO: Should this propagate fast-math-flags?
// Convert to float
@ -6629,8 +6618,8 @@ LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) {
return X;
}
static SDValue
LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG) {
static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
SelectionDAG &DAG) {
// TODO: Should this propagate fast-math-flags?
SDValue N2;
@ -7723,7 +7712,7 @@ static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
/// Emit a post-increment load operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostLd(MachineBasicBlock *BB, MachineInstr *Pos,
const TargetInstrInfo *TII, DebugLoc dl,
const TargetInstrInfo *TII, const DebugLoc &dl,
unsigned LdSize, unsigned Data, unsigned AddrIn,
unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
@ -7755,7 +7744,7 @@ static void emitPostLd(MachineBasicBlock *BB, MachineInstr *Pos,
/// Emit a post-increment store operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostSt(MachineBasicBlock *BB, MachineInstr *Pos,
const TargetInstrInfo *TII, DebugLoc dl,
const TargetInstrInfo *TII, const DebugLoc &dl,
unsigned StSize, unsigned Data, unsigned AddrIn,
unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);


@ -503,23 +503,22 @@ namespace llvm {
std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;
typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
SDValue Chain, SDValue &Arg,
RegsToPassVector &RegsToPass,
void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
SDValue &Arg, RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA,
SDValue &StackPtr,
SmallVectorImpl<SDValue> &MemOpChains,
ISD::ArgFlagsTy Flags) const;
SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
SDValue &Root, SelectionDAG &DAG,
SDLoc dl) const;
const SDLoc &dl) const;
CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
bool isVarArg) const;
CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
bool isVarArg) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
SDLoc dl, SelectionDAG &DAG,
const SDLoc &dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const;
SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
@ -588,9 +587,9 @@ namespace llvm {
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
bool isThisReturn, SDValue ThisVal) const;
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
SDValue ThisVal) const;
bool supportSplitCSR(MachineFunction *MF) const override {
return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
@ -602,23 +601,19 @@ namespace llvm {
const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
SDLoc dl, SDValue &Chain,
const Value *OrigArg,
unsigned InRegsParamRecordIdx,
int ArgOffset,
int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
SDValue &Chain, const Value *OrigArg,
unsigned InRegsParamRecordIdx, int ArgOffset,
unsigned ArgSize) const;
void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
SDLoc dl, SDValue &Chain,
unsigned ArgOffset,
unsigned TotalArgRegsSaveSize,
const SDLoc &dl, SDValue &Chain,
unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
bool ForceMutable = false) const;
SDValue
@ -646,24 +641,22 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const override;
SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc dl, SelectionDAG &DAG) const override;
const SDLoc &dl, SelectionDAG &DAG) const override;
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
bool mayBeEmittedAsTailCall(CallInst *CI) const override;
SDValue getCMOV(SDLoc dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
SDValue ARMcc, SDValue CCR, SDValue Cmp,
SelectionDAG &DAG) const;
SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
SDValue getVFPCmp(SDValue LHS, SDValue RHS,
SelectionDAG &DAG, SDLoc dl) const;
SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
const SDLoc &dl) const;
SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;
SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;


@ -145,17 +145,19 @@ namespace {
MachineBasicBlock::const_iterator Before);
unsigned findFreeReg(const TargetRegisterClass &RegClass);
void UpdateBaseRegUses(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
DebugLoc DL, unsigned Base, unsigned WordOffset,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
unsigned Base, unsigned WordOffset,
ARMCC::CondCodes Pred, unsigned PredReg);
MachineInstr *CreateLoadStoreMulti(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, int Offset, unsigned Base,
bool BaseKill, unsigned Opcode, ARMCC::CondCodes Pred, unsigned PredReg,
DebugLoc DL, ArrayRef<std::pair<unsigned, bool>> Regs);
MachineInstr *CreateLoadStoreDouble(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, int Offset, unsigned Base,
bool BaseKill, unsigned Opcode, ARMCC::CondCodes Pred, unsigned PredReg,
DebugLoc DL, ArrayRef<std::pair<unsigned, bool>> Regs) const;
MachineInstr *CreateLoadStoreMulti(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
ArrayRef<std::pair<unsigned, bool>> Regs);
MachineInstr *CreateLoadStoreDouble(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
ArrayRef<std::pair<unsigned, bool>> Regs) const;
void FormCandidates(const MemOpQueue &MemOps);
MachineInstr *MergeOpsUpdate(const MergeCandidate &Cand);
bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
@ -450,12 +452,12 @@ static unsigned getLSMultipleTransferSize(const MachineInstr *MI) {
/// Update future uses of the base register with the offset introduced
/// due to writeback. This function only works on Thumb1.
void
ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
void ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
DebugLoc DL, unsigned Base,
const DebugLoc &DL, unsigned Base,
unsigned WordOffset,
ARMCC::CondCodes Pred, unsigned PredReg) {
ARMCC::CondCodes Pred,
unsigned PredReg) {
assert(isThumb1 && "Can only update base register uses for Thumb1!");
// Start updating any instructions with immediate offsets. Insert a SUB before
// the first non-updateable instruction (if any).
@ -588,10 +590,11 @@ static bool ContainsReg(const ArrayRef<std::pair<unsigned, bool>> &Regs,
/// Create and insert a LDM or STM with Base as base register and registers in
/// Regs as the register operands that would be loaded / stored. It returns
/// true if the transformation is done.
MachineInstr *ARMLoadStoreOpt::CreateLoadStoreMulti(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, int Offset, unsigned Base,
bool BaseKill, unsigned Opcode, ARMCC::CondCodes Pred, unsigned PredReg,
DebugLoc DL, ArrayRef<std::pair<unsigned, bool>> Regs) {
MachineInstr *ARMLoadStoreOpt::CreateLoadStoreMulti(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
ArrayRef<std::pair<unsigned, bool>> Regs) {
unsigned NumRegs = Regs.size();
assert(NumRegs > 1);
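// Merging sketch (assembly shown for illustration; this excerpt does not emit
// it directly): four consecutive word loads from [r0] can be folded into
//   ldmia r0, {r1, r2, r3, r4}
// with the entries in Regs becoming the register-list operands.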
@ -784,10 +787,11 @@ MachineInstr *ARMLoadStoreOpt::CreateLoadStoreMulti(MachineBasicBlock &MBB,
return MIB.getInstr();
}
MachineInstr *ARMLoadStoreOpt::CreateLoadStoreDouble(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, int Offset, unsigned Base,
bool BaseKill, unsigned Opcode, ARMCC::CondCodes Pred, unsigned PredReg,
DebugLoc DL, ArrayRef<std::pair<unsigned, bool>> Regs) const {
MachineInstr *ARMLoadStoreOpt::CreateLoadStoreDouble(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
ArrayRef<std::pair<unsigned, bool>> Regs) const {
bool IsLoad = isi32Load(Opcode);
assert((IsLoad || isi32Store(Opcode)) && "Must have integer load or store");
unsigned LoadStoreOpcode = IsLoad ? ARM::t2LDRDi8 : ARM::t2STRDi8;
@ -1543,14 +1547,13 @@ static bool isMemoryOp(const MachineInstr &MI) {
}
static void InsertLDR_STR(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
int Offset, bool isDef,
DebugLoc DL, unsigned NewOpc,
MachineBasicBlock::iterator &MBBI, int Offset,
bool isDef, const DebugLoc &DL, unsigned NewOpc,
unsigned Reg, bool RegDeadKill, bool RegUndef,
unsigned BaseReg, bool BaseKill, bool BaseUndef,
bool OffKill, bool OffUndef,
ARMCC::CondCodes Pred, unsigned PredReg,
const TargetInstrInfo *TII, bool isT2) {
bool OffKill, bool OffUndef, ARMCC::CondCodes Pred,
unsigned PredReg, const TargetInstrInfo *TII,
bool isT2) {
if (isDef) {
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
TII->get(NewOpc))


@ -21,12 +21,9 @@ using namespace llvm;
// Emit, if possible, a specialized version of the given Libcall. Typically this
// means selecting the appropriately aligned version, but we also convert memset
// of 0 into memclr.
SDValue ARMSelectionDAGInfo::
EmitSpecializedLibcall(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
RTLIB::Libcall LC) const {
SDValue ARMSelectionDAGInfo::EmitSpecializedLibcall(
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, RTLIB::Libcall LC) const {
const ARMSubtarget &Subtarget =
DAG.getMachineFunction().getSubtarget<ARMSubtarget>();
const ARMTargetLowering *TLI = Subtarget.getTargetLowering();
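// Illustration of the "memset of 0 into memclr" case mentioned above: a call
// equivalent to memset(dst, 0, n) can be emitted as the ARM RTABI helper
// __aeabi_memclr (or an aligned variant such as __aeabi_memclr4) instead of a
// generic memset libcall. The helper names come from the RTABI, not from this
// excerpt.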
@ -128,14 +125,10 @@ EmitSpecializedLibcall(SelectionDAG &DAG, SDLoc dl,
return CallResult.second;
}
SDValue
ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const {
SDValue ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
const ARMSubtarget &Subtarget =
DAG.getMachineFunction().getSubtarget<ARMSubtarget>();
// Do repeated 4-byte loads and stores. To be improved.
@ -252,25 +245,17 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
makeArrayRef(TFOps, i));
}
SDValue ARMSelectionDAGInfo::
EmitTargetCodeForMemmove(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const {
SDValue ARMSelectionDAGInfo::EmitTargetCodeForMemmove(
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
return EmitSpecializedLibcall(DAG, dl, Chain, Dst, Src, Size, Align,
RTLIB::MEMMOVE);
}
SDValue ARMSelectionDAGInfo::
EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, SDValue Size,
unsigned Align, bool isVolatile,
SDValue ARMSelectionDAGInfo::EmitTargetCodeForMemset(
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
return EmitSpecializedLibcall(DAG, dl, Chain, Dst, Src, Size, Align,
RTLIB::MEMSET);


@ -38,33 +38,28 @@ namespace ARM_AM {
class ARMSelectionDAGInfo : public SelectionDAGTargetInfo {
public:
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const override;
SDValue EmitTargetCodeForMemmove(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
SDValue
EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
SDValue Dst, SDValue Src, SDValue Size,
unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const override;
// Adjust parameters for memset, see RTABI section 4.3.4
SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align,
bool isVolatile,
SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
SDValue Chain, SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const override;
SDValue EmitSpecializedLibcall(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue EmitSpecializedLibcall(SelectionDAG &DAG, const SDLoc &dl,
SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
RTLIB::Libcall LC) const;
};


@ -38,12 +38,11 @@ bool Thumb1FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const{
return !MF.getFrameInfo()->hasVarSizedObjects();
}
static void
emitSPUpdate(MachineBasicBlock &MBB,
static void emitSPUpdate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
const TargetInstrInfo &TII, DebugLoc dl,
const ThumbRegisterInfo &MRI,
int NumBytes, unsigned MIFlags = MachineInstr::NoFlags) {
const TargetInstrInfo &TII, const DebugLoc &dl,
const ThumbRegisterInfo &MRI, int NumBytes,
unsigned MIFlags = MachineInstr::NoFlags) {
emitThumbRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes, TII,
MRI, MIFlags);
}


@ -38,9 +38,9 @@ unsigned Thumb1InstrInfo::getUnindexedOpcode(unsigned Opc) const {
}
void Thumb1InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
// Need to check the arch.
MachineFunction &MF = *MBB.getParent();
const ARMSubtarget &st = MF.getSubtarget<ARMSubtarget>();


@@ -38,9 +38,8 @@ public:
///
const ThumbRegisterInfo &getRegisterInfo() const override { return RI; }
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,


@@ -110,9 +110,9 @@ Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
}
void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
// Handle SPR, DPR, and QPR copies.
if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);
@@ -219,10 +219,12 @@ Thumb2InstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
}
void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
unsigned DestReg, unsigned BaseReg, int NumBytes,
MachineBasicBlock::iterator &MBBI,
const DebugLoc &dl, unsigned DestReg,
unsigned BaseReg, int NumBytes,
ARMCC::CondCodes Pred, unsigned PredReg,
const ARMBaseInstrInfo &TII, unsigned MIFlags) {
const ARMBaseInstrInfo &TII,
unsigned MIFlags) {
if (NumBytes == 0 && DestReg != BaseReg) {
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
.addReg(BaseReg, RegState::Kill)


@@ -39,9 +39,8 @@ public:
bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const override;
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,


@@ -61,7 +61,7 @@ ThumbRegisterInfo::getPointerRegClass(const MachineFunction &MF,
static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
DebugLoc dl, unsigned DestReg,
const DebugLoc &dl, unsigned DestReg,
unsigned SubIdx, int Val,
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned MIFlags) {
@@ -81,7 +81,7 @@ static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
DebugLoc dl, unsigned DestReg,
const DebugLoc &dl, unsigned DestReg,
unsigned SubIdx, int Val,
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned MIFlags) {
@@ -101,9 +101,9 @@ static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ThumbRegisterInfo::emitLoadConstPool(
MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, DebugLoc dl,
unsigned DestReg, unsigned SubIdx, int Val, ARMCC::CondCodes Pred,
unsigned PredReg, unsigned MIFlags) const {
MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
MachineFunction &MF = *MBB.getParent();
const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
if (STI.isThumb1Only()) {
@@ -120,15 +120,11 @@ void ThumbRegisterInfo::emitLoadConstPool(
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
/// in a register using mov / mvn sequences or load the immediate from a
/// constpool entry.
static
void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
DebugLoc dl,
unsigned DestReg, unsigned BaseReg,
int NumBytes, bool CanChangeCC,
const TargetInstrInfo &TII,
const ARMBaseRegisterInfo& MRI,
unsigned MIFlags = MachineInstr::NoFlags) {
static void emitThumbRegPlusImmInReg(
MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
const DebugLoc &dl, unsigned DestReg, unsigned BaseReg, int NumBytes,
bool CanChangeCC, const TargetInstrInfo &TII,
const ARMBaseRegisterInfo &MRI, unsigned MIFlags = MachineInstr::NoFlags) {
MachineFunction &MF = *MBB.getParent();
bool isHigh = !isARMLowRegister(DestReg) ||
(BaseReg != 0 && !isARMLowRegister(BaseReg));
@@ -149,21 +145,23 @@ void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
.addImm(NumBytes).setMIFlags(MIFlags);
.addImm(NumBytes)
.setMIFlags(MIFlags);
} else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
.addImm(NumBytes).setMIFlags(MIFlags);
.addImm(NumBytes)
.setMIFlags(MIFlags);
AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg))
.addReg(LdReg, RegState::Kill).setMIFlags(MIFlags);
.addReg(LdReg, RegState::Kill)
.setMIFlags(MIFlags);
} else
MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes,
ARMCC::AL, 0, MIFlags);
MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes, ARMCC::AL, 0,
MIFlags);
// Emit add / sub.
int Opc = (isSub) ? ARM::tSUBrr : ((isHigh || !CanChangeCC) ? ARM::tADDhirr
: ARM::tADDrr);
MachineInstrBuilder MIB =
BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
int Opc = (isSub) ? ARM::tSUBrr
: ((isHigh || !CanChangeCC) ? ARM::tADDhirr : ARM::tADDrr);
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
if (Opc != ARM::tADDhirr)
MIB = AddDefaultT1CC(MIB);
if (DestReg == ARM::SP || isSub)
@@ -179,9 +177,9 @@ void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
/// be too long. This is allowed to modify the condition flags.
void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
DebugLoc dl,
unsigned DestReg, unsigned BaseReg,
int NumBytes, const TargetInstrInfo &TII,
const DebugLoc &dl, unsigned DestReg,
unsigned BaseReg, int NumBytes,
const TargetInstrInfo &TII,
const ARMBaseRegisterInfo &MRI,
unsigned MIFlags) {
bool isSub = NumBytes < 0;


@@ -39,8 +39,9 @@ public:
/// specified immediate.
void
emitLoadConstPool(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
DebugLoc dl, unsigned DestReg, unsigned SubIdx, int Val,
ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0,
const DebugLoc &dl, unsigned DestReg, unsigned SubIdx,
int Val, ARMCC::CondCodes Pred = ARMCC::AL,
unsigned PredReg = 0,
unsigned MIFlags = MachineInstr::NoFlags) const override;
// rewrite MI to access 'Offset' bytes from the FP. Update Offset to be


@@ -127,19 +127,19 @@ private:
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals, SDLoc dl,
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
SelectionDAG &DAG) const override;
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
SelectionDAG &DAG,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
private:


@@ -39,9 +39,9 @@ AVRInstrInfo::AVRInstrInfo()
: AVRGenInstrInfo(AVR::ADJCALLSTACKDOWN, AVR::ADJCALLSTACKUP), RI() {}
void AVRInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
unsigned Opc;
if (AVR::GPR8RegClass.contains(DestReg, SrcReg)) {
@@ -377,7 +377,7 @@ unsigned AVRInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&


@@ -73,7 +73,7 @@ public:
unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
DebugLoc DL, unsigned DestReg, unsigned SrcReg,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, unsigned SrcReg,
@@ -96,7 +96,7 @@ public:
bool AllowModify = false) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
const DebugLoc &DL) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;


@@ -33,13 +33,14 @@ using namespace llvm;
#define DEBUG_TYPE "bpf-lower"
static void fail(SDLoc DL, SelectionDAG &DAG, const char *Msg) {
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
MachineFunction &MF = DAG.getMachineFunction();
DAG.getContext()->diagnose(
DiagnosticInfoUnsupported(*MF.getFunction(), Msg, DL.getDebugLoc()));
}
static void fail(SDLoc DL, SelectionDAG &DAG, const char *Msg, SDValue Val) {
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg,
SDValue Val) {
MachineFunction &MF = DAG.getMachineFunction();
std::string Str;
raw_string_ostream OS(Str);
@@ -149,8 +150,8 @@ SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
SDValue BPFTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
switch (CallConv) {
default:
llvm_unreachable("Unsupported calling convention");
@@ -345,7 +346,7 @@ BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc DL, SelectionDAG &DAG) const {
const SDLoc &DL, SelectionDAG &DAG) const {
unsigned Opc = BPFISD::RET_FLAG;
// CCValAssign - represent the assignment of the return value to a location
@@ -390,8 +391,8 @@ BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
SDValue BPFTargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
// Assign locations to each value returned by this call.


@@ -54,8 +54,8 @@ private:
// Lower the result values of a call, copying them out of physregs into vregs
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
SelectionDAG &DAG,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
// Maximum number of arguments to a call
@@ -69,12 +69,12 @@ private:
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc DL, SelectionDAG &DAG,
const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const override;
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,


@@ -32,9 +32,9 @@ BPFInstrInfo::BPFInstrInfo()
: BPFGenInstrInfo(BPF::ADJCALLSTACKDOWN, BPF::ADJCALLSTACKUP) {}
void BPFInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
if (BPF::GPRRegClass.contains(DestReg, SrcReg))
BuildMI(MBB, I, DL, get(BPF::MOV_rr), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
@@ -134,7 +134,7 @@ unsigned BPFInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");


@@ -31,7 +31,7 @@ public:
const BPFRegisterInfo &getRegisterInfo() const { return RI; }
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
DebugLoc DL, unsigned DestReg, unsigned SrcReg,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -52,7 +52,7 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
const DebugLoc &DL) const override;
};
}


@@ -93,11 +93,13 @@ public:
bool tryLoadOfLoadIntrinsic(LoadSDNode *N);
void SelectLoad(SDNode *N);
void SelectBaseOffsetLoad(LoadSDNode *LD, SDLoc dl);
void SelectIndexedLoad(LoadSDNode *LD, SDLoc dl);
void SelectIndexedLoadZeroExtend64(LoadSDNode *LD, unsigned Opcode, SDLoc dl);
void SelectIndexedLoadSignExtend64(LoadSDNode *LD, unsigned Opcode, SDLoc dl);
void SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl);
void SelectIndexedLoadZeroExtend64(LoadSDNode *LD, unsigned Opcode,
const SDLoc &dl);
void SelectIndexedLoadSignExtend64(LoadSDNode *LD, unsigned Opcode,
const SDLoc &dl);
void SelectBaseOffsetStore(StoreSDNode *ST, SDLoc dl);
void SelectIndexedStore(StoreSDNode *ST, SDLoc dl);
void SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl);
void SelectStore(SDNode *N);
void SelectSHL(SDNode *N);
void SelectMul(SDNode *N);
@@ -112,7 +114,7 @@ public:
// XformMskToBitPosU5Imm - Returns the bit position which
// the single bit 32 bit mask represents.
// Used in Clr and Set bit immediate memops.
SDValue XformMskToBitPosU5Imm(uint32_t Imm, SDLoc DL) {
SDValue XformMskToBitPosU5Imm(uint32_t Imm, const SDLoc &DL) {
int32_t bitPos;
bitPos = Log2_32(Imm);
assert(bitPos >= 0 && bitPos < 32 &&
@@ -122,13 +124,13 @@ public:
// XformMskToBitPosU4Imm - Returns the bit position which the single-bit
// 16 bit mask represents. Used in Clr and Set bit immediate memops.
SDValue XformMskToBitPosU4Imm(uint16_t Imm, SDLoc DL) {
SDValue XformMskToBitPosU4Imm(uint16_t Imm, const SDLoc &DL) {
return XformMskToBitPosU5Imm(Imm, DL);
}
// XformMskToBitPosU3Imm - Returns the bit position which the single-bit
// 8 bit mask represents. Used in Clr and Set bit immediate memops.
SDValue XformMskToBitPosU3Imm(uint8_t Imm, SDLoc DL) {
SDValue XformMskToBitPosU3Imm(uint8_t Imm, const SDLoc &DL) {
return XformMskToBitPosU5Imm(Imm, DL);
}
@@ -141,36 +143,36 @@ public:
// XformM5ToU5Imm - Return a target constant with the specified value, of
// type i32 where the negative literal is transformed into a positive literal
// for use in -= memops.
inline SDValue XformM5ToU5Imm(signed Imm, SDLoc DL) {
inline SDValue XformM5ToU5Imm(signed Imm, const SDLoc &DL) {
assert((Imm >= -31 && Imm <= -1) && "Constant out of range for Memops");
return CurDAG->getTargetConstant(-Imm, DL, MVT::i32);
}
// XformU7ToU7M1Imm - Return a target constant decremented by 1, in range
// [1..128], used in cmpb.gtu instructions.
inline SDValue XformU7ToU7M1Imm(signed Imm, SDLoc DL) {
inline SDValue XformU7ToU7M1Imm(signed Imm, const SDLoc &DL) {
assert((Imm >= 1 && Imm <= 128) && "Constant out of range for cmpb op");
return CurDAG->getTargetConstant(Imm - 1, DL, MVT::i8);
}
// XformS8ToS8M1Imm - Return a target constant decremented by 1.
inline SDValue XformSToSM1Imm(signed Imm, SDLoc DL) {
inline SDValue XformSToSM1Imm(signed Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm - 1, DL, MVT::i32);
}
// XformU8ToU8M1Imm - Return a target constant decremented by 1.
inline SDValue XformUToUM1Imm(unsigned Imm, SDLoc DL) {
inline SDValue XformUToUM1Imm(unsigned Imm, const SDLoc &DL) {
assert((Imm >= 1) && "Cannot decrement unsigned int less than 1");
return CurDAG->getTargetConstant(Imm - 1, DL, MVT::i32);
}
// XformSToSM2Imm - Return a target constant decremented by 2.
inline SDValue XformSToSM2Imm(unsigned Imm, SDLoc DL) {
inline SDValue XformSToSM2Imm(unsigned Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm - 2, DL, MVT::i32);
}
// XformSToSM3Imm - Return a target constant decremented by 3.
inline SDValue XformSToSM3Imm(unsigned Imm, SDLoc DL) {
inline SDValue XformSToSM3Imm(unsigned Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm - 3, DL, MVT::i32);
}
@@ -241,7 +243,7 @@ static bool doesIntrinsicReturnPredicate(unsigned ID) {
void HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
unsigned Opcode,
SDLoc dl) {
const SDLoc &dl) {
SDValue Chain = LD->getChain();
EVT LoadedVT = LD->getMemoryVT();
SDValue Base = LD->getBasePtr();
@@ -294,7 +296,7 @@ void HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
void HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
unsigned Opcode,
SDLoc dl) {
const SDLoc &dl) {
SDValue Chain = LD->getChain();
EVT LoadedVT = LD->getMemoryVT();
SDValue Base = LD->getBasePtr();
@@ -354,8 +356,7 @@ void HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
return;
}
void HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, SDLoc dl) {
void HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Offset = LD->getOffset();
@@ -662,8 +663,7 @@ void HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
SelectCode(LD);
}
void HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, SDLoc dl) {
void HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl) {
SDValue Chain = ST->getChain();
SDValue Base = ST->getBasePtr();
SDValue Offset = ST->getOffset();


@@ -528,10 +528,9 @@ const {
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter. Sometimes what we are copying is the end of a
/// larger object, the part that does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
SDLoc dl) {
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
SDValue Chain, ISD::ArgFlagsTy Flags,
SelectionDAG &DAG, const SDLoc &dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
@@ -554,11 +553,11 @@ static bool IsHvxVectorType(MVT ty) {
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed by a pointer passed by caller.
SDValue
HexagonTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc dl, SelectionDAG &DAG) const {
const SDLoc &dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to locations.
SmallVector<CCValAssign, 16> RVLocs;
@@ -608,15 +607,11 @@ bool HexagonTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. Returns a SDNode with the same number of values as the
/// ISD::CALL.
SDValue
HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const
SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
const SmallVectorImpl<SDValue> &OutVals,
SDValue Callee) const {
SDValue HexagonTargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@@ -1067,15 +1062,10 @@ HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
return AA;
}
SDValue
HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const
SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
SDValue HexagonTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -1226,7 +1216,8 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
}
// Creates a SPLAT instruction for a constant value VAL.
static SDValue createSplat(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue Val) {
static SDValue createSplat(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
SDValue Val) {
if (VT.getSimpleVT() == MVT::v4i8)
return DAG.getNode(HexagonISD::VSPLATB, dl, VT, Val);


@@ -131,9 +131,11 @@ bool isPositiveHalfWord(SDNode *N);
SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const override;
SDValue
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -152,9 +154,11 @@ bool isPositiveHalfWord(SDNode *N);
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const;
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
const SmallVectorImpl<SDValue> &OutVals,
SDValue Callee) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
@@ -164,10 +168,10 @@ bool isPositiveHalfWord(SDNode *N);
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals, SDLoc dl,
SelectionDAG &DAG) const override;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const override;
bool mayBeEmittedAsTailCall(CallInst *CI) const override;
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI,


@@ -561,10 +561,11 @@ unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return Count;
}
unsigned HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB, MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
const DebugLoc &DL) const {
unsigned BOpc = Hexagon::J2_jump;
unsigned BccOpc = Hexagon::J2_jumpt;
assert(validateBranchCond(Cond) && "Invalid branching condition");
@@ -677,9 +678,9 @@ bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
return NumInstrs <= 4;
}
void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL, unsigned DestReg,
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
auto &HRI = getRegisterInfo();
unsigned KillFlag = getKillRegState(KillSrc);


@@ -101,7 +101,7 @@ public:
/// merging needs to be disabled.
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
const DebugLoc &DL) const override;
/// Return true if it's profitable to predicate
/// instructions with accumulated instruction latency of "NumCycles"
@@ -141,9 +141,8 @@ public:
/// The source and destination registers may overlap, which may require a
/// careful implementation when multiple copy instructions are required for
/// large registers. See for example the ARM target.
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
/// Store the specified register of the given register class to the specified


@@ -17,13 +17,10 @@ using namespace llvm;
#define DEBUG_TYPE "hexagon-selectiondag-info"
SDValue
HexagonSelectionDAGInfo::
EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
SDValue Dst, SDValue Src, SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const {
SDValue HexagonSelectionDAGInfo::EmitTargetCodeForMemcpy(
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
if (AlwaysInline || (Align & 0x3) != 0 || !ConstantSize)
return SDValue();


@@ -20,12 +20,10 @@ namespace llvm {
class HexagonSelectionDAGInfo : public SelectionDAGTargetInfo {
public:
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const override;
};


@@ -83,7 +83,7 @@ private:
SDValue &AluOp);
// getI32Imm - Return a target constant with the specified value, of type i32.
inline SDValue getI32Imm(unsigned Imm, SDLoc DL) {
inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
}


@@ -373,8 +373,8 @@ static bool CC_Lanai32_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
SDValue LanaiTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
switch (CallConv) {
case CallingConv::C:
case CallingConv::Fast:
@@ -414,8 +414,8 @@ SDValue LanaiTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// generate load operations for arguments places on the stack.
SDValue LanaiTargetLowering::LowerCCCArguments(
SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
@@ -514,7 +514,7 @@ LanaiTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc DL, SelectionDAG &DAG) const {
const SDLoc &DL, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location
SmallVector<CCValAssign, 16> RVLocs;
@@ -576,8 +576,8 @@ SDValue LanaiTargetLowering::LowerCCCCallTo(
SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool IsVarArg,
bool IsTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
@@ -755,8 +755,8 @@ SDValue LanaiTargetLowering::LowerCCCCallTo(
// appropriate copies out of appropriate physical registers.
SDValue LanaiTargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
@@ -780,8 +780,9 @@ SDValue LanaiTargetLowering::LowerCallResult(
// Custom Lowerings
//===----------------------------------------------------------------------===//
static LPCC::CondCode IntCondCCodeToICC(SDValue CC, SDLoc DL, SDValue &LHS,
SDValue &RHS, SelectionDAG &DAG) {
static LPCC::CondCode IntCondCCodeToICC(SDValue CC, const SDLoc &DL,
SDValue &LHS, SDValue &RHS,
SelectionDAG &DAG) {
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
// For integer, only the SETEQ, SETNE, SETLT, SETLE, SETGT, SETGE, SETULT,


@@ -109,20 +109,20 @@ private:
bool IsTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
SelectionDAG &DAG,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCCCArguments(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
SelectionDAG &DAG,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
SelectionDAG &DAG,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
@@ -131,12 +131,12 @@ private:
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc DL, SelectionDAG &DAG,
const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const override;
const LanaiRegisterInfo *TRI;


@@ -34,7 +34,8 @@ LanaiInstrInfo::LanaiInstrInfo()
void LanaiInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator Position,
DebugLoc DL, unsigned DestinationRegister,
const DebugLoc &DL,
unsigned DestinationRegister,
unsigned SourceRegister,
bool KillSource) const {
if (!Lanai::GPRRegClass.contains(DestinationRegister, SourceRegister)) {
@@ -272,7 +273,7 @@ unsigned LanaiInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TrueBlock,
MachineBasicBlock *FalseBlock,
ArrayRef<MachineOperand> Condition,
DebugLoc DL) const {
const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TrueBlock && "InsertBranch must not be told to insert a fallthrough");


@@ -48,7 +48,7 @@ public:
int &FrameIndex) const override;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
DebugLoc DL, unsigned DestinationRegister,
const DebugLoc &DL, unsigned DestinationRegister,
unsigned SourceRegister, bool KillSource) const override;
void
@@ -88,7 +88,7 @@ public:
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TrueBlock,
MachineBasicBlock *FalseBlock,
ArrayRef<MachineOperand> Condition,
DebugLoc DL) const override;
const DebugLoc &DL) const override;
};
static inline bool isSPLSOpcode(unsigned Opcode) {


@@ -20,7 +20,7 @@
namespace llvm {
SDValue LanaiSelectionDAGInfo::EmitTargetCodeForMemcpy(
SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);


@@ -23,9 +23,9 @@ class LanaiSelectionDAGInfo : public SelectionDAGTargetInfo {
public:
LanaiSelectionDAGInfo() = default;
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
SDValue Dst, SDValue Src, SDValue Size,
unsigned Align, bool isVolatile,
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const override;


@@ -358,16 +358,10 @@ static void AnalyzeReturnValues(CCState &State,
std::reverse(RVLocs.begin(), RVLocs.end());
}
SDValue
MSP430TargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
SDLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
SDValue MSP430TargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
switch (CallConv) {
default:
@@ -414,16 +408,10 @@ MSP430TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
/// LowerCCCArguments - transform physical registers into virtual registers and
/// generate load operations for arguments places on the stack.
// FIXME: struct return stuff
SDValue
MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
SDLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
SDValue MSP430TargetLowering::LowerCCCArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
@@ -514,11 +502,11 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
}
SDValue
MSP430TargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
MSP430TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc dl, SelectionDAG &DAG) const {
const SDLoc &dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location
SmallVector<CCValAssign, 16> RVLocs;
@@ -566,16 +554,12 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
/// LowerCCCCallTo - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
// TODO: sret.
SDValue
MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg>
&Outs,
SDValue MSP430TargetLowering::LowerCCCCallTo(
SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
@@ -704,12 +688,10 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue
MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
SDValue MSP430TargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@@ -804,8 +786,7 @@ SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op,
}
static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
ISD::CondCode CC,
SDLoc dl, SelectionDAG &DAG) {
ISD::CondCode CC, const SDLoc &dl, SelectionDAG &DAG) {
// FIXME: Handle bittests someday
assert(!LHS.getValueType().isFloatingPoint() && "We don't handle FP yet");


@@ -133,38 +133,34 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCCCArguments(SDValue Chain,
CallingConv::ID CallConv,
SDValue LowerCCCArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl,
SelectionDAG &DAG,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc dl, SelectionDAG &DAG) const override;
const SDLoc &dl, SelectionDAG &DAG) const override;
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
SDValue &Base,


@@ -89,9 +89,9 @@ void MSP430InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
}
void MSP430InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
unsigned Opc;
if (MSP430::GR16RegClass.contains(DestReg, SrcReg))
Opc = MSP430::MOV16rr;
@@ -260,11 +260,11 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return false;
}
unsigned
MSP430InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
unsigned MSP430InstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&


@@ -52,9 +52,8 @@ public:
///
const TargetRegisterInfo &getRegisterInfo() const { return RI; }
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -83,8 +82,7 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
const DebugLoc &DL) const override;
};
}


@@ -43,7 +43,7 @@ bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
}
/// Select multiply instructions.
std::pair<SDNode *, SDNode *>
Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, SDLoc DL, EVT Ty,
Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, const SDLoc &DL, EVT Ty,
bool HasLo, bool HasHi) {
SDNode *Lo = nullptr, *Hi = nullptr;
SDNode *Mul = CurDAG->getMachineNode(Opc, DL, MVT::Glue, N->getOperand(0),


@@ -23,8 +23,9 @@ public:
explicit Mips16DAGToDAGISel(MipsTargetMachine &TM) : MipsDAGToDAGISel(TM) {}
private:
std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, SDLoc DL,
EVT Ty, bool HasLo, bool HasHi);
std::pair<SDNode *, SDNode *> selectMULT(SDNode *N, unsigned Opc,
const SDLoc &DL, EVT Ty, bool HasLo,
bool HasHi);
SDValue getMips16SPAliasReg();


@@ -57,9 +57,9 @@ unsigned Mips16InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
}
void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
unsigned Opc = 0;
if (Mips::CPU16RegsRegClass.contains(DestReg) &&
@@ -305,7 +305,8 @@ void Mips16InstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
unsigned Mips16InstrInfo::loadImmediate(unsigned FrameReg, int64_t Imm,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator II,
DebugLoc DL, unsigned &NewImm) const {
const DebugLoc &DL,
unsigned &NewImm) const {
//
// given original instruction is:
// Instr rx, T[offset] where offset is too big.


@@ -43,9 +43,8 @@ public:
unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const override;
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStack(MachineBasicBlock &MBB,
@@ -83,9 +82,8 @@ public:
// This is to adjust some FrameReg. We return the new register to be used
// in place of FrameReg and the adjusted immediate field (&NewImm)
//
unsigned loadImmediate(unsigned FrameReg,
int64_t Imm, MachineBasicBlock &MBB,
MachineBasicBlock::iterator II, DebugLoc DL,
unsigned loadImmediate(unsigned FrameReg, int64_t Imm, MachineBasicBlock &MBB,
MachineBasicBlock::iterator II, const DebugLoc &DL,
unsigned &NewImm) const;
static bool validImmediate(unsigned Opcode, unsigned Reg, int64_t Amount);


@@ -224,8 +224,8 @@ namespace {
private:
bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
Iter replaceWithCompactBranch(MachineBasicBlock &MBB,
Iter Branch, DebugLoc DL);
Iter replaceWithCompactBranch(MachineBasicBlock &MBB, Iter Branch,
const DebugLoc &DL);
/// This function checks if it is valid to move Candidate to the delay slot
/// and returns true if it isn't. It also updates memory and register
@@ -529,8 +529,8 @@ getUnderlyingObjects(const MachineInstr &MI,
}
// Replace Branch with the compact branch instruction.
Iter Filler::replaceWithCompactBranch(MachineBasicBlock &MBB,
Iter Branch, DebugLoc DL) {
Iter Filler::replaceWithCompactBranch(MachineBasicBlock &MBB, Iter Branch,
const DebugLoc &DL) {
const MipsSubtarget &STI = MBB.getParent()->getSubtarget<MipsSubtarget>();
const MipsInstrInfo *TII = STI.getInstrInfo();


@@ -564,7 +564,7 @@ static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
// Creates and returns a CMovFPT/F node.
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
SDValue False, SDLoc DL) {
SDValue False, const SDLoc &DL) {
ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
@@ -2529,10 +2529,10 @@ static unsigned getNextIntArgReg(unsigned Reg) {
return (Reg == Mips::A0) ? Mips::A1 : Mips::A3;
}
SDValue
MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
SDValue Chain, SDValue Arg, SDLoc DL,
bool IsTailCall, SelectionDAG &DAG) const {
SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
SDValue Chain, SDValue Arg,
const SDLoc &DL, bool IsTailCall,
SelectionDAG &DAG) const {
if (!IsTailCall) {
SDValue PtrOff =
DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
@@ -2858,8 +2858,8 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
/// appropriate copies out of appropriate physical registers.
SDValue MipsTargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
TargetLowering::CallLoweringInfo &CLI) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@@ -2920,7 +2920,8 @@ SDValue MipsTargetLowering::LowerCallResult(
}
static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
EVT ArgVT, SDLoc DL, SelectionDAG &DAG) {
EVT ArgVT, const SDLoc &DL,
SelectionDAG &DAG) {
MVT LocVT = VA.getLocVT();
EVT ValVT = VA.getValVT();
@@ -2978,14 +2979,10 @@ static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
//===----------------------------------------------------------------------===//
/// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments places on the stack.
SDValue
MipsTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
SDValue MipsTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
@@ -3158,7 +3155,8 @@ MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
SDValue
MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
SDLoc DL, SelectionDAG &DAG) const {
const SDLoc &DL,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
@@ -3173,7 +3171,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc DL, SelectionDAG &DAG) const {
const SDLoc &DL, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of
// the return value to a location
SmallVector<CCValAssign, 16> RVLocs;
@@ -3681,10 +3679,11 @@ bool MipsTargetLowering::useSoftFloat() const {
}
void MipsTargetLowering::copyByValRegs(
SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains, SelectionDAG &DAG,
const ISD::ArgFlagsTy &Flags, SmallVectorImpl<SDValue> &InVals,
const Argument *FuncArg, unsigned FirstReg, unsigned LastReg,
const CCValAssign &VA, MipsCCState &State) const {
SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
MipsCCState &State) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
@@ -3729,7 +3728,7 @@ void MipsTargetLowering::copyByValRegs(
// Copy byVal arg to registers and stack.
void MipsTargetLowering::passByValArg(
SDValue Chain, SDLoc DL,
SDValue Chain, const SDLoc &DL,
std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
@@ -3827,7 +3826,7 @@ void MipsTargetLowering::passByValArg(
}
void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
SDValue Chain, SDLoc DL,
SDValue Chain, const SDLoc &DL,
SelectionDAG &DAG,
CCState &State) const {
ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();


@@ -304,7 +304,7 @@ namespace llvm {
//
// (add (load (wrapper $gp, %got(sym)), %lo(sym))
template <class NodeTy>
SDValue getAddrLocal(NodeTy *N, SDLoc DL, EVT Ty, SelectionDAG &DAG,
SDValue getAddrLocal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG,
bool IsN32OrN64) const {
unsigned GOTFlag = IsN32OrN64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
SDValue GOT = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
@@ -324,7 +324,7 @@ namespace llvm {
//
// (load (wrapper $gp, %got(sym)))
template <class NodeTy>
SDValue getAddrGlobal(NodeTy *N, SDLoc DL, EVT Ty, SelectionDAG &DAG,
SDValue getAddrGlobal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG,
unsigned Flag, SDValue Chain,
const MachinePointerInfo &PtrInfo) const {
SDValue Tgt = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
@@ -337,7 +337,7 @@ namespace llvm {
//
// (load (wrapper (add %hi(sym), $gp), %lo(sym)))
template <class NodeTy>
SDValue getAddrGlobalLargeGOT(NodeTy *N, SDLoc DL, EVT Ty,
SDValue getAddrGlobalLargeGOT(NodeTy *N, const SDLoc &DL, EVT Ty,
SelectionDAG &DAG, unsigned HiFlag,
unsigned LoFlag, SDValue Chain,
const MachinePointerInfo &PtrInfo) const {
@@ -355,7 +355,7 @@ namespace llvm {
//
// (add %hi(sym), %lo(sym))
template <class NodeTy>
SDValue getAddrNonPIC(NodeTy *N, SDLoc DL, EVT Ty,
SDValue getAddrNonPIC(NodeTy *N, const SDLoc &DL, EVT Ty,
SelectionDAG &DAG) const {
SDValue Hi = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_HI);
SDValue Lo = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_LO);
@@ -369,7 +369,8 @@ namespace llvm {
//
// (add $gp, %gp_rel(sym))
template <class NodeTy>
SDValue getAddrGPRel(NodeTy *N, SDLoc DL, EVT Ty, SelectionDAG &DAG) const {
SDValue getAddrGPRel(NodeTy *N, const SDLoc &DL, EVT Ty,
SelectionDAG &DAG) const {
assert(Ty == MVT::i32);
SDValue GPRel = getTargetNode(N, Ty, DAG, MipsII::MO_GPREL);
return DAG.getNode(ISD::ADD, DL, Ty,
@@ -421,8 +422,9 @@ namespace llvm {
// Lower Operand helpers
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
TargetLowering::CallLoweringInfo &CLI) const;
// Lower Operand specifics
@@ -459,15 +461,16 @@ namespace llvm {
/// copyByValArg - Copy argument registers which were used to pass a byval
/// argument to the stack. Create a stack frame object for the byval
/// argument.
void copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
void copyByValRegs(SDValue Chain, const SDLoc &DL,
std::vector<SDValue> &OutChains, SelectionDAG &DAG,
const ISD::ArgFlagsTy &Flags,
SmallVectorImpl<SDValue> &InVals,
const Argument *FuncArg, unsigned FirstReg,
unsigned LastReg, const CCValAssign &VA,
MipsCCState &State) const;
/// passByValArg - Pass a byval argument in registers or on stack.
void passByValArg(SDValue Chain, SDLoc DL,
void passByValArg(SDValue Chain, const SDLoc &DL,
std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
@@ -479,17 +482,17 @@ namespace llvm {
/// to the stack. Also create a stack frame object for the first variable
/// argument.
void writeVarArgRegs(std::vector<SDValue> &OutChains, SDValue Chain,
SDLoc DL, SelectionDAG &DAG, CCState &State) const;
const SDLoc &DL, SelectionDAG &DAG,
CCState &State) const;
SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue passArgOnStack(SDValue StackPtr, unsigned Offset, SDValue Chain,
SDValue Arg, SDLoc DL, bool IsTailCall,
SDValue Arg, const SDLoc &DL, bool IsTailCall,
SelectionDAG &DAG) const;
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
@@ -500,14 +503,13 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const override;
SDValue LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc dl, SelectionDAG &DAG) const override;
const SDLoc &dl, SelectionDAG &DAG) const override;
SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, SDLoc DL,
SelectionDAG &DAG) const;
SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
const SDLoc &DL, SelectionDAG &DAG) const;
bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;

Some files were not shown because too many files have changed in this diff.