Mirror of https://github.com/RPCSX/llvm.git (synced 2024-11-28 14:10:41 +00:00)
[FastISel] Rename public visible FastISel functions. NFC.
This commit renames the following public FastISel functions:

    LowerArguments             -> lowerArguments
    SelectInstruction          -> selectInstruction
    TargetSelectInstruction    -> fastSelectInstruction
    FastLowerArguments         -> fastLowerArguments
    FastLowerCall              -> fastLowerCall
    FastLowerIntrinsicCall     -> fastLowerIntrinsicCall
    FastEmitZExtFromI1         -> fastEmitZExtFromI1
    FastEmitBranch             -> fastEmitBranch
    UpdateValueMap             -> updateValueMap
    TargetMaterializeConstant  -> fastMaterializeConstant
    TargetMaterializeAlloca    -> fastMaterializeAlloca
    TargetMaterializeFloatZero -> fastMaterializeFloatZero
    LowerCallTo                -> lowerCallTo

Reviewed by Eric

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@217074 91177308-0d34-0410-b5e6-96231b3b80d8
Parent: e361d518d1
Commit: 6042034603
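
The rename is purely mechanical (NFC). As a rough illustration of what it means for a target backend, the sketch below shows how a target's overrides read after this commit. The class name MyTargetFastISel and the scaffolding around it are hypothetical; only the override names and signatures are taken from the diff that follows.

```cpp
#include "llvm/CodeGen/FastISel.h"

using namespace llvm;

// Hypothetical backend class, shown only to illustrate the new spellings.
// Before this commit these overrides were named TargetSelectInstruction,
// TargetMaterializeConstant and FastLowerCall respectively.
class MyTargetFastISel final : public FastISel {
public:
  bool fastSelectInstruction(const Instruction *I) override;
  unsigned fastMaterializeConstant(const Constant *C) override;
  bool fastLowerCall(CallLoweringInfo &CLI) override;
};
```

The diff below applies the same renaming to the FastISel interface, its implementation, the SelectionDAG driver, and the AArch64, ARM, Mips, PowerPC, and X86 backends.
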
@@ -214,12 +214,12 @@ public:
/// \brief Do "fast" instruction selection for function arguments and append
/// the machine instructions to the current block. Returns true when
/// successful.
-bool LowerArguments();
+bool lowerArguments();

/// \brief Do "fast" instruction selection for the given LLVM IR instruction
/// and append the generated machine instructions to the current block.
/// Returns true if selection was successful.
-bool SelectInstruction(const Instruction *I);
+bool selectInstruction(const Instruction *I);

/// \brief Do "fast" instruction selection for the given LLVM IR operator
/// (Instruction or ConstantExpr), and append generated machine instructions

@@ -295,19 +295,19 @@ protected:
/// FastISel process fails to select an instruction. This gives targets a
/// chance to emit code for anything that doesn't fit into FastISel's
/// framework. It returns true if it was successful.
-virtual bool TargetSelectInstruction(const Instruction *I) = 0;
+virtual bool fastSelectInstruction(const Instruction *I) = 0;

/// \brief This method is called by target-independent code to do target-
/// specific argument lowering. It returns true if it was successful.
-virtual bool FastLowerArguments();
+virtual bool fastLowerArguments();

/// \brief This method is called by target-independent code to do target-
/// specific call lowering. It returns true if it was successful.
-virtual bool FastLowerCall(CallLoweringInfo &CLI);
+virtual bool fastLowerCall(CallLoweringInfo &CLI);

/// \brief This method is called by target-independent code to do target-
/// specific intrinsic lowering. It returns true if it was successful.
-virtual bool FastLowerIntrinsicCall(const IntrinsicInst *II);
+virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);

/// \brief This method is called by target-independent code to request that an
/// instruction with the given type and opcode be emitted.

@@ -433,11 +433,11 @@ protected:

/// \brief Emit MachineInstrs to compute the value of Op with all but the
/// least significant bit set to zero.
-unsigned FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);
+unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);

/// \brief Emit an unconditional branch to the given block, unless it is the
/// immediate (fall-through) successor, and update the CFG.
-void FastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);
+void fastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);

/// \brief Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previous

@@ -446,7 +446,7 @@ protected:
/// NOTE: This is only necessary because we might select a block that uses a
/// value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
-void UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs = 1);
+void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs = 1);

unsigned createResultReg(const TargetRegisterClass *RC);

@@ -458,14 +458,14 @@ protected:

/// \brief Emit a constant in a register using target-specific logic, such as
/// constant pool loads.
-virtual unsigned TargetMaterializeConstant(const Constant *C) { return 0; }
+virtual unsigned fastMaterializeConstant(const Constant *C) { return 0; }

/// \brief Emit an alloca address in a register using target-specific logic.
-virtual unsigned TargetMaterializeAlloca(const AllocaInst *C) { return 0; }
+virtual unsigned fastMaterializeAlloca(const AllocaInst *C) { return 0; }

/// \brief Emit the floating-point constant +0.0 in a register using target-
/// specific logic.
-virtual unsigned TargetMaterializeFloatZero(const ConstantFP *CF) {
+virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF) {
return 0;
}

@@ -484,7 +484,7 @@ protected:
/// \brief Create a machine mem operand from the given instruction.
MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;

-bool LowerCallTo(const CallInst *CI, const char *SymName, unsigned NumArgs);
+bool lowerCallTo(const CallInst *CI, const char *SymName, unsigned NumArgs);
bool lowerCallTo(CallLoweringInfo &CLI);

bool isCommutativeIntrinsic(IntrinsicInst const *II) {

@@ -499,6 +499,8 @@ protected:
}
}

+bool lowerCall(const CallInst *I);
+
/// \brief Select and emit code for a binary operator instruction, which has
/// an opcode which directly corresponds to the given ISD opcode.
bool selectBinaryOp(const User *I, unsigned ISDOpcode);

@@ -506,7 +508,6 @@ protected:
bool selectGetElementPtr(const User *I);
bool selectStackmap(const CallInst *I);
bool selectPatchpoint(const CallInst *I);
-bool lowerCall(const CallInst *I);
bool selectCall(const User *Call);
bool selectIntrinsicCall(const IntrinsicInst *II);
bool selectBitCast(const User *I);

@@ -103,13 +103,13 @@ void FastISel::startNewBlock() {
LastLocalValue = EmitStartPt;
}

-bool FastISel::LowerArguments() {
+bool FastISel::lowerArguments() {
if (!FuncInfo.CanLowerReturn)
// Fallback to SDISel argument lowering code to deal with sret pointer
// parameter.
return false;

-if (!FastLowerArguments())
+if (!fastLowerArguments())
return false;

// Enter arguments into ValueMap for uses in non-entry BBs.

@@ -209,7 +209,7 @@ unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
if (CI->getValue().getActiveBits() <= 64)
Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
} else if (isa<AllocaInst>(V))
-Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
+Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
else if (isa<ConstantPointerNull>(V))
// Translate this as an integer zero so that it can be
// local-CSE'd with actual integer zeros.

@@ -217,7 +217,7 @@ unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
Constant::getNullValue(DL.getIntPtrType(V->getContext())));
else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
if (CF->isNullValue())
-Reg = TargetMaterializeFloatZero(CF);
+Reg = fastMaterializeFloatZero(CF);
else
// Try to emit the constant directly.
Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

@@ -245,7 +245,7 @@ unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
} else if (const auto *Op = dyn_cast<Operator>(V)) {
if (!selectOperator(Op, Op->getOpcode()))
if (!isa<Instruction>(Op) ||
-!TargetSelectInstruction(cast<Instruction>(Op)))
+!fastSelectInstruction(cast<Instruction>(Op)))
return 0;
Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {

@@ -263,7 +263,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
unsigned Reg = 0;
// Give the target-specific code a try first.
if (isa<Constant>(V))
-Reg = TargetMaterializeConstant(cast<Constant>(V));
+Reg = fastMaterializeConstant(cast<Constant>(V));

// If target-specific code couldn't or didn't want to handle the value, then
// give target-independent code a try.

@@ -290,7 +290,7 @@ unsigned FastISel::lookUpRegForValue(const Value *V) {
return LocalValueMap[V];
}

-void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
+void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
if (!isa<Instruction>(I)) {
LocalValueMap[I] = Reg;
return;

@@ -412,7 +412,7 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
return false;

// We successfully emitted code for the given LLVM Instruction.
-UpdateValueMap(I, ResultReg);
+updateValueMap(I, ResultReg);
return true;
}

@@ -445,7 +445,7 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
return false;

// We successfully emitted code for the given LLVM Instruction.
-UpdateValueMap(I, ResultReg);
+updateValueMap(I, ResultReg);
return true;
}

@@ -455,7 +455,7 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
ISDOpcode, Op0, Op0IsKill, CF);
if (ResultReg) {
// We successfully emitted code for the given LLVM Instruction.
-UpdateValueMap(I, ResultReg);
+updateValueMap(I, ResultReg);
return true;
}
}

@@ -474,7 +474,7 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
return false;

// We successfully emitted code for the given LLVM Instruction.
-UpdateValueMap(I, ResultReg);
+updateValueMap(I, ResultReg);
return true;
}

@@ -562,7 +562,7 @@ bool FastISel::selectGetElementPtr(const User *I) {
}

// We successfully emitted code for the given LLVM Instruction.
-UpdateValueMap(I, N);
+updateValueMap(I, N);
return true;
}

@@ -829,7 +829,7 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
FuncInfo.MF->getFrameInfo()->setHasPatchPoint();

if (CLI.NumResultRegs)
-UpdateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
+updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
return true;
}

@@ -848,7 +848,7 @@ static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
Attrs);
}

-bool FastISel::LowerCallTo(const CallInst *CI, const char *SymName,
+bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
unsigned NumArgs) {
ImmutableCallSite CS(CI);

@@ -966,7 +966,7 @@ bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
CLI.OutFlags.push_back(Flags);
}

-if (!FastLowerCall(CLI))
+if (!fastLowerCall(CLI))
return false;

// Set all unused physreg defs as dead.

@@ -974,7 +974,7 @@ bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

if (CLI.NumResultRegs && CLI.CS)
-UpdateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);
+updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

return true;
}

@@ -1007,7 +1007,7 @@ bool FastISel::lowerCall(const CallInst *CI) {
}

// Check if target-independent constraints permit a tail call here.
-// Target-dependent constraints are checked within FastLowerCall.
+// Target-dependent constraints are checked within fastLowerCall.
bool IsTailCall = CI->isTailCall();
if (IsTailCall && !isInTailCallPosition(CS, TM))
IsTailCall = false;

@@ -1185,14 +1185,14 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
unsigned ResultReg = getRegForValue(ResCI);
if (!ResultReg)
return false;
-UpdateValueMap(II, ResultReg);
+updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::expect: {
unsigned ResultReg = getRegForValue(II->getArgOperand(0));
if (!ResultReg)
return false;
-UpdateValueMap(II, ResultReg);
+updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::experimental_stackmap:

@@ -1202,7 +1202,7 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
return selectPatchpoint(II);
}

-return FastLowerIntrinsicCall(II);
+return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {

@@ -1234,7 +1234,7 @@ bool FastISel::selectCast(const User *I, unsigned Opcode) {
if (!ResultReg)
return false;

-UpdateValueMap(I, ResultReg);
+updateValueMap(I, ResultReg);
return true;
}

@@ -1244,7 +1244,7 @@ bool FastISel::selectBitCast(const User *I) {
unsigned Reg = getRegForValue(I->getOperand(0));
if (!Reg)
return false;
-UpdateValueMap(I, Reg);
+updateValueMap(I, Reg);
return true;
}

@@ -1283,11 +1283,11 @@ bool FastISel::selectBitCast(const User *I) {
if (!ResultReg)
return false;

-UpdateValueMap(I, ResultReg);
+updateValueMap(I, ResultReg);
return true;
}

-bool FastISel::SelectInstruction(const Instruction *I) {
+bool FastISel::selectInstruction(const Instruction *I) {
// Just before the terminator instruction, insert instructions to
// feed PHI nodes in successor blocks.
if (isa<TerminatorInst>(I))

@@ -1332,7 +1332,7 @@ bool FastISel::SelectInstruction(const Instruction *I) {
SavedInsertPt = FuncInfo.InsertPt;
}
// Next, try calling the target to attempt to handle the instruction.
-if (TargetSelectInstruction(I)) {
+if (fastSelectInstruction(I)) {
++NumFastIselSuccessTarget;
DbgLoc = DebugLoc();
return true;

@@ -1352,10 +1352,9 @@ bool FastISel::SelectInstruction(const Instruction *I) {
return false;
}

-/// FastEmitBranch - Emit an unconditional branch to the given block,
-/// unless it is the immediate (fall-through) successor, and update
-/// the CFG.
-void FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
+/// Emit an unconditional branch to the given block, unless it is the immediate
+/// (fall-through) successor, and update the CFG.
+void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
// For more accurate line information if this is the only instruction

@@ -1373,8 +1372,7 @@ void FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
FuncInfo.MBB->addSuccessor(MSucc, BranchWeight);
}

-/// SelectFNeg - Emit an FNeg operation.
-///
+/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
if (!OpReg)

@@ -1386,7 +1384,7 @@ bool FastISel::selectFNeg(const User *I) {
unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
OpReg, OpRegIsKill);
if (ResultReg) {
-UpdateValueMap(I, ResultReg);
+updateValueMap(I, ResultReg);
return true;
}

@@ -1414,7 +1412,7 @@ bool FastISel::selectFNeg(const User *I) {
if (!ResultReg)
return false;

-UpdateValueMap(I, ResultReg);
+updateValueMap(I, ResultReg);
return true;
}

@@ -1454,7 +1452,7 @@ bool FastISel::selectExtractValue(const User *U) {
for (unsigned i = 0; i < VTIndex; i++)
ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

-UpdateValueMap(EVI, ResultReg);
+updateValueMap(EVI, ResultReg);
return true;
}

@@ -1509,7 +1507,7 @@ bool FastISel::selectOperator(const User *I, unsigned Opcode) {
if (BI->isUnconditional()) {
const BasicBlock *LLVMSucc = BI->getSuccessor(0);
MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
-FastEmitBranch(MSucc, BI->getDebugLoc());
+fastEmitBranch(MSucc, BI->getDebugLoc());
return true;
}

@@ -1560,7 +1558,7 @@ bool FastISel::selectOperator(const User *I, unsigned Opcode) {
unsigned Reg = getRegForValue(I->getOperand(0));
if (!Reg)
return false;
-UpdateValueMap(I, Reg);
+updateValueMap(I, Reg);
return true;
}

@@ -1589,11 +1587,11 @@ FastISel::FastISel(FunctionLoweringInfo &FuncInfo,

FastISel::~FastISel() {}

-bool FastISel::FastLowerArguments() { return false; }
+bool FastISel::fastLowerArguments() { return false; }

-bool FastISel::FastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
+bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

-bool FastISel::FastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
+bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
return false;
}

@@ -1636,8 +1634,8 @@ unsigned FastISel::FastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
return 0;
}

-/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
-/// to emit an instruction with an immediate operand using FastEmit_ri.
+/// This method is a wrapper of FastEmit_ri. It first tries to emit an
+/// instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and try
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,

@@ -1950,9 +1948,9 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
return ResultReg;
}

-/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
-/// with all but the least significant bit set to zero.
-unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
+/// Emit MachineInstrs to compute the value of Op with all but the least
+/// significant bit set to zero.
+unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

@@ -1095,7 +1095,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
++NumEntryBlocks;

// Lower any arguments needed in this block if this is the entry block.
-if (!FastIS->LowerArguments()) {
+if (!FastIS->lowerArguments()) {
// Fast isel failed to lower these arguments
++NumFastIselFailLowerArguments;
if (EnableFastISelAbortArgs)

@@ -1133,7 +1133,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
FastIS->recomputeInsertPt();

// Try to select the instruction with FastISel.
-if (FastIS->SelectInstruction(Inst)) {
+if (FastIS->selectInstruction(Inst)) {
--NumFastIselRemaining;
++NumFastIselSuccess;
// If fast isel succeeded, skip over all the folded instructions, and

@ -107,9 +107,9 @@ class AArch64FastISel : public FastISel {
|
||||
const AArch64Subtarget *Subtarget;
|
||||
LLVMContext *Context;
|
||||
|
||||
bool FastLowerArguments() override;
|
||||
bool FastLowerCall(CallLoweringInfo &CLI) override;
|
||||
bool FastLowerIntrinsicCall(const IntrinsicInst *II) override;
|
||||
bool fastLowerArguments() override;
|
||||
bool fastLowerCall(CallLoweringInfo &CLI) override;
|
||||
bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
|
||||
|
||||
private:
|
||||
// Selection routines.
|
||||
@ -226,9 +226,9 @@ private:
|
||||
|
||||
public:
|
||||
// Backend specific FastISel code.
|
||||
unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
|
||||
unsigned TargetMaterializeConstant(const Constant *C) override;
|
||||
unsigned TargetMaterializeFloatZero(const ConstantFP* CF) override;
|
||||
unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
|
||||
unsigned fastMaterializeConstant(const Constant *C) override;
|
||||
unsigned fastMaterializeFloatZero(const ConstantFP* CF) override;
|
||||
|
||||
explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
|
||||
const TargetLibraryInfo *LibInfo)
|
||||
@ -237,7 +237,7 @@ public:
|
||||
Context = &FuncInfo.Fn->getContext();
|
||||
}
|
||||
|
||||
bool TargetSelectInstruction(const Instruction *I) override;
|
||||
bool fastSelectInstruction(const Instruction *I) override;
|
||||
|
||||
#include "AArch64GenFastISel.inc"
|
||||
};
|
||||
@ -252,7 +252,7 @@ CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const {
|
||||
return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
|
||||
}
|
||||
|
||||
unsigned AArch64FastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
|
||||
unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
|
||||
assert(TLI.getValueType(AI->getType(), true) == MVT::i64 &&
|
||||
"Alloca should always return a pointer.");
|
||||
|
||||
@ -297,7 +297,7 @@ unsigned AArch64FastISel::AArch64MaterializeFP(const ConstantFP *CFP, MVT VT) {
|
||||
// Positive zero (+0.0) has to be materialized with a fmov from the zero
|
||||
// register, because the immediate version of fmov cannot encode zero.
|
||||
if (CFP->isNullValue())
|
||||
return TargetMaterializeFloatZero(CFP);
|
||||
return fastMaterializeFloatZero(CFP);
|
||||
|
||||
if (VT != MVT::f32 && VT != MVT::f64)
|
||||
return 0;
|
||||
@ -380,7 +380,7 @@ unsigned AArch64FastISel::AArch64MaterializeGV(const GlobalValue *GV) {
|
||||
return ResultReg;
|
||||
}
|
||||
|
||||
unsigned AArch64FastISel::TargetMaterializeConstant(const Constant *C) {
|
||||
unsigned AArch64FastISel::fastMaterializeConstant(const Constant *C) {
|
||||
EVT CEVT = TLI.getValueType(C->getType(), true);
|
||||
|
||||
// Only handle simple types.
|
||||
@ -398,7 +398,7 @@ unsigned AArch64FastISel::TargetMaterializeConstant(const Constant *C) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned AArch64FastISel::TargetMaterializeFloatZero(const ConstantFP* CFP) {
|
||||
unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) {
|
||||
assert(CFP->isNullValue() &&
|
||||
"Floating-point constant is not a positive zero.");
|
||||
MVT VT;
|
||||
@ -1337,7 +1337,7 @@ bool AArch64FastISel::selectAddSub(const Instruction *I) {
|
||||
llvm_unreachable("Unexpected instruction.");
|
||||
|
||||
assert(ResultReg && "Couldn't select Add/Sub instruction.");
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1358,7 +1358,7 @@ bool AArch64FastISel::SelectLoad(const Instruction *I) {
|
||||
if (!EmitLoad(VT, ResultReg, Addr, createMachineMemOperandFor(I)))
|
||||
return false;
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1526,7 +1526,7 @@ bool AArch64FastISel::SelectBranch(const Instruction *I) {
|
||||
const BranchInst *BI = cast<BranchInst>(I);
|
||||
if (BI->isUnconditional()) {
|
||||
MachineBasicBlock *MSucc = FuncInfo.MBBMap[BI->getSuccessor(0)];
|
||||
FastEmitBranch(MSucc, BI->getDebugLoc());
|
||||
fastEmitBranch(MSucc, BI->getDebugLoc());
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1557,7 +1557,7 @@ bool AArch64FastISel::SelectBranch(const Instruction *I) {
|
||||
TBB->getBasicBlock());
|
||||
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
|
||||
|
||||
FastEmitBranch(FBB, DbgLoc);
|
||||
fastEmitBranch(FBB, DbgLoc);
|
||||
return true;
|
||||
}
|
||||
} else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
|
||||
@ -1595,7 +1595,7 @@ bool AArch64FastISel::SelectBranch(const Instruction *I) {
|
||||
TBB->getBasicBlock());
|
||||
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
|
||||
|
||||
FastEmitBranch(FBB, DbgLoc);
|
||||
fastEmitBranch(FBB, DbgLoc);
|
||||
return true;
|
||||
}
|
||||
} else if (const ConstantInt *CI =
|
||||
@ -1631,7 +1631,7 @@ bool AArch64FastISel::SelectBranch(const Instruction *I) {
|
||||
TBB->getBasicBlock());
|
||||
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
|
||||
|
||||
FastEmitBranch(FBB, DbgLoc);
|
||||
fastEmitBranch(FBB, DbgLoc);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1665,7 +1665,7 @@ bool AArch64FastISel::SelectBranch(const Instruction *I) {
|
||||
TBB->getBasicBlock());
|
||||
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
|
||||
|
||||
FastEmitBranch(FBB, DbgLoc);
|
||||
fastEmitBranch(FBB, DbgLoc);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1708,7 +1708,7 @@ bool AArch64FastISel::SelectCmp(const Instruction *I) {
|
||||
.addReg(AArch64::WZR)
|
||||
.addImm(invertedCC);
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1766,7 +1766,7 @@ bool AArch64FastISel::SelectSelect(const Instruction *I) {
|
||||
|
||||
unsigned ResultReg = FastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
|
||||
FalseReg, FalseIsKill, CC);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1782,7 +1782,7 @@ bool AArch64FastISel::SelectFPExt(const Instruction *I) {
|
||||
unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
|
||||
ResultReg).addReg(Op);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1798,7 +1798,7 @@ bool AArch64FastISel::SelectFPTrunc(const Instruction *I) {
|
||||
unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
|
||||
ResultReg).addReg(Op);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1832,7 +1832,7 @@ bool AArch64FastISel::SelectFPToInt(const Instruction *I, bool Signed) {
|
||||
DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
|
||||
.addReg(SrcReg);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1874,11 +1874,11 @@ bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) {
|
||||
|
||||
unsigned ResultReg = FastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
|
||||
SrcIsKill);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool AArch64FastISel::FastLowerArguments() {
|
||||
bool AArch64FastISel::fastLowerArguments() {
|
||||
if (!FuncInfo.CanLowerReturn)
|
||||
return false;
|
||||
|
||||
@ -1968,7 +1968,7 @@ bool AArch64FastISel::FastLowerArguments() {
|
||||
|
||||
// Skip unused arguments.
|
||||
if (Arg.use_empty()) {
|
||||
UpdateValueMap(&Arg, 0);
|
||||
updateValueMap(&Arg, 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -1980,7 +1980,7 @@ bool AArch64FastISel::FastLowerArguments() {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg)
|
||||
.addReg(DstReg, getKillRegState(true));
|
||||
UpdateValueMap(&Arg, ResultReg);
|
||||
updateValueMap(&Arg, ResultReg);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@ -2110,7 +2110,7 @@ bool AArch64FastISel::FinishCall(CallLoweringInfo &CLI, MVT RetVT,
|
||||
return true;
|
||||
}
|
||||
|
||||
bool AArch64FastISel::FastLowerCall(CallLoweringInfo &CLI) {
|
||||
bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
|
||||
CallingConv::ID CC = CLI.CallConv;
|
||||
bool IsTailCall = CLI.IsTailCall;
|
||||
bool IsVarArg = CLI.IsVarArg;
|
||||
@ -2345,7 +2345,7 @@ bool AArch64FastISel::foldXALUIntrinsic(AArch64CC::CondCode &CC,
|
||||
return true;
|
||||
}
|
||||
|
||||
bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
// FIXME: Handle more intrinsics.
|
||||
switch (II->getIntrinsicID()) {
|
||||
default: return false;
|
||||
@ -2374,7 +2374,7 @@ bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
SrcReg = DestReg;
|
||||
}
|
||||
|
||||
UpdateValueMap(II, SrcReg);
|
||||
updateValueMap(II, SrcReg);
|
||||
return true;
|
||||
}
|
||||
case Intrinsic::memcpy:
|
||||
@ -2411,7 +2411,7 @@ bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
return false;
|
||||
|
||||
const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
|
||||
return LowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
|
||||
return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
|
||||
}
|
||||
case Intrinsic::memset: {
|
||||
const MemSetInst *MSI = cast<MemSetInst>(II);
|
||||
@ -2427,7 +2427,7 @@ bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
// address spaces.
|
||||
return false;
|
||||
|
||||
return LowerCallTo(II, "memset", II->getNumArgOperands() - 2);
|
||||
return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
|
||||
}
|
||||
case Intrinsic::trap: {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
|
||||
@ -2450,7 +2450,7 @@ bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
UpdateValueMap(II, ResultReg);
|
||||
updateValueMap(II, ResultReg);
|
||||
return true;
|
||||
}
|
||||
case Intrinsic::sadd_with_overflow:
|
||||
@ -2572,7 +2572,7 @@ bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
/*IsKill=*/true, getInvertedCondCode(CC));
|
||||
assert((ResultReg1 + 1) == ResultReg2 &&
|
||||
"Nonconsecutive result registers.");
|
||||
UpdateValueMap(II, ResultReg1, 2);
|
||||
updateValueMap(II, ResultReg1, 2);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -2730,7 +2730,7 @@ bool AArch64FastISel::SelectTrunc(const Instruction *I) {
|
||||
.addReg(SrcReg, getKillRegState(SrcIsKill));
|
||||
}
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -3212,7 +3212,7 @@ bool AArch64FastISel::SelectIntExt(const Instruction *I) {
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -3258,7 +3258,7 @@ bool AArch64FastISel::SelectRem(const Instruction *I, unsigned ISDOpcode) {
|
||||
unsigned ResultReg = FastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
|
||||
Src1Reg, Src1IsKill, Src0Reg,
|
||||
Src0IsKill);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -3289,7 +3289,7 @@ bool AArch64FastISel::SelectMul(const Instruction *I) {
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -3340,7 +3340,7 @@ bool AArch64FastISel::SelectShift(const Instruction *I) {
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -3371,7 +3371,7 @@ bool AArch64FastISel::SelectShift(const Instruction *I) {
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -3412,11 +3412,11 @@ bool AArch64FastISel::SelectBitCast(const Instruction *I) {
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool AArch64FastISel::TargetSelectInstruction(const Instruction *I) {
|
||||
bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
|
||||
switch (I->getOpcode()) {
|
||||
default:
|
||||
return false;
|
||||
@ -3537,7 +3537,7 @@ bool AArch64FastISel::TargetSelectInstruction(const Instruction *I) {
|
||||
unsigned Reg = getRegForValue(I->getOperand(0));
|
||||
if (!Reg)
|
||||
return false;
|
||||
UpdateValueMap(I, Reg);
|
||||
updateValueMap(I, Reg);
|
||||
return true;
|
||||
}
|
||||
case Instruction::ExtractValue:
|
||||
|
@ -132,12 +132,12 @@ class ARMFastISel final : public FastISel {
|
||||
|
||||
// Backend specific FastISel code.
|
||||
private:
|
||||
bool TargetSelectInstruction(const Instruction *I) override;
|
||||
unsigned TargetMaterializeConstant(const Constant *C) override;
|
||||
unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
|
||||
bool fastSelectInstruction(const Instruction *I) override;
|
||||
unsigned fastMaterializeConstant(const Constant *C) override;
|
||||
unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
|
||||
bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
|
||||
const LoadInst *LI) override;
|
||||
bool FastLowerArguments() override;
|
||||
bool fastLowerArguments() override;
|
||||
private:
|
||||
#include "ARMGenFastISel.inc"
|
||||
|
||||
@ -687,7 +687,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
|
||||
return DestReg;
|
||||
}
|
||||
|
||||
unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
|
||||
unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
|
||||
EVT CEVT = TLI.getValueType(C->getType(), true);
|
||||
|
||||
// Only handle simple types.
|
||||
@ -706,7 +706,7 @@ unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
|
||||
|
||||
// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);
|
||||
|
||||
unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
|
||||
unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
|
||||
// Don't handle dynamic allocas.
|
||||
if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
|
||||
|
||||
@ -1082,7 +1082,7 @@ bool ARMFastISel::SelectLoad(const Instruction *I) {
|
||||
unsigned ResultReg;
|
||||
if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
|
||||
return false;
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1284,7 +1284,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
|
||||
unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
|
||||
.addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
|
||||
FastEmitBranch(FBB, DbgLoc);
|
||||
fastEmitBranch(FBB, DbgLoc);
|
||||
FuncInfo.MBB->addSuccessor(TBB);
|
||||
return true;
|
||||
}
|
||||
@ -1309,7 +1309,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
|
||||
.addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
|
||||
|
||||
FastEmitBranch(FBB, DbgLoc);
|
||||
fastEmitBranch(FBB, DbgLoc);
|
||||
FuncInfo.MBB->addSuccessor(TBB);
|
||||
return true;
|
||||
}
|
||||
@ -1317,7 +1317,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
|
||||
dyn_cast<ConstantInt>(BI->getCondition())) {
|
||||
uint64_t Imm = CI->getZExtValue();
|
||||
MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
|
||||
FastEmitBranch(Target, DbgLoc);
|
||||
fastEmitBranch(Target, DbgLoc);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1347,7 +1347,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
|
||||
unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
|
||||
.addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
|
||||
FastEmitBranch(FBB, DbgLoc);
|
||||
fastEmitBranch(FBB, DbgLoc);
|
||||
FuncInfo.MBB->addSuccessor(TBB);
|
||||
return true;
|
||||
}
|
||||
@ -1505,13 +1505,13 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
|
||||
(const TargetRegisterClass*)&ARM::GPRRegClass;
|
||||
unsigned DestReg = createResultReg(RC);
|
||||
Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
|
||||
unsigned ZeroReg = TargetMaterializeConstant(Zero);
|
||||
unsigned ZeroReg = fastMaterializeConstant(Zero);
|
||||
// ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
|
||||
.addReg(ZeroReg).addImm(1)
|
||||
.addImm(ARMPred).addReg(ARM::CPSR);
|
||||
|
||||
UpdateValueMap(I, DestReg);
|
||||
updateValueMap(I, DestReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1530,7 +1530,7 @@ bool ARMFastISel::SelectFPExt(const Instruction *I) {
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(ARM::VCVTDS), Result)
|
||||
.addReg(Op));
|
||||
UpdateValueMap(I, Result);
|
||||
updateValueMap(I, Result);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1549,7 +1549,7 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(ARM::VCVTSD), Result)
|
||||
.addReg(Op));
|
||||
UpdateValueMap(I, Result);
|
||||
updateValueMap(I, Result);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1593,7 +1593,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
|
||||
unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(Opc), ResultReg).addReg(FP));
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1625,7 +1625,7 @@ bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
|
||||
unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
|
||||
if (IntReg == 0) return false;
|
||||
|
||||
UpdateValueMap(I, IntReg);
|
||||
updateValueMap(I, IntReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1701,7 +1701,7 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
|
||||
.addImm(ARMCC::EQ)
|
||||
.addReg(ARM::CPSR);
|
||||
}
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1791,7 +1791,7 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(Opc), ResultReg)
|
||||
.addReg(SrcReg1).addReg(SrcReg2));
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1833,7 +1833,7 @@ bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(Opc), ResultReg)
|
||||
.addReg(Op1).addReg(Op2));
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -2059,7 +2059,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
|
||||
UsedRegs.push_back(RVLocs[1].getLocReg());
|
||||
|
||||
// Finally update the result.
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
} else {
|
||||
assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
|
||||
MVT CopyVT = RVLocs[0].getValVT();
|
||||
@ -2077,7 +2077,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
|
||||
UsedRegs.push_back(RVLocs[0].getLocReg());
|
||||
|
||||
// Finally update the result.
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2520,7 +2520,7 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
|
||||
.addReg(SrcReg).addImm(0));
|
||||
SrcReg = DestReg;
|
||||
}
|
||||
UpdateValueMap(&I, SrcReg);
|
||||
updateValueMap(&I, SrcReg);
|
||||
return true;
|
||||
}
|
||||
case Intrinsic::memcpy:
|
||||
@ -2598,7 +2598,7 @@ bool ARMFastISel::SelectTrunc(const Instruction *I) {
|
||||
|
||||
// Because the high bits are undefined, a truncate doesn't generate
|
||||
// any code.
|
||||
UpdateValueMap(I, SrcReg);
|
||||
updateValueMap(I, SrcReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -2760,7 +2760,7 @@ bool ARMFastISel::SelectIntExt(const Instruction *I) {
|
||||
MVT DestVT = DestEVT.getSimpleVT();
|
||||
unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
|
||||
if (ResultReg == 0) return false;
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -2815,12 +2815,12 @@ bool ARMFastISel::SelectShift(const Instruction *I,
|
||||
}
|
||||
|
||||
AddOptionalDefs(MIB);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
// TODO: SoftFP support.
|
||||
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
|
||||
bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
|
||||
|
||||
switch (I->getOpcode()) {
|
||||
case Instruction::Load:
|
||||
@ -2998,7 +2998,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
|
||||
return DestReg2;
|
||||
}
|
||||
|
||||
bool ARMFastISel::FastLowerArguments() {
|
||||
bool ARMFastISel::fastLowerArguments() {
|
||||
if (!FuncInfo.CanLowerReturn)
|
||||
return false;
|
||||
|
||||
@ -3065,7 +3065,7 @@ bool ARMFastISel::FastLowerArguments() {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY),
|
||||
ResultReg).addReg(DstReg, getKillRegState(true));
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
}
|
||||
|
||||
return true;
|
||||
|
@ -64,8 +64,8 @@ public:
|
||||
(Subtarget->hasMips32r2() && (Subtarget->isABI_O32())));
|
||||
}
|
||||
|
||||
bool TargetSelectInstruction(const Instruction *I) override;
|
||||
unsigned TargetMaterializeConstant(const Constant *C) override;
|
||||
bool fastSelectInstruction(const Instruction *I) override;
|
||||
unsigned fastMaterializeConstant(const Constant *C) override;
|
||||
|
||||
bool ComputeAddress(const Value *Obj, Address &Addr);
|
||||
|
||||
@ -194,7 +194,7 @@ bool MipsFastISel::EmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
|
||||
|
||||
// Materialize a constant into a register, and return the register
|
||||
// number (or zero if we failed to handle it).
|
||||
unsigned MipsFastISel::TargetMaterializeConstant(const Constant *C) {
|
||||
unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
|
||||
EVT CEVT = TLI.getValueType(C->getType(), true);
|
||||
|
||||
// Only handle simple types.
|
||||
@ -259,7 +259,7 @@ bool MipsFastISel::SelectLoad(const Instruction *I) {
|
||||
unsigned ResultReg;
|
||||
if (!EmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
|
||||
return false;
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -303,7 +303,7 @@ bool MipsFastISel::SelectRet(const Instruction *I) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool MipsFastISel::TargetSelectInstruction(const Instruction *I) {
|
||||
bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
|
||||
if (!TargetSupported)
|
||||
return false;
|
||||
switch (I->getOpcode()) {
|
||||
|
@ -39,7 +39,7 @@
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// TBD:
|
||||
// FastLowerArguments: Handle simple cases.
|
||||
// fastLowerArguments: Handle simple cases.
|
||||
// PPCMaterializeGV: Handle TLS.
|
||||
// SelectCall: Handle function pointers.
|
||||
// SelectCall: Handle multi-register return values.
|
||||
@ -100,12 +100,12 @@ class PPCFastISel final : public FastISel {
|
||||
|
||||
// Backend specific FastISel code.
|
||||
private:
|
||||
bool TargetSelectInstruction(const Instruction *I) override;
|
||||
unsigned TargetMaterializeConstant(const Constant *C) override;
|
||||
unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
|
||||
bool fastSelectInstruction(const Instruction *I) override;
|
||||
unsigned fastMaterializeConstant(const Constant *C) override;
|
||||
unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
|
||||
bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
|
||||
const LoadInst *LI) override;
|
||||
bool FastLowerArguments() override;
|
||||
bool fastLowerArguments() override;
|
||||
unsigned FastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
|
||||
unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
@ -559,7 +559,7 @@ bool PPCFastISel::SelectLoad(const Instruction *I) {
|
||||
unsigned ResultReg = 0;
|
||||
if (!PPCEmitLoad(VT, ResultReg, Addr, RC))
|
||||
return false;
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -706,7 +706,7 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
|
||||
|
||||
BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCC))
|
||||
.addImm(PPCPred).addReg(CondReg).addMBB(TBB);
|
||||
FastEmitBranch(FBB, DbgLoc);
|
||||
fastEmitBranch(FBB, DbgLoc);
|
||||
FuncInfo.MBB->addSuccessor(TBB);
|
||||
return true;
|
||||
|
||||
@ -714,7 +714,7 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
|
||||
dyn_cast<ConstantInt>(BI->getCondition())) {
|
||||
uint64_t Imm = CI->getZExtValue();
|
||||
MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
|
||||
FastEmitBranch(Target, DbgLoc);
|
||||
fastEmitBranch(Target, DbgLoc);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -837,7 +837,7 @@ bool PPCFastISel::SelectFPExt(const Instruction *I) {
|
||||
return false;
|
||||
|
||||
// No code is generated for a FP extend.
|
||||
UpdateValueMap(I, SrcReg);
|
||||
updateValueMap(I, SrcReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -859,7 +859,7 @@ bool PPCFastISel::SelectFPTrunc(const Instruction *I) {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::FRSP), DestReg)
|
||||
.addReg(SrcReg);
|
||||
|
||||
UpdateValueMap(I, DestReg);
|
||||
updateValueMap(I, DestReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -978,7 +978,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
|
||||
.addReg(FPReg);
|
||||
|
||||
UpdateValueMap(I, DestReg);
|
||||
updateValueMap(I, DestReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1079,7 +1079,7 @@ bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
|
||||
if (IntReg == 0)
|
||||
return false;
|
||||
|
||||
UpdateValueMap(I, IntReg);
|
||||
updateValueMap(I, IntReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1168,7 +1168,7 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
|
||||
ResultReg)
|
||||
.addReg(SrcReg1)
|
||||
.addImm(Imm);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -1184,7 +1184,7 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
|
||||
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
|
||||
.addReg(SrcReg1).addReg(SrcReg2);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1366,7 +1366,7 @@ void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
|
||||
|
||||
assert(ResultReg && "ResultReg unset!");
|
||||
UsedRegs.push_back(SourcePhysReg);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1720,7 +1720,7 @@ bool PPCFastISel::SelectTrunc(const Instruction *I) {
|
||||
SrcReg = ResultReg;
|
||||
}
|
||||
|
||||
UpdateValueMap(I, SrcReg);
|
||||
updateValueMap(I, SrcReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1759,13 +1759,13 @@ bool PPCFastISel::SelectIntExt(const Instruction *I) {
|
||||
if (!PPCEmitIntExt(SrcVT, SrcReg, DestVT, ResultReg, IsZExt))
|
||||
return false;
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Attempt to fast-select an instruction that wasn't handled by
|
||||
// the table-generated machinery.
|
||||
bool PPCFastISel::TargetSelectInstruction(const Instruction *I) {
|
||||
bool PPCFastISel::fastSelectInstruction(const Instruction *I) {
|
||||
|
||||
switch (I->getOpcode()) {
|
||||
case Instruction::Load:
|
||||
@ -2054,7 +2054,7 @@ unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT) {
|
||||
|
||||
// Materialize a constant into a register, and return the register
|
||||
// number (or zero if we failed to handle it).
|
||||
unsigned PPCFastISel::TargetMaterializeConstant(const Constant *C) {
|
||||
unsigned PPCFastISel::fastMaterializeConstant(const Constant *C) {
|
||||
EVT CEVT = TLI.getValueType(C->getType(), true);
|
||||
|
||||
// Only handle simple types.
|
||||
@ -2073,7 +2073,7 @@ unsigned PPCFastISel::TargetMaterializeConstant(const Constant *C) {
|
||||
|
||||
// Materialize the address created by an alloca into a register, and
|
||||
// return the register number (or zero if we failed to handle it).
|
||||
unsigned PPCFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
|
||||
unsigned PPCFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
|
||||
// Don't handle dynamic allocas.
|
||||
if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
|
||||
|
||||
@ -2173,7 +2173,7 @@ bool PPCFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
|
||||
|
||||
// Attempt to lower call arguments in a faster way than done by
|
||||
// the selection DAG code.
|
||||
bool PPCFastISel::FastLowerArguments() {
|
||||
bool PPCFastISel::fastLowerArguments() {
|
||||
// Defer to normal argument lowering for now. It's reasonably
|
||||
// efficient. Consider doing something like ARM to handle the
|
||||
// case where all args fit in registers, no varargs, no float
|
||||
|
@ -64,7 +64,7 @@ public:
|
||||
X86ScalarSSEf32 = Subtarget->hasSSE1();
|
||||
}
|
||||
|
||||
bool TargetSelectInstruction(const Instruction *I) override;
|
||||
bool fastSelectInstruction(const Instruction *I) override;
|
||||
|
||||
/// \brief The specified machine instr operand is a vreg, and that
|
||||
/// vreg is being provided by the specified load instruction. If possible,
|
||||
@ -73,9 +73,9 @@ public:
|
||||
bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
|
||||
const LoadInst *LI) override;
|
||||
|
||||
bool FastLowerArguments() override;
|
||||
bool FastLowerCall(CallLoweringInfo &CLI) override;
|
||||
bool FastLowerIntrinsicCall(const IntrinsicInst *II) override;
|
||||
bool fastLowerArguments() override;
|
||||
bool fastLowerCall(CallLoweringInfo &CLI) override;
|
||||
bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
|
||||
|
||||
#include "X86GenFastISel.inc"
|
||||
|
||||
@ -138,11 +138,11 @@ private:
|
||||
unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
|
||||
unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
|
||||
unsigned X86MaterializeGV(const GlobalValue *GV,MVT VT);
|
||||
unsigned TargetMaterializeConstant(const Constant *C) override;
|
||||
unsigned fastMaterializeConstant(const Constant *C) override;
|
||||
|
||||
unsigned TargetMaterializeAlloca(const AllocaInst *C) override;
|
||||
unsigned fastMaterializeAlloca(const AllocaInst *C) override;
|
||||
|
||||
unsigned TargetMaterializeFloatZero(const ConstantFP *CF) override;
|
||||
unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;
|
||||
|
||||
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
|
||||
/// computed in an SSE register, not on the X87 floating point stack.
|
||||
@ -1038,7 +1038,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
|
||||
if (SrcVT == MVT::i1) {
|
||||
if (Outs[0].Flags.isSExt())
|
||||
return false;
|
||||
SrcReg = FastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);
|
||||
SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);
|
||||
SrcVT = MVT::i8;
|
||||
}
|
||||
unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
|
||||
@ -1106,7 +1106,7 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
|
||||
if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg))
|
||||
return false;
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1211,7 +1211,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
|
||||
}
|
||||
|
||||
if (ResultReg) {
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1252,7 +1252,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
|
||||
FlagReg2);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]),
|
||||
ResultReg).addReg(FlagReg1).addReg(FlagReg2);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1270,7 +1270,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
|
||||
return false;
|
||||
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1287,7 +1287,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
|
||||
MVT SrcVT = TLI.getSimpleValueType(I->getOperand(0)->getType());
|
||||
if (SrcVT.SimpleTy == MVT::i1) {
|
||||
// Set the high bits to zero.
|
||||
ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
|
||||
ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
|
||||
SrcVT = MVT::i8;
|
||||
|
||||
if (ResultReg == 0)
|
||||
@ -1320,7 +1320,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
|
||||
return false;
|
||||
}
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1344,8 +1344,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
|
||||
CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
|
||||
switch (Predicate) {
|
||||
default: break;
|
||||
case CmpInst::FCMP_FALSE: FastEmitBranch(FalseMBB, DbgLoc); return true;
|
||||
case CmpInst::FCMP_TRUE: FastEmitBranch(TrueMBB, DbgLoc); return true;
|
||||
case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true;
|
||||
case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true;
|
||||
}
|
||||
|
||||
const Value *CmpLHS = CI->getOperand(0);
|
||||
@ -1415,7 +1415,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
|
||||
|
||||
// Emits an unconditional branch to the FalseBB, obtains the branch
|
||||
// weight, and adds it to the successor list.
|
||||
FastEmitBranch(FalseMBB, DbgLoc);
|
||||
fastEmitBranch(FalseMBB, DbgLoc);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -1447,7 +1447,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {

BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc))
.addMBB(TrueMBB);
FastEmitBranch(FalseMBB, DbgLoc);
fastEmitBranch(FalseMBB, DbgLoc);
uint32_t BranchWeight = 0;
if (FuncInfo.BPI)
BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
@ -1467,7 +1467,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {

BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
.addMBB(TrueMBB);
FastEmitBranch(FalseMBB, DbgLoc);
fastEmitBranch(FalseMBB, DbgLoc);
uint32_t BranchWeight = 0;
if (FuncInfo.BPI)
BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
@ -1486,7 +1486,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
.addReg(OpReg).addImm(1);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_4))
.addMBB(TrueMBB);
FastEmitBranch(FalseMBB, DbgLoc);
fastEmitBranch(FalseMBB, DbgLoc);
uint32_t BranchWeight = 0;
if (FuncInfo.BPI)
BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
@ -1560,7 +1560,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
unsigned ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)
.addReg(Op0Reg);
UpdateValueMap(I, ResultReg);
updateValueMap(I, ResultReg);
return true;
}
@ -1723,7 +1723,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)
.addReg(OpEntry.DivRemResultReg);
}
UpdateValueMap(I, ResultReg);
updateValueMap(I, ResultReg);

return true;
}
@ -1841,7 +1841,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
unsigned ResultReg = FastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
LHSReg, LHSIsKill);
UpdateValueMap(I, ResultReg);
updateValueMap(I, ResultReg);
return true;
}

@ -1927,7 +1927,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
RHSReg, RHSIsKill);
unsigned ResultReg = FastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
AndReg, /*IsKill=*/true);
UpdateValueMap(I, ResultReg);
updateValueMap(I, ResultReg);
return true;
}

@ -1991,7 +1991,7 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {

unsigned ResultReg =
FastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
UpdateValueMap(I, ResultReg);
updateValueMap(I, ResultReg);
return true;
}
@ -2020,7 +2020,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(OpReg, getKillRegState(OpIsKill));
UpdateValueMap(I, ResultReg);
updateValueMap(I, ResultReg);
return true;
}
}
@ -2053,7 +2053,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::CVTSS2SDrr), ResultReg)
.addReg(OpReg);
UpdateValueMap(I, ResultReg);
updateValueMap(I, ResultReg);
return true;
}
}
@ -2072,7 +2072,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::CVTSD2SSrr), ResultReg)
.addReg(OpReg);
UpdateValueMap(I, ResultReg);
updateValueMap(I, ResultReg);
return true;
}
}
@ -2098,7 +2098,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {

if (SrcVT == MVT::i8) {
// Truncate from i8 to i1; no code needed.
UpdateValueMap(I, InputReg);
updateValueMap(I, InputReg);
return true;
}

@ -2121,7 +2121,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
if (!ResultReg)
return false;

UpdateValueMap(I, ResultReg);
updateValueMap(I, ResultReg);
return true;
}
@ -2165,7 +2165,7 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
return true;
}

bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// FIXME: Handle more intrinsics.
switch (II->getIntrinsicID()) {
default: return false;
@ -2218,7 +2218,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
SrcReg = DestReg;
}

UpdateValueMap(II, SrcReg);
updateValueMap(II, SrcReg);
return true;
}
case Intrinsic::memcpy: {
@ -2248,7 +2248,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
return false;

return LowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
}
case Intrinsic::memset: {
const MemSetInst *MSI = cast<MemSetInst>(II);
@ -2263,7 +2263,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
if (MSI->getDestAddressSpace() > 255)
return false;

return LowerCallTo(II, "memset", II->getNumArgOperands() - 2);
return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
}
case Intrinsic::stackprotector: {
// Emit code to store the stack guard onto the stack.
@ -2346,7 +2346,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {

MIB.addReg(SrcReg);

UpdateValueMap(II, ResultReg);
updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::sadd_with_overflow:
@ -2478,7 +2478,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc),
ResultReg2);

UpdateValueMap(II, ResultReg, 2);
updateValueMap(II, ResultReg, 2);
return true;
}
case Intrinsic::x86_sse_cvttss2si:
@ -2544,13 +2544,13 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(Reg);

UpdateValueMap(II, ResultReg);
updateValueMap(II, ResultReg);
return true;
}
}
}

bool X86FastISel::FastLowerArguments() {
bool X86FastISel::fastLowerArguments() {
if (!FuncInfo.CanLowerReturn)
return false;
@ -2640,7 +2640,7 @@ bool X86FastISel::FastLowerArguments() {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(DstReg, getKillRegState(true));
UpdateValueMap(&Arg, ResultReg);
updateValueMap(&Arg, ResultReg);
}
return true;
}
@ -2662,7 +2662,7 @@ static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget,
return 4;
}

bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
auto &OutVals = CLI.OutVals;
auto &OutFlags = CLI.OutFlags;
auto &OutRegs = CLI.OutRegs;
@ -2748,7 +2748,7 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {

if (!ResultReg)
return false;
UpdateValueMap(Val, ResultReg);
updateValueMap(Val, ResultReg);
}
}
}
@ -3051,7 +3051,7 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
}

bool
X86FastISel::TargetSelectInstruction(const Instruction *I) {
X86FastISel::fastSelectInstruction(const Instruction *I) {
switch (I->getOpcode()) {
default: break;
case Instruction::Load:
@ -3094,7 +3094,7 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
return X86SelectTrunc(I);
unsigned Reg = getRegForValue(I->getOperand(0));
if (Reg == 0) return false;
UpdateValueMap(I, Reg);
updateValueMap(I, Reg);
return true;
}
}
@ -3160,7 +3160,7 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {

unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
if (CFP->isNullValue())
return TargetMaterializeFloatZero(CFP);
return fastMaterializeFloatZero(CFP);

// Can't handle alternate code models yet.
CodeModel::Model CM = TM.getCodeModel();
@ -3273,7 +3273,7 @@ unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
return 0;
}

unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
EVT CEVT = TLI.getValueType(C->getType(), true);

// Only handle simple types.
@ -3291,14 +3291,14 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
return 0;
}

unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
// Fail on dynamic allocas. At this point, getRegForValue has already
// checked its CSE maps, so if we're here trying to handle a dynamic
// alloca, we're not going to succeed. X86SelectAddress has a
// check for dynamic allocas, because it's called directly from
// various places, but TargetMaterializeAlloca also needs a check
// various places, but targetMaterializeAlloca also needs a check
// in order to avoid recursion between getRegForValue,
// X86SelectAddrss, and TargetMaterializeAlloca.
// X86SelectAddrss, and targetMaterializeAlloca.
if (!FuncInfo.StaticAllocaMap.count(C))
return 0;
assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");
@ -3314,7 +3314,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
return ResultReg;
}

unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
MVT VT;
if (!isTypeLegal(CF->getType(), VT))
return 0;
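For context only, a minimal sketch of how a target-specific backend would override these hooks under their new names, as renamed throughout the diff above. It is not part of this commit: the class name MyTargetFastISel and its constructor are assumptions made up for the example; only the hook names and signatures (fastSelectInstruction, fastMaterializeConstant, fastLowerArguments) come from the diff itself.

// Illustrative sketch, not from this commit. A hypothetical backend
// overriding the renamed FastISel hooks; MyTargetFastISel and its
// constructor arguments are assumptions, while the overridden
// signatures match the ones shown in the diff above.
#include "llvm/CodeGen/FastISel.h"

namespace {
class MyTargetFastISel final : public llvm::FastISel {
public:
  MyTargetFastISel(llvm::FunctionLoweringInfo &FuncInfo,
                   const llvm::TargetLibraryInfo *LibInfo)
      : FastISel(FuncInfo, LibInfo) {}

  // Formerly TargetSelectInstruction: called for each IR instruction the
  // target-independent selector cannot handle; returning false falls
  // back to SelectionDAG.
  bool fastSelectInstruction(const llvm::Instruction *I) override {
    return false;
  }

  // Formerly TargetMaterializeConstant: returns 0 when the constant
  // cannot be placed in a register.
  unsigned fastMaterializeConstant(const llvm::Constant *C) override {
    return 0;
  }

  // Formerly FastLowerArguments: returning false keeps the generic
  // argument-lowering path.
  bool fastLowerArguments() override {
    return false;
  }
};
} // end anonymous namespace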