[FastISel][tblgen] Rename tblgen generated FastISel functions. NFC.
This is the final round of renaming. This changes tblgen to emit lower-case function names for FastEmitInst_* and FastEmit_*, and updates all its uses in the source code.

Reviewed by Eric

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@217075 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 6042034603
commit ecadea992a
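For orientation, a minimal sketch (not part of this commit) of how a target-specific FastISel subclass spells these hooks after the rename. The class name MyTargetFastISel is hypothetical; the member signatures mirror the declarations updated in FastISel.h and the PPCFastISel overrides shown in the diff below.

```cpp
// Sketch only: backend hooks after the FastEmit_* -> fastEmit_* rename.
#include "llvm/CodeGen/FastISel.h"

class MyTargetFastISel : public llvm::FastISel {
  // Virtual hook generated/used by tblgen; was FastEmit_i before this change.
  unsigned fastEmit_i(llvm::MVT Ty, llvm::MVT RetTy, unsigned Opc,
                      uint64_t Imm) override;
  // Non-virtual helper that targets may shadow; was FastEmitInst_ri.
  unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                           const llvm::TargetRegisterClass *RC,
                           unsigned Op0, bool Op0IsKill, uint64_t Imm);
};
```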
@@ -311,124 +311,124 @@ protected:
 /// \brief This method is called by target-independent code to request that an
 /// instruction with the given type and opcode be emitted.
-virtual unsigned FastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
+virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
 /// \brief This method is called by target-independent code to request that an
 /// instruction with the given type, opcode, and register operand be emitted.
-virtual unsigned FastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
 bool Op0IsKill);
 /// \brief This method is called by target-independent code to request that an
 /// instruction with the given type, opcode, and register operands be emitted.
-virtual unsigned FastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
 bool Op0IsKill, unsigned Op1, bool Op1IsKill);
 /// \brief This method is called by target-independent code to request that an
 /// instruction with the given type, opcode, and register and immediate
 // operands be emitted.
-virtual unsigned FastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
 bool Op0IsKill, uint64_t Imm);
 /// \brief This method is called by target-independent code to request that an
 /// instruction with the given type, opcode, and register and floating-point
 /// immediate operands be emitted.
-virtual unsigned FastEmit_rf(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+virtual unsigned fastEmit_rf(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
 bool Op0IsKill, const ConstantFP *FPImm);
 /// \brief This method is called by target-independent code to request that an
 /// instruction with the given type, opcode, and register and immediate
 /// operands be emitted.
-virtual unsigned FastEmit_rri(MVT VT, MVT RetVT, unsigned Opcode,
+virtual unsigned fastEmit_rri(MVT VT, MVT RetVT, unsigned Opcode,
 unsigned Op0, bool Op0IsKill, unsigned Op1,
 bool Op1IsKill, uint64_t Imm);
-/// \brief This method is a wrapper of FastEmit_ri.
+/// \brief This method is a wrapper of fastEmit_ri.
 ///
 /// It first tries to emit an instruction with an immediate operand using
-/// FastEmit_ri. If that fails, it materializes the immediate into a register
-/// and try FastEmit_rr instead.
-unsigned FastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
+/// fastEmit_ri. If that fails, it materializes the immediate into a register
+/// and try fastEmit_rr instead.
+unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
 uint64_t Imm, MVT ImmType);
 /// \brief This method is called by target-independent code to request that an
 /// instruction with the given type, opcode, and immediate operand be emitted.
-virtual unsigned FastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
+virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
 /// \brief This method is called by target-independent code to request that an
 /// instruction with the given type, opcode, and floating-point immediate
 /// operand be emitted.
-virtual unsigned FastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
+virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
 const ConstantFP *FPImm);
 /// \brief Emit a MachineInstr with no operands and a result register in the
 /// given register class.
-unsigned FastEmitInst_(unsigned MachineInstOpcode,
+unsigned fastEmitInst_(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC);
 /// \brief Emit a MachineInstr with one register operand and a result register
 /// in the given register class.
-unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned fastEmitInst_r(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill);
 /// \brief Emit a MachineInstr with two register operands and a result
 /// register in the given register class.
-unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, unsigned Op1, bool Op1IsKill);
 /// \brief Emit a MachineInstr with three register operands and a result
 /// register in the given register class.
-unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
+unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, unsigned Op1, bool Op1IsKill,
 unsigned Op2, bool Op2IsKill);
 /// \brief Emit a MachineInstr with a register operand, an immediate, and a
 /// result register in the given register class.
-unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, uint64_t Imm);
 /// \brief Emit a MachineInstr with one register operand and two immediate
 /// operands.
-unsigned FastEmitInst_rii(unsigned MachineInstOpcode,
+unsigned fastEmitInst_rii(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
 /// \brief Emit a MachineInstr with two register operands and a result
 /// register in the given register class.
-unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
+unsigned fastEmitInst_rf(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, const ConstantFP *FPImm);
 /// \brief Emit a MachineInstr with two register operands, an immediate, and a
 /// result register in the given register class.
-unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
+unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, unsigned Op1, bool Op1IsKill,
 uint64_t Imm);
 /// \brief Emit a MachineInstr with two register operands, two immediates
 /// operands, and a result register in the given register class.
-unsigned FastEmitInst_rrii(unsigned MachineInstOpcode,
+unsigned fastEmitInst_rrii(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, unsigned Op1, bool Op1IsKill,
 uint64_t Imm1, uint64_t Imm2);
 /// \brief Emit a MachineInstr with a single immediate operand, and a result
 /// register in the given register class.
-unsigned FastEmitInst_i(unsigned MachineInstrOpcode,
+unsigned fastEmitInst_i(unsigned MachineInstrOpcode,
 const TargetRegisterClass *RC, uint64_t Imm);
 /// \brief Emit a MachineInstr with a two immediate operands.
-unsigned FastEmitInst_ii(unsigned MachineInstrOpcode,
+unsigned fastEmitInst_ii(unsigned MachineInstrOpcode,
 const TargetRegisterClass *RC, uint64_t Imm1,
 uint64_t Imm2);
 /// \brief Emit a MachineInstr for an extract_subreg from a specified index of
 /// a superregister to a specified type.
-unsigned FastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
+unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
 uint32_t Idx);
 /// \brief Emit MachineInstrs to compute the value of Op with all but the
@@ -207,7 +207,7 @@ unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
 unsigned Reg = 0;
 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
 if (CI->getValue().getActiveBits() <= 64)
-Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
 } else if (isa<AllocaInst>(V))
 Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
 else if (isa<ConstantPointerNull>(V))
@@ -220,7 +220,7 @@ unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
 Reg = fastMaterializeFloatZero(CF);
 else
 // Try to emit the constant directly.
-Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
+Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
 if (!Reg) {
 // Try to emit the constant by using an integer constant with a cast.
@@ -238,7 +238,7 @@ unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
 unsigned IntegerReg =
 getRegForValue(ConstantInt::get(V->getContext(), IntVal));
 if (IntegerReg != 0)
-Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
+Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
 /*Kill=*/false);
 }
 }
@@ -321,12 +321,12 @@ std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
 MVT PtrVT = TLI.getPointerTy();
 EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
 if (IdxVT.bitsLT(PtrVT)) {
-IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
+IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
 IdxNIsKill);
 IdxNIsKill = true;
 } else if (IdxVT.bitsGT(PtrVT)) {
 IdxN =
-FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
+fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
 IdxNIsKill = true;
 }
 return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
@@ -406,7 +406,7 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
 bool Op1IsKill = hasTrivialKill(I->getOperand(1));
 unsigned ResultReg =
-FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
+fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
 CI->getZExtValue(), VT.getSimpleVT());
 if (!ResultReg)
 return false;
@@ -439,7 +439,7 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
 ISDOpcode = ISD::AND;
 }
-unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
+unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
 Op0IsKill, Imm, VT.getSimpleVT());
 if (!ResultReg)
 return false;
@@ -451,7 +451,7 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
 // Check if the second operand is a constant float.
 if (const auto *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
-unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
+unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
 ISDOpcode, Op0, Op0IsKill, CF);
 if (ResultReg) {
 // We successfully emitted code for the given LLVM Instruction.
@@ -466,7 +466,7 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
 bool Op1IsKill = hasTrivialKill(I->getOperand(1));
 // Now we have both operands in registers. Emit the instruction.
-unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
+unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
 ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
 if (!ResultReg)
 // Target-specific code wasn't able to find a machine opcode for
@@ -501,7 +501,7 @@ bool FastISel::selectGetElementPtr(const User *I) {
 // N = N + Offset
 TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
 if (TotalOffs >= MaxOffs) {
-N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
 if (!N) // Unhandled operand. Halt "fast" selection and bail.
 return false;
 NIsKill = true;
@@ -520,7 +520,7 @@ bool FastISel::selectGetElementPtr(const User *I) {
 TotalOffs +=
 DL.getTypeAllocSize(Ty) * cast<ConstantInt>(CI)->getSExtValue();
 if (TotalOffs >= MaxOffs) {
-N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
 if (!N) // Unhandled operand. Halt "fast" selection and bail.
 return false;
 NIsKill = true;
@@ -529,7 +529,7 @@ bool FastISel::selectGetElementPtr(const User *I) {
 continue;
 }
 if (TotalOffs) {
-N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
 if (!N) // Unhandled operand. Halt "fast" selection and bail.
 return false;
 NIsKill = true;
@@ -545,18 +545,18 @@ bool FastISel::selectGetElementPtr(const User *I) {
 return false;
 if (ElementSize != 1) {
-IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
+IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
 return false;
 IdxNIsKill = true;
 }
-N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
+N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
 if (!N) // Unhandled operand. Halt "fast" selection and bail.
 return false;
 }
 }
 if (TotalOffs) {
-N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
 if (!N) // Unhandled operand. Halt "fast" selection and bail.
 return false;
 }
@@ -1229,7 +1229,7 @@ bool FastISel::selectCast(const User *I, unsigned Opcode) {
 bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
-unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
+unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
 Opcode, InputReg, InputRegIsKill);
 if (!ResultReg)
 return false;
@@ -1278,7 +1278,7 @@ bool FastISel::selectBitCast(const User *I) {
 // If the reg-reg copy failed, select a BITCAST opcode.
 if (!ResultReg)
-ResultReg = FastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
+ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
 if (!ResultReg)
 return false;
@@ -1381,7 +1381,7 @@ bool FastISel::selectFNeg(const User *I) {
 // If the target has ISD::FNEG, use it.
 EVT VT = TLI.getValueType(I->getType());
-unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
+unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
 OpReg, OpRegIsKill);
 if (ResultReg) {
 updateValueMap(I, ResultReg);
@@ -1396,18 +1396,18 @@ bool FastISel::selectFNeg(const User *I) {
 if (!TLI.isTypeLegal(IntVT))
 return false;
-unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
+unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
 ISD::BITCAST, OpReg, OpRegIsKill);
 if (!IntReg)
 return false;
-unsigned IntResultReg = FastEmit_ri_(
+unsigned IntResultReg = fastEmit_ri_(
 IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
 UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
 if (!IntResultReg)
 return false;
-ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
+ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
 IntResultReg, /*IsKill=*/true);
 if (!ResultReg)
 return false;
@@ -1518,7 +1518,7 @@ bool FastISel::selectOperator(const User *I, unsigned Opcode) {
 case Instruction::Unreachable:
 if (TM.Options.TrapUnreachable)
-return FastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
+return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
 else
 return true;
@@ -1595,50 +1595,50 @@ bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
 return false;
 }
-unsigned FastISel::FastEmit_(MVT, MVT, unsigned) { return 0; }
+unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
-unsigned FastISel::FastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
+unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
 bool /*Op0IsKill*/) {
 return 0;
 }
-unsigned FastISel::FastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
+unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
 bool /*Op0IsKill*/, unsigned /*Op1*/,
 bool /*Op1IsKill*/) {
 return 0;
 }
-unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
+unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
 return 0;
 }
-unsigned FastISel::FastEmit_f(MVT, MVT, unsigned,
+unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
 const ConstantFP * /*FPImm*/) {
 return 0;
 }
-unsigned FastISel::FastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
+unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
 bool /*Op0IsKill*/, uint64_t /*Imm*/) {
 return 0;
 }
-unsigned FastISel::FastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
+unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
 bool /*Op0IsKill*/,
 const ConstantFP * /*FPImm*/) {
 return 0;
 }
-unsigned FastISel::FastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
+unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
 bool /*Op0IsKill*/, unsigned /*Op1*/,
 bool /*Op1IsKill*/, uint64_t /*Imm*/) {
 return 0;
 }
-/// This method is a wrapper of FastEmit_ri. It first tries to emit an
-/// instruction with an immediate operand using FastEmit_ri.
+/// This method is a wrapper of fastEmit_ri. It first tries to emit an
+/// instruction with an immediate operand using fastEmit_ri.
 /// If that fails, it materializes the immediate into a register and try
-/// FastEmit_rr instead.
-unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
+/// fastEmit_rr instead.
+unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
 bool Op0IsKill, uint64_t Imm, MVT ImmType) {
 // If this is a multiply by a power of two, emit this as a shift left.
 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
@@ -1657,10 +1657,10 @@ unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
 return 0;
 // First check if immediate type is legal. If not, we can't use the ri form.
-unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
+unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
 if (ResultReg)
 return ResultReg;
-unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
+unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
 if (!MaterialReg) {
 // This is a bit ugly/slow, but failing here means falling out of
 // fast-isel, which would be very slow.
@@ -1670,7 +1670,7 @@ unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
 if (!MaterialReg)
 return 0;
 }
-return FastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg,
+return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg,
 /*IsKill=*/true);
 }
@@ -1695,7 +1695,7 @@ unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
 return Op;
 }
-unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC) {
 unsigned ResultReg = createResultReg(RC);
 const MCInstrDesc &II = TII.get(MachineInstOpcode);
@@ -1704,7 +1704,7 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill) {
 const MCInstrDesc &II = TII.get(MachineInstOpcode);
@@ -1725,7 +1725,7 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, unsigned Op1,
 bool Op1IsKill) {
@@ -1749,7 +1749,7 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, unsigned Op1,
 bool Op1IsKill, unsigned Op2,
@@ -1777,7 +1777,7 @@ unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, uint64_t Imm) {
 const MCInstrDesc &II = TII.get(MachineInstOpcode);
@@ -1799,7 +1799,7 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, uint64_t Imm1,
 uint64_t Imm2) {
@@ -1824,7 +1824,7 @@ unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rf(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, const ConstantFP *FPImm) {
 const MCInstrDesc &II = TII.get(MachineInstOpcode);
@@ -1846,7 +1846,7 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, unsigned Op0,
 bool Op0IsKill, unsigned Op1,
 bool Op1IsKill, uint64_t Imm) {
@@ -1872,7 +1872,7 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rrii(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill, unsigned Op1,
 bool Op1IsKill, uint64_t Imm1,
@@ -1901,7 +1901,7 @@ unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, uint64_t Imm) {
 unsigned ResultReg = createResultReg(RC);
 const MCInstrDesc &II = TII.get(MachineInstOpcode);
@@ -1917,7 +1917,7 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_ii(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC, uint64_t Imm1,
 uint64_t Imm2) {
 unsigned ResultReg = createResultReg(RC);
@@ -1936,7 +1936,7 @@ unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
+unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
 bool Op0IsKill, uint32_t Idx) {
 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
 assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
@@ -1951,7 +1951,7 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
 /// Emit MachineInstrs to compute the value of Op with all but the least
 /// significant bit set to zero.
 unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
-return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
+return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
 }
 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
@@ -281,7 +281,7 @@ unsigned AArch64FastISel::AArch64MaterializeInt(const ConstantInt *CI, MVT VT) {
 return 0;
 if (!CI->isZero())
-return FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+return fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
 // Create a copy from the zero register to materialize a "0" value.
 const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
@@ -311,7 +311,7 @@ unsigned AArch64FastISel::AArch64MaterializeFP(const ConstantFP *CFP, MVT VT) {
 Is64Bit ? AArch64_AM::getFP64Imm(Val) : AArch64_AM::getFP32Imm(Val);
 assert((Imm != -1) && "Cannot encode floating-point constant.");
 unsigned Opc = Is64Bit ? AArch64::FMOVDi : AArch64::FMOVSi;
-return FastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
+return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
 }
 // Materialize via constant pool. MachineConstantPool wants an explicit
@@ -411,7 +411,7 @@ unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) {
 bool Is64Bit = (VT == MVT::f64);
 unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
 unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
-return FastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
+return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
 }
 // Computes the address to get to an object.
@@ -796,10 +796,10 @@ bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT) {
 if (ImmediateOffsetNeedsLowering) {
 unsigned ResultReg = 0;
 if (Addr.getReg())
-ResultReg = FastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(),
+ResultReg = fastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(),
 /*IsKill=*/false, Offset, MVT::i64);
 else
-ResultReg = FastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
+ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
 if (!ResultReg)
 return false;
@@ -1246,7 +1246,7 @@ unsigned AArch64FastISel::emitAND_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
 if (!AArch64_AM::isLogicalImmediate(Imm, RegSize))
 return 0;
-return FastEmitInst_ri(Opc, RC, LHSReg, LHSIsKill,
+return fastEmitInst_ri(Opc, RC, LHSReg, LHSIsKill,
 AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
 }
@@ -1571,7 +1571,7 @@ bool AArch64FastISel::SelectBranch(const Instruction *I) {
 // Issue an extract_subreg to get the lower 32-bits.
 if (SrcVT == MVT::i64) {
-CondReg = FastEmitInst_extractsubreg(MVT::i32, CondReg, CondIsKill,
+CondReg = fastEmitInst_extractsubreg(MVT::i32, CondReg, CondIsKill,
 AArch64::sub_32);
 CondIsKill = true;
 }
@@ -1764,7 +1764,7 @@ bool AArch64FastISel::SelectSelect(const Instruction *I) {
 if (!TrueReg || !FalseReg)
 return false;
-unsigned ResultReg = FastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
+unsigned ResultReg = fastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
 FalseReg, FalseIsKill, CC);
 updateValueMap(I, ResultReg);
 return true;
@@ -1872,7 +1872,7 @@ bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) {
 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
 }
-unsigned ResultReg = FastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
+unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
 SrcIsKill);
 updateValueMap(I, ResultReg);
 return true;
@@ -2368,7 +2368,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
 unsigned DestReg;
 unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
 while (Depth--) {
-DestReg = FastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
+DestReg = fastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
 SrcReg, /*IsKill=*/true, 0);
 assert(DestReg && "Unexpected LDR instruction emission failure.");
 SrcReg = DestReg;
@@ -2446,7 +2446,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
 return false;
 bool Op0IsKill = hasTrivialKill(II->getOperand(0));
-unsigned ResultReg = FastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
+unsigned ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
 if (!ResultReg)
 return false;
@@ -2514,16 +2514,16 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
 MulReg = Emit_SMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
 unsigned ShiftReg = emitLSR_ri(MVT::i64, MVT::i64, MulReg,
 /*IsKill=*/false, 32);
-MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
+MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
 AArch64::sub_32);
-ShiftReg = FastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
+ShiftReg = fastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
 AArch64::sub_32);
 emitSubs_rs(VT, ShiftReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
 AArch64_AM::ASR, 31, /*WantResult=*/false);
 } else {
 assert(VT == MVT::i64 && "Unexpected value type.");
 MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
-unsigned SMULHReg = FastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
+unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
 RHSReg, RHSIsKill);
 emitSubs_rs(VT, SMULHReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
 AArch64_AM::ASR, 63, /*WantResult=*/false);
@@ -2547,12 +2547,12 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
 emitSubs_rs(MVT::i64, AArch64::XZR, /*IsKill=*/true, MulReg,
 /*IsKill=*/false, AArch64_AM::LSR, 32,
 /*WantResult=*/false);
-MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
+MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
 AArch64::sub_32);
 } else {
 assert(VT == MVT::i64 && "Unexpected value type.");
 MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
-unsigned UMULHReg = FastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
+unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
 RHSReg, RHSIsKill);
 emitSubs_rr(VT, AArch64::XZR, /*IsKill=*/true, UMULHReg,
 /*IsKill=*/false, /*WantResult=*/false);
@@ -2567,7 +2567,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
 TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);
 }
-ResultReg2 = FastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
+ResultReg2 = fastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
 AArch64::WZR, /*IsKill=*/true, AArch64::WZR,
 /*IsKill=*/true, getInvertedCondCode(CC));
 assert((ResultReg1 + 1) == ResultReg2 &&
@@ -2718,7 +2718,7 @@ bool AArch64FastISel::SelectTrunc(const Instruction *I) {
 break;
 }
 // Issue an extract_subreg to get the lower 32-bits.
-unsigned Reg32 = FastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
+unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
 AArch64::sub_32);
 // Create the AND instruction which performs the actual truncation.
 ResultReg = emitAND_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask);
@@ -2762,7 +2762,7 @@ unsigned AArch64FastISel::Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt) {
 // FIXME: We're SExt i1 to i64.
 return 0;
 }
-return FastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
+return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
 /*TODO:IsKill=*/false, 0, 0);
 }
 }
@@ -2783,7 +2783,7 @@ unsigned AArch64FastISel::Emit_MUL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
 const TargetRegisterClass *RC =
 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
-return FastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
+return fastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
 /*IsKill=*/ZReg, true);
 }
@@ -2792,7 +2792,7 @@ unsigned AArch64FastISel::Emit_SMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
 if (RetVT != MVT::i64)
 return 0;
-return FastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
+return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
 Op0, Op0IsKill, Op1, Op1IsKill,
 AArch64::XZR, /*IsKill=*/true);
 }
@@ -2802,7 +2802,7 @@ unsigned AArch64FastISel::Emit_UMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
 if (RetVT != MVT::i64)
 return 0;
-return FastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
+return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
 Op0, Op0IsKill, Op1, Op1IsKill,
 AArch64::XZR, /*IsKill=*/true);
 }
@@ -2826,7 +2826,7 @@ unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
 Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
 Op1IsKill = true;
 }
-unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
 Op1IsKill);
 if (NeedTrunc)
 ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
@@ -2897,7 +2897,7 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
 Op0 = TmpReg;
 Op0IsKill = true;
 }
-return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
 }
 unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
@@ -2920,7 +2920,7 @@ unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
 Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
 Op0IsKill = Op1IsKill = true;
 }
-unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
 Op1IsKill);
 if (NeedTrunc)
 ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
@@ -3006,7 +3006,7 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
 Op0 = TmpReg;
 Op0IsKill = true;
 }
-return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
 }
 unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
@@ -3029,7 +3029,7 @@ unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
 Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
 Op0IsKill = Op1IsKill = true;
 }
-unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
 Op1IsKill);
 if (NeedTrunc)
 ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
@@ -3103,7 +3103,7 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
 Op0 = TmpReg;
 Op0IsKill = true;
 }
-return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
 }
 unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
@@ -3164,7 +3164,7 @@ unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
 const TargetRegisterClass *RC =
 (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
-return FastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
+return fastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
 }
 bool AArch64FastISel::SelectIntExt(const Instruction *I) {
@@ -3250,12 +3250,12 @@ bool AArch64FastISel::SelectRem(const Instruction *I, unsigned ISDOpcode) {
 const TargetRegisterClass *RC =
 (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
-unsigned QuotReg = FastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
+unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
 Src1Reg, /*IsKill=*/false);
 assert(QuotReg && "Unexpected DIV instruction emission failure.");
 // The remainder is computed as numerator - (quotient * denominator) using the
 // MSUB instruction.
-unsigned ResultReg = FastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
+unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
 Src1Reg, Src1IsKill, Src0Reg,
 Src0IsKill);
 updateValueMap(I, ResultReg);
@@ -3407,7 +3407,7 @@ bool AArch64FastISel::SelectBitCast(const Instruction *I) {
 if (!Op0Reg)
 return false;
 bool Op0IsKill = hasTrivialKill(I->getOperand(0));
-unsigned ResultReg = FastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
+unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
 if (!ResultReg)
 return false;
@@ -3483,7 +3483,7 @@ bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
 return SelectIndirectBr(I);
 case Instruction::Unreachable:
 if (TM.Options.TrapUnreachable)
-return FastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
+return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
 else
 return true;
 case Instruction::Alloca:
@@ -105,28 +105,28 @@ class ARMFastISel final : public FastISel {
 // Code from FastISel.cpp.
 private:
-unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned fastEmitInst_r(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill);
-unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 unsigned Op1, bool Op1IsKill);
-unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
+unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 unsigned Op1, bool Op1IsKill,
 unsigned Op2, bool Op2IsKill);
-unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 uint64_t Imm);
-unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
+unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 unsigned Op1, bool Op1IsKill,
 uint64_t Imm);
-unsigned FastEmitInst_i(unsigned MachineInstOpcode,
+unsigned fastEmitInst_i(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 uint64_t Imm);
@@ -285,7 +285,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
 return MIB;
 }
-unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill) {
 unsigned ResultReg = createResultReg(RC);
@@ -307,7 +307,7 @@ unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 unsigned Op1, bool Op1IsKill) {
@@ -335,7 +335,7 @@ unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 unsigned Op1, bool Op1IsKill,
@@ -367,7 +367,7 @@ unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 uint64_t Imm) {
@@ -393,7 +393,7 @@ unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 unsigned Op1, bool Op1IsKill,
@@ -423,7 +423,7 @@ unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
 return ResultReg;
 }
-unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 uint64_t Imm) {
 unsigned ResultReg = createResultReg(RC);
@@ -548,7 +548,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
 unsigned ResultReg = 0;
 if (Subtarget->useMovt(*FuncInfo.MF))
-ResultReg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
 if (ResultReg)
 return ResultReg;
@@ -909,7 +909,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
 // Since the offset is too large for the load/store instruction
 // get the reg+offset into a register.
 if (needsLowering) {
-Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
+Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
 Addr.Offset = 0;
 }
@@ -1976,7 +1976,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
 break;
 }
 case CCValAssign::BCvt: {
-unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
+unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
 /*TODO: Kill=*/false);
 assert(BC != 0 && "Failed to emit a bitcast!");
 Arg = BC;
@@ -89,7 +89,7 @@ private:
 // for some reason, this default is not generated by tablegen
 // so we explicitly generate it here.
 //
-unsigned FastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
+unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill, uint64_t imm1,
 uint64_t imm2, unsigned Op3, bool Op3IsKill) {
 return 0;
@@ -106,15 +106,15 @@ class PPCFastISel final : public FastISel {
 bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
 const LoadInst *LI) override;
 bool fastLowerArguments() override;
-unsigned FastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
-unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned fastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
+unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 uint64_t Imm);
-unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned fastEmitInst_r(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill);
-unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 unsigned Op1, bool Op1IsKill);
@@ -2183,7 +2183,7 @@ bool PPCFastISel::fastLowerArguments() {
 // Handle materializing integer constants into a register. This is not
 // automatically generated for PowerPC, so must be explicitly created here.
-unsigned PPCFastISel::FastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
+unsigned PPCFastISel::fastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
 if (Opc != ISD::Constant)
 return 0;
@@ -2220,7 +2220,7 @@ unsigned PPCFastISel::FastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
 // assigning R0 or X0 to the output register for GPRC and G8RC
 // register classes, as any such result could be used in ADDI, etc.,
 // where those regs have another meaning.
-unsigned PPCFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
 const TargetRegisterClass *RC,
 unsigned Op0, bool Op0IsKill,
 uint64_t Imm) {
@@ -2233,27 +2233,27 @@ unsigned PPCFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
 (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
 (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
-return FastISel::FastEmitInst_ri(MachineInstOpcode, UseRC,
+return FastISel::fastEmitInst_ri(MachineInstOpcode, UseRC,
 Op0, Op0IsKill, Imm);
 }
 // Override for instructions with one register operand to avoid use of
 // R0/X0. The automatic infrastructure isn't aware of the context so
 // we must be conservative.
-unsigned PPCFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
 const TargetRegisterClass* RC,
 unsigned Op0, bool Op0IsKill) {
 const TargetRegisterClass *UseRC =
 (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
 (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
-return FastISel::FastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill);
+return FastISel::fastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill);
 }
 // Override for instructions with two register operands to avoid use
 // of R0/X0. The automatic infrastructure isn't aware of the context
 // so we must be conservative.
-unsigned PPCFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned PPCFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
 const TargetRegisterClass* RC,
 unsigned Op0, bool Op0IsKill,
 unsigned Op1, bool Op1IsKill) {
@@ -2261,7 +2261,7 @@ unsigned PPCFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
 (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
 (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
-return FastISel::FastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill,
+return FastISel::fastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill,
 Op1, Op1IsKill);
 }
@ -532,7 +532,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
|
||||
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
|
||||
unsigned Src, EVT SrcVT,
|
||||
unsigned &ResultReg) {
|
||||
unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
|
||||
unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
|
||||
Src, /*TODO: Kill=*/false);
|
||||
if (RR == 0)
|
||||
return false;
|
||||
@ -1043,7 +1043,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
|
||||
}
|
||||
unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
|
||||
ISD::SIGN_EXTEND;
|
||||
SrcReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
|
||||
SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
|
||||
SrcReg, /*TODO: Kill=*/false);
|
||||
}
|
||||
|
||||
@ -1196,7 +1196,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
|
||||
ResultReg = createResultReg(&X86::GR32RegClass);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
|
||||
ResultReg);
|
||||
ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
|
||||
ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
|
||||
X86::sub_8bit);
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
@ -1314,7 +1314,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
|
||||
ResultReg)
|
||||
.addImm(0).addReg(Result32).addImm(X86::sub_32bit);
|
||||
} else if (DstVT != MVT::i8) {
|
||||
ResultReg = FastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
|
||||
ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
|
||||
ResultReg, /*Kill=*/true);
|
||||
if (ResultReg == 0)
|
||||
return false;
|
||||
@ -1714,7 +1714,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
|
||||
ResultSuperReg).addReg(SourceSuperReg).addImm(8);
|
||||
|
||||
// Now reference the 8-bit subreg of the result.
|
||||
ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
|
||||
ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
|
||||
/*Kill=*/true, X86::sub_8bit);
|
||||
}
|
||||
// Copy the result out of the physreg if we haven't already.
|
||||
@ -1839,7 +1839,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
|
||||
return false;
|
||||
|
||||
unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
|
||||
unsigned ResultReg = FastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
|
||||
unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
|
||||
LHSReg, LHSIsKill);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
@ -1919,13 +1919,13 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
|
||||
return false;
|
||||
|
||||
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
|
||||
unsigned CmpReg = FastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
|
||||
unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
|
||||
CmpRHSReg, CmpRHSIsKill, CC);
|
||||
unsigned AndReg = FastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
|
||||
unsigned AndReg = fastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
|
||||
LHSReg, LHSIsKill);
|
||||
unsigned AndNReg = FastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
|
||||
unsigned AndNReg = fastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
|
||||
RHSReg, RHSIsKill);
|
||||
unsigned ResultReg = FastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
|
||||
unsigned ResultReg = fastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
|
||||
AndReg, /*IsKill=*/true);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
@ -1990,7 +1990,7 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
|
||||
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
|
||||
|
||||
unsigned ResultReg =
|
||||
FastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
|
||||
fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
@ -2115,7 +2115,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
|
||||
}
|
||||
|
||||
// Issue an extract_subreg.
|
||||
unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
|
||||
unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,
|
||||
InputReg, /*Kill=*/true,
|
||||
X86::sub_8bit);
|
||||
if (!ResultReg)
|
||||
@ -2307,7 +2307,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
if (!isTypeLegal(RetTy, VT))
|
||||
return false;
|
||||
|
||||
// Unfortunately we can't use FastEmit_r, because the AVX version of FSQRT
|
||||
// Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
|
||||
// is not generated by FastISel yet.
|
||||
// FIXME: Update this code once tablegen can handle it.
|
||||
static const unsigned SqrtOpc[2][2] = {
|
||||
@ -2425,7 +2425,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
TII.get(Opc[Is64Bit][IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
|
||||
.addReg(LHSReg, getKillRegState(LHSIsKill));
|
||||
} else
|
||||
ResultReg = FastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
|
||||
ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
|
||||
CI->getZExtValue());
|
||||
}
|
||||
|
||||
@ -2436,7 +2436,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
if (RHSReg == 0)
|
||||
return false;
|
||||
RHSIsKill = hasTrivialKill(RHS);
|
||||
ResultReg = FastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
|
||||
ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
|
||||
RHSIsKill);
|
||||
}
|
||||
|
||||
@ -2451,7 +2451,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
|
||||
.addReg(LHSReg, getKillRegState(LHSIsKill));
|
||||
ResultReg = FastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
|
||||
ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
|
||||
TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
|
||||
} else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
|
||||
static const unsigned MULOpc[] =
|
||||
@ -2462,10 +2462,10 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), X86::AL)
.addReg(LHSReg, getKillRegState(LHSIsKill));
ResultReg = FastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
RHSIsKill);
} else
ResultReg = FastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
}
@ -2744,7 +2744,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
return false;

ResultReg =
FastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);
fastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);

if (!ResultReg)
return false;
@ -2831,7 +2831,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
break;
}
case CCValAssign::BCvt: {
ArgReg = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
/*TODO: Kill=*/false);
assert(ArgReg && "Failed to emit a bitcast!");
ArgVT = VA.getLocVT();
@ -3108,15 +3108,15 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {

uint64_t Imm = CI->getZExtValue();
if (Imm == 0) {
unsigned SrcReg = FastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type");
case MVT::i1:
case MVT::i8:
return FastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
X86::sub_8bit);
case MVT::i16:
return FastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
X86::sub_16bit);
case MVT::i32:
return SrcReg;
@ -3148,14 +3148,14 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
}
}
if (VT == MVT::i64 && Opc == X86::MOV32ri) {
unsigned SrcReg = FastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
unsigned ResultReg = createResultReg(&X86::GR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
.addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
return ResultReg;
}
return FastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}

unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {

@ -347,7 +347,7 @@ struct OperandsSignature {
// Implicit physical register operand. e.g. Instruction::Mul expect to
// select to a binary op. On x86, mul may take a single operand with
// the other operand being implicit. We must emit something that looks
// like a binary instruction except for the very inner FastEmitInst_*
// like a binary instruction except for the very inner fastEmitInst_*
// call.
continue;
Operands[i].printManglingSuffix(OS, ImmPredicates, StripImmCodes);
@ -610,7 +610,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
const PredMap &PM = RI->second;
bool HasPred = false;

OS << "unsigned FastEmit_"
OS << "unsigned fastEmit_"
<< getLegalCName(Opcode)
<< "_" << getLegalCName(getName(VT))
<< "_" << getLegalCName(getName(RetVT)) << "_";
@ -643,7 +643,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
<< (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
}

OS << " return FastEmitInst_";
OS << " return fastEmitInst_";
if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs,
ImmediatePredicates, true);
@ -670,7 +670,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
}

// Emit one function for the type that demultiplexes on return type.
OS << "unsigned FastEmit_"
OS << "unsigned fastEmit_"
<< getLegalCName(Opcode) << "_"
<< getLegalCName(getName(VT)) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
@ -682,7 +682,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
for (RetPredMap::const_iterator RI = RM.begin(), RE = RM.end();
RI != RE; ++RI) {
MVT::SimpleValueType RetVT = RI->first;
OS << " case " << getName(RetVT) << ": return FastEmit_"
OS << " case " << getName(RetVT) << ": return fastEmit_"
<< getLegalCName(Opcode) << "_" << getLegalCName(getName(VT))
<< "_" << getLegalCName(getName(RetVT)) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
@ -694,7 +694,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {

} else {
// Non-variadic return type.
OS << "unsigned FastEmit_"
OS << "unsigned fastEmit_"
<< getLegalCName(Opcode) << "_"
<< getLegalCName(getName(VT)) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
@ -734,7 +734,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
<< (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
}

OS << " return FastEmitInst_";
OS << " return fastEmitInst_";

if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs,
@ -764,7 +764,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
}

// Emit one function for the opcode that demultiplexes based on the type.
OS << "unsigned FastEmit_"
OS << "unsigned fastEmit_"
<< getLegalCName(Opcode) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(MVT VT, MVT RetVT";
@ -777,7 +777,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
TI != TE; ++TI) {
MVT::SimpleValueType VT = TI->first;
std::string TypeName = getName(VT);
OS << " case " << TypeName << ": return FastEmit_"
OS << " case " << TypeName << ": return fastEmit_"
<< getLegalCName(Opcode) << "_" << getLegalCName(TypeName) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(RetVT";
@ -797,7 +797,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {

// Emit one function for the operand signature that demultiplexes based
// on opcode and type.
OS << "unsigned FastEmit_";
OS << "unsigned fastEmit_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(MVT VT, MVT RetVT, unsigned Opcode";
if (!Operands.empty())
@ -823,7 +823,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
for (unsigned i = 0, e = MI->second.size(); i != e; ++i) {
OS << " if (";
MI->second[i].emitImmediatePredicate(OS, ImmediatePredicates);
OS << ")\n if (unsigned Reg = FastEmit_";
OS << ")\n if (unsigned Reg = fastEmit_";
MI->second[i].PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(VT, RetVT, Opcode";
if (!MI->second[i].empty())
@ -841,7 +841,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
I != E; ++I) {
const std::string &Opcode = I->first;

OS << " case " << Opcode << ": return FastEmit_"
OS << " case " << Opcode << ": return fastEmit_"
<< getLegalCName(Opcode) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(VT, RetVT";