GlobalISel: Add templated functions and pattern matcher support for some more opcodes
Summary:
This patch adds templated functions to MachineIRBuilder for some opcodes
and adds pattern matcher support for G_AND and G_OR.

Reviewers: aditya_nandakumar

Reviewed By: aditya_nandakumar

Subscribers: rovka, kristof.beyls, llvm-commits

Differential Revision: https://reviews.llvm.org/D43309

llvm-svn: 325162
commit d8161883cb
parent 7662be63cf
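As a rough sketch of the usage this enables (illustrative only; `B`, `MRI`, `Reg0`, and `Reg1` are assumed to be a MachineIRBuilder, its MachineRegisterInfo, and two existing s64 generic virtual registers, mirroring the unit-test setup further below):

  using namespace MIPatternMatch;

  LLT s64 = LLT::scalar(64);
  // An LLT destination makes the builder create the result vreg itself,
  // so the call site no longer needs MRI.createGenericVirtualRegister().
  auto Or = B.buildOr(s64, Reg0, Reg1); // %or:_(s64) = G_OR %reg0, %reg1
  // The new m_GOr matcher recovers the operands again.
  unsigned Lhs, Rhs;
  bool Matched =
      mi_match(Or->getOperand(0).getReg(), MRI, m_GOr(m_Reg(Lhs), m_Reg(Rhs)));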
@@ -208,6 +208,18 @@ m_GFMul(const LHS &L, const RHS &R) {
   return BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>(L, R);
 }
 
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>
+m_GAnd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true> m_GOr(const LHS &L,
+                                                                const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true>(L, R);
+}
+
 // Helper for unary instructions (G_[ZSA]EXT/G_TRUNC) etc
 template <typename SrcTy, unsigned Opcode> struct UnaryOp_match {
   SrcTy L;
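The trailing `true` template argument selects the commutable form of BinaryOp_match, so these matchers accept their operands in either order, and they nest with the existing matchers. A hedged sketch (the register `DstReg` and the captured variable names are illustrative, not part of the patch):

  // Matches both G_AND %x, 42 and G_AND 42, %x, since the matcher is
  // commutable.
  int64_t Cst;
  unsigned Src;
  bool IsAndImm = mi_match(DstReg, MRI, m_GAnd(m_ICst(Cst), m_Reg(Src)));

  // Matchers compose, e.g. (or (and x, y), z) rooted at DstReg.
  unsigned X, Y, Z;
  bool IsOrOfAnd =
      mi_match(DstReg, MRI, m_GOr(m_GAnd(m_Reg(X), m_Reg(Y)), m_Reg(Z)));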
@@ -375,6 +375,10 @@ public:
   /// with the same (scalar or vector) type).
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildOr(DstTy &&Dst, UseArgsTy &&... UseArgs) {
+    return buildOr(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+  }
   MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
 
   /// Build and insert \p Res = G_ANYEXT \p Op0
@@ -441,6 +445,10 @@ public:
   /// \pre \p Op must be a generic virtual register with scalar or vector type.
   ///
   /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildSExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildSExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
   MachineInstrBuilder buildSExtOrTrunc(unsigned Res, unsigned Op);
 
   /// Build and insert \p Res = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
@@ -451,6 +459,10 @@ public:
   /// \pre \p Op must be a generic virtual register with scalar or vector type.
   ///
   /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildZExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildZExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
   MachineInstrBuilder buildZExtOrTrunc(unsigned Res, unsigned Op);
 
   // Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_TRUNC \p Op, or
@@ -480,6 +492,10 @@ public:
                                          unsigned Op);
 
   /// Build and insert an appropriate cast between two registers of equal size.
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildCast(DstType &&Res, ArgType &&Arg) {
+    return buildCast(getDestFromArg(Res), getRegFromArg(Arg));
+  }
   MachineInstrBuilder buildCast(unsigned Dst, unsigned Src);
 
   /// Build and insert G_BR \p Dest
@@ -550,6 +566,10 @@ public:
   /// \pre \p Res must be a generic virtual register with scalar type.
   ///
   /// \return The newly created instruction.
+  template <typename DstType>
+  MachineInstrBuilder buildFConstant(DstType &&Res, const ConstantFP &Val) {
+    return buildFConstant(getDestFromArg(Res), Val);
+  }
   MachineInstrBuilder buildFConstant(unsigned Res, const ConstantFP &Val);
 
   /// Build and insert \p Res = COPY Op
@@ -598,6 +618,9 @@ public:
   MachineInstrBuilder buildExtract(unsigned Res, unsigned Src, uint64_t Index);
 
   /// Build and insert \p Res = IMPLICIT_DEF.
+  template <typename DstType> MachineInstrBuilder buildUndef(DstType &&Res) {
+    return buildUndef(getDestFromArg(Res));
+  }
   MachineInstrBuilder buildUndef(unsigned Dst);
 
   /// Build and insert instructions to put \p Ops together at the specified p
@@ -667,6 +690,10 @@ public:
   /// \pre \p Res must be smaller than \p Op
   ///
   /// \return The newly created instruction.
+  template <typename DstType, typename SrcType>
+  MachineInstrBuilder buildFPTrunc(DstType &&Res, SrcType &&Src) {
+    return buildFPTrunc(getDestFromArg(Res), getRegFromArg(Src));
+  }
   MachineInstrBuilder buildFPTrunc(unsigned Res, unsigned Op);
 
   /// Build and insert \p Res = G_TRUNC \p Op
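These overloads all funnel through the getDestFromArg / getRegFromArg helpers already used by the other templated builders, so a destination given as an LLT gets a fresh generic virtual register while a plain register is passed through unchanged. A sketch of the resulting call-site simplification, of the kind applied in LegalizerHelper below (`MIRBuilder`, `MRI`, and the `const ConstantFP *CFP` are assumed context, not part of the patch):

  // Before: create the destination register by hand, then build.
  unsigned Dst = MRI.createGenericVirtualRegister(LLT::scalar(32));
  MIRBuilder.buildFConstant(Dst, *CFP);

  // After: let the templated overload create the register from the type.
  auto Cst = MIRBuilder.buildFConstant(LLT::scalar(32), *CFP);
  unsigned DstReg = Cst->getOperand(0).getReg();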
@@ -286,11 +286,9 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
     int NumParts = SizeOp0 / NarrowSize;
 
     SmallVector<unsigned, 2> DstRegs;
-    for (int i = 0; i < NumParts; ++i) {
-      unsigned Dst = MRI.createGenericVirtualRegister(NarrowTy);
-      MIRBuilder.buildUndef(Dst);
-      DstRegs.push_back(Dst);
-    }
+    for (int i = 0; i < NumParts; ++i)
+      DstRegs.push_back(
+          MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
     MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
     MI.eraseFromParent();
     return Legalized;
@@ -755,7 +753,6 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
     return Legalized;
   }
   case TargetOpcode::G_FCONSTANT: {
-    unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
     const ConstantFP *CFP = MI.getOperand(1).getFPImm();
     APFloat Val = CFP->getValueAPF();
     LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
@@ -773,8 +770,8 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
     };
     bool LosesInfo;
     Val.convert(*LLT2Sem(WideTy), APFloat::rmTowardZero, &LosesInfo);
-    MIRBuilder.buildFConstant(DstExt, *ConstantFP::get(Ctx, Val));
-    MIRBuilder.buildFPTrunc(MI.getOperand(0).getReg(), DstExt);
+    auto Cst = MIRBuilder.buildFConstant(WideTy, *ConstantFP::get(Ctx, Val));
+    MIRBuilder.buildFPTrunc(MI.getOperand(0).getReg(), Cst);
     MI.eraseFromParent();
     return Legalized;
   }
@@ -969,11 +966,10 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     }
     ConstantFP &ZeroForNegation =
         *cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy));
-    unsigned Zero = MRI.createGenericVirtualRegister(Ty);
-    MIRBuilder.buildFConstant(Zero, ZeroForNegation);
+    auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation);
     MIRBuilder.buildInstr(TargetOpcode::G_FSUB)
         .addDef(Res)
-        .addUse(Zero)
+        .addUse(Zero->getOperand(0).getReg())
         .addUse(MI.getOperand(1).getReg());
     MI.eraseFromParent();
     return Legalized;
@@ -210,6 +210,24 @@ TEST(PatternMatchInstr, MatchBinaryOp) {
   ASSERT_TRUE(match);
   ASSERT_EQ(Cst, (uint64_t)42);
   ASSERT_EQ(Src0, Copies[0]);
+
+  // Build AND %0, %1
+  auto MIBAnd = B.buildAnd(s64, Copies[0], Copies[1]);
+  // Try to match AND.
+  match = mi_match(MIBAnd->getOperand(0).getReg(), MRI,
+                   m_GAnd(m_Reg(Src0), m_Reg(Src1)));
+  ASSERT_TRUE(match);
+  ASSERT_EQ(Src0, Copies[0]);
+  ASSERT_EQ(Src1, Copies[1]);
+
+  // Build OR %0, %1
+  auto MIBOr = B.buildOr(s64, Copies[0], Copies[1]);
+  // Try to match OR.
+  match = mi_match(MIBOr->getOperand(0).getReg(), MRI,
+                   m_GOr(m_Reg(Src0), m_Reg(Src1)));
+  ASSERT_TRUE(match);
+  ASSERT_EQ(Src0, Copies[0]);
+  ASSERT_EQ(Src1, Copies[1]);
 }
 
 TEST(PatternMatchInstr, MatchExtendsTrunc) {
@@ -282,15 +300,23 @@ TEST(PatternMatchInstr, MatchSpecificType) {
   MachineIRBuilder B(*MF);
   MachineRegisterInfo &MRI = MF->getRegInfo();
   B.setInsertPt(*EntryMBB, EntryMBB->end());
-
-  // Try to match a 64bit add.
   LLT s64 = LLT::scalar(64);
   LLT s32 = LLT::scalar(32);
   auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
+
+  // Try to match a 64bit add.
   ASSERT_FALSE(mi_match(MIBAdd->getOperand(0).getReg(), MRI,
                         m_GAdd(m_SpecificType(s32), m_Reg())));
   ASSERT_TRUE(mi_match(MIBAdd->getOperand(0).getReg(), MRI,
                        m_GAdd(m_SpecificType(s64), m_Reg())));
+
+  // Try to match the destination type of a bitcast.
+  LLT v2s32 = LLT::vector(2, 32);
+  auto MIBCast = B.buildCast(v2s32, Copies[0]);
+  ASSERT_TRUE(
+      mi_match(MIBCast->getOperand(0).getReg(), MRI, m_SpecificType(v2s32)));
+  ASSERT_TRUE(
+      mi_match(MIBCast->getOperand(1).getReg(), MRI, m_SpecificType(s64)));
 }
 
 TEST(PatternMatchInstr, MatchCombinators) {