Set ADDE/ADDC/SUBE/SUBC to expand by default

Summary:
They've been deprecated in favor of UADDO/ADDCARRY and USUBO/SUBCARRY for a while.

Targets that use these opcodes are changed so that their behavior doesn't change.
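
For a target that still wants the old nodes, the change amounts to a few explicit
setOperationAction calls in its TargetLowering constructor, mirroring what this
patch does for PowerPC and AMDGPU. A minimal sketch, assuming a hypothetical
MyTargetLowering class (the class name and the choice of i32/i64 are illustrative
only, not part of this patch):

// Hypothetical backend constructor: ADDC/ADDE/SUBC/SUBE no longer default to
// Legal, so a target that still selects them must request them explicitly.
MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
    : TargetLowering(TM) {
  for (MVT VT : {MVT::i32, MVT::i64}) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }
}

Targets that are happy with the generic expansion simply drop their old Expand
calls, which accounts for most of the deletions below.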

Reviewers: efriedma, craig.topper, dblaikie, bkramer

Subscribers: jholewinski, arsenm, jyknight, sdardis, nemanjai, nhaehnle, kbarton, fedor.sergeev, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, niosHD, jrtc27, zzheng, edward-jones, mgrang, atanasyan, llvm-commits

Differential Revision: https://reviews.llvm.org/D47422

llvm-svn: 333748
commit 72c691d9ab (parent a09ac7f5a7)
Author: Amaury Sechet
Date: 2018-06-01 13:21:33 +00:00

12 changed files with 38 additions and 59 deletions


@@ -156,6 +156,12 @@ Changes to the C API
interface was made a deprecated no-op in LLVM 5. Use
``LLVMAddSLPVectorizePass`` instead to get the supported SLP vectorizer.
Changes to the DAG infrastructure
---------------------------------
* ADDC/ADDE/SUBC/SUBE are now deprecated and will default to expand. Backends
that wish to continue to use these opcodes should explicitly request them
using ``setOperationAction`` in their ``TargetLowering``. New backends
should use UADDO/ADDCARRY/USUBO/SUBCARRY instead of the deprecated opcodes.
External Open Source Projects Using LLVM 7
==========================================
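
Regarding the note's advice that new backends use UADDO/ADDCARRY/USUBO/SUBCARRY:
the practical difference is that the carry travels as an ordinary SDValue rather
than as glue. A hedged sketch of a two-word addition built from the replacement
nodes (the LowerWideAdd helper, its signature, and the MVT::i1 carry type are
assumptions for illustration, not code from this patch):

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Illustrative only: compute (HiL:LoL) + (HiR:LoR) with an explicit carry value.
static SDValue LowerWideAdd(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                            SDValue LoL, SDValue HiL,
                            SDValue LoR, SDValue HiR) {
  SDVTList VTs = DAG.getVTList(VT, MVT::i1); // result value + carry-out
  // UADDO adds the low halves and yields the carry-out as result 1,
  // a real value rather than the glue produced by the old ADDC.
  SDValue Lo = DAG.getNode(ISD::UADDO, DL, VTs, LoL, LoR);
  // ADDCARRY adds the high halves plus the carry-in and yields a new
  // carry-out, where the old ADDE consumed and produced glue instead.
  SDValue Hi = DAG.getNode(ISD::ADDCARRY, DL, VTs, HiL, HiR, Lo.getValue(1));
  return DAG.getMergeValues({Lo, Hi}, DL);
}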


@@ -621,6 +621,12 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::SUBCARRY, VT, Expand);
setOperationAction(ISD::SETCCCARRY, VT, Expand);
// ADDC/ADDE/SUBC/SUBE default to expand.
setOperationAction(ISD::ADDC, VT, Expand);
setOperationAction(ISD::ADDE, VT, Expand);
setOperationAction(ISD::SUBC, VT, Expand);
setOperationAction(ISD::SUBE, VT, Expand);
// These default to Expand so they will be expanded to CTLZ/CTTZ by default.
setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);


@@ -391,6 +391,12 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BSWAP, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
setOperationAction(ISD::CTLZ, VT, Expand);
// AMDGPU uses ADDC/SUBC/ADDE/SUBE
setOperationAction(ISD::ADDC, VT, Legal);
setOperationAction(ISD::SUBC, VT, Legal);
setOperationAction(ISD::ADDE, VT, Legal);
setOperationAction(ISD::SUBE, VT, Legal);
}
if (!Subtarget->hasBCNT(32))
@@ -470,10 +476,6 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
setOperationAction(ISD::SDIVREM, VT, Custom);
setOperationAction(ISD::UDIVREM, VT, Expand);
setOperationAction(ISD::ADDC, VT, Expand);
setOperationAction(ISD::SUBC, VT, Expand);
setOperationAction(ISD::ADDE, VT, Expand);
setOperationAction(ISD::SUBE, VT, Expand);
setOperationAction(ISD::SELECT, VT, Expand);
setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);


@@ -234,9 +234,6 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif
//setOperationAction(ISD::ADDC, MVT::i64, Expand);
//setOperationAction(ISD::SUBC, MVT::i64, Expand);
// We only support LOAD/STORE and vector manipulation ops for vectors
// with > 4 elements.
for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,


@@ -88,10 +88,6 @@ BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
setOperationAction(ISD::SMUL_LOHI, VT, Expand);
setOperationAction(ISD::ADDC, VT, Expand);
setOperationAction(ISD::ADDE, VT, Expand);
setOperationAction(ISD::SUBC, VT, Expand);
setOperationAction(ISD::SUBE, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
setOperationAction(ISD::ROTL, VT, Expand);
setOperationAction(ISD::SHL_PARTS, VT, Expand);


@@ -1327,28 +1327,6 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setMinimumJumpTableEntries(std::numeric_limits<int>::max());
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
// Hexagon has instructions for add/sub with carry. The problem with
// modeling these instructions is that they produce 2 results: Rdd and Px.
// To model the update of Px, we will have to use Defs[p0..p3] which will
cause any predicate live range to spill. So, we pretend we don't have
// these instructions.
setOperationAction(ISD::ADDE, MVT::i8, Expand);
setOperationAction(ISD::ADDE, MVT::i16, Expand);
setOperationAction(ISD::ADDE, MVT::i32, Expand);
setOperationAction(ISD::ADDE, MVT::i64, Expand);
setOperationAction(ISD::SUBE, MVT::i8, Expand);
setOperationAction(ISD::SUBE, MVT::i16, Expand);
setOperationAction(ISD::SUBE, MVT::i32, Expand);
setOperationAction(ISD::SUBE, MVT::i64, Expand);
setOperationAction(ISD::ADDC, MVT::i8, Expand);
setOperationAction(ISD::ADDC, MVT::i16, Expand);
setOperationAction(ISD::ADDC, MVT::i32, Expand);
setOperationAction(ISD::ADDC, MVT::i64, Expand);
setOperationAction(ISD::SUBC, MVT::i8, Expand);
setOperationAction(ISD::SUBC, MVT::i16, Expand);
setOperationAction(ISD::SUBC, MVT::i32, Expand);
setOperationAction(ISD::SUBC, MVT::i64, Expand);
// Only add and sub that detect overflow are the saturating ones.
for (MVT VT : MVT::integer_valuetypes()) {
setOperationAction(ISD::UADDO, VT, Expand);
@@ -1428,10 +1406,9 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
// either "custom" or "legal" for specific cases.
static const unsigned VectExpOps[] = {
// Integer arithmetic:
ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV,
ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::ADDC,
ISD::SUBC, ISD::SADDO, ISD::UADDO, ISD::SSUBO, ISD::USUBO,
ISD::SMUL_LOHI, ISD::UMUL_LOHI,
ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV,
ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::SADDO,
ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
// Logical/bit:
ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR,
ISD::CTPOP, ISD::CTLZ, ISD::CTTZ,


@@ -393,18 +393,11 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::UDIV, MVT::i64, Expand);
setOperationAction(ISD::UREM, MVT::i64, Expand);
if (!(Subtarget.hasDSP() && Subtarget.hasMips32r2())) {
setOperationAction(ISD::ADDC, MVT::i32, Expand);
setOperationAction(ISD::ADDE, MVT::i32, Expand);
if (Subtarget.hasDSP() && Subtarget.hasMips32r2()) {
setOperationAction(ISD::ADDC, MVT::i32, Legal);
setOperationAction(ISD::ADDE, MVT::i32, Legal);
}
setOperationAction(ISD::ADDC, MVT::i64, Expand);
setOperationAction(ISD::ADDE, MVT::i64, Expand);
setOperationAction(ISD::SUBC, MVT::i32, Expand);
setOperationAction(ISD::SUBE, MVT::i32, Expand);
setOperationAction(ISD::SUBC, MVT::i64, Expand);
setOperationAction(ISD::SUBE, MVT::i64, Expand);
// Operations not directly supported by Mips.
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
setOperationAction(ISD::BR_CC, MVT::f64, Expand);


@@ -467,9 +467,6 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// TRAP can be lowered to PTX trap
setOperationAction(ISD::TRAP, MVT::Other, Legal);
setOperationAction(ISD::ADDC, MVT::i64, Expand);
setOperationAction(ISD::ADDE, MVT::i64, Expand);
// Register custom handling for vector loads/stores
for (MVT VT : MVT::vector_valuetypes()) {
if (IsPTXVectorType(VT)) {


@@ -172,6 +172,15 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
// PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
for (MVT VT : ScalarIntVTs) {
setOperationAction(ISD::ADDC, VT, Legal);
setOperationAction(ISD::ADDE, VT, Legal);
setOperationAction(ISD::SUBC, VT, Legal);
setOperationAction(ISD::SUBE, VT, Legal);
}
if (Subtarget.useCRBits()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);


@@ -80,11 +80,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
setOperationAction(ISD::ADDC, XLenVT, Expand);
setOperationAction(ISD::ADDE, XLenVT, Expand);
setOperationAction(ISD::SUBC, XLenVT, Expand);
setOperationAction(ISD::SUBE, XLenVT, Expand);
if (!Subtarget.hasStdExtM()) {
setOperationAction(ISD::MUL, XLenVT, Expand);
setOperationAction(ISD::MULHS, XLenVT, Expand);


@@ -1590,6 +1590,11 @@ SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
setOperationAction(ISD::ADDC, MVT::i32, Custom);
setOperationAction(ISD::ADDE, MVT::i32, Custom);
setOperationAction(ISD::SUBC, MVT::i32, Custom);
setOperationAction(ISD::SUBE, MVT::i32, Custom);
if (Subtarget->is64Bit()) {
setOperationAction(ISD::ADDC, MVT::i64, Custom);
setOperationAction(ISD::ADDE, MVT::i64, Custom);


@@ -91,10 +91,6 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
// XCore does not have the NodeTypes below.
setOperationAction(ISD::BR_CC, MVT::i32, Expand);
setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
setOperationAction(ISD::ADDC, MVT::i32, Expand);
setOperationAction(ISD::ADDE, MVT::i32, Expand);
setOperationAction(ISD::SUBC, MVT::i32, Expand);
setOperationAction(ISD::SUBE, MVT::i32, Expand);
// 64bit
setOperationAction(ISD::ADD, MVT::i64, Custom);