[ARM] Format ARMISD node definitions. NFC

This clang-formats the list of ARMISD nodes. Usually this is something I
would avoid, but these lists cause formatting problems every time new
nodes are added.

The list in getTargetNodeName now also uses a MAKE_CASE macro, as other
backends do.
Author: David Green
Date:   2021-04-24 14:50:32 +01:00
Parent: d29a53d9ca
Commit: 7255d1f54f
2 changed files with 424 additions and 431 deletions
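
For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of how the MAKE_CASE macro works. The enum below is an illustrative three-node subset with a placeholder starting value, not the real ARMISD list, and getTargetNodeName is reduced to its essentials:

#include <cstdio>

namespace ARMISD {
// Illustrative subset; the real enum starts at ISD::BUILTIN_OP_END.
enum NodeType : unsigned { FIRST_NUMBER = 1000, Wrapper, CALL, BRCOND };
} // namespace ARMISD

const char *getTargetNodeName(unsigned Opcode) {
// MAKE_CASE(V) expands to "case V: return #V;", so each case label returns
// the fully qualified enumerator name as a string literal.
#define MAKE_CASE(V)                                                           \
  case V:                                                                      \
    return #V;
  switch ((ARMISD::NodeType)Opcode) {
  case ARMISD::FIRST_NUMBER:
    break;
    MAKE_CASE(ARMISD::Wrapper)
    MAKE_CASE(ARMISD::CALL)
    MAKE_CASE(ARMISD::BRCOND)
#undef MAKE_CASE
  }
  return nullptr;
}

int main() {
  std::printf("%s\n", getTargetNodeName(ARMISD::CALL)); // prints "ARMISD::CALL"
}

Because each invocation is a single line, clang-format no longer reflows the whole list when a node is added, which is the problem the commit message describes.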

llvm/lib/Target/ARM/ARMISelLowering.cpp

@@ -1609,210 +1609,199 @@ ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
}
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define MAKE_CASE(V) \
case V: \
return #V;
switch ((ARMISD::NodeType)Opcode) {
case ARMISD::FIRST_NUMBER: break;
case ARMISD::Wrapper: return "ARMISD::Wrapper";
case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
case ARMISD::CALL: return "ARMISD::CALL";
case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
case ARMISD::tSECALL: return "ARMISD::tSECALL";
case ARMISD::BRCOND: return "ARMISD::BRCOND";
case ARMISD::BR_JT: return "ARMISD::BR_JT";
case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
case ARMISD::SERET_FLAG: return "ARMISD::SERET_FLAG";
case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
case ARMISD::CMP: return "ARMISD::CMP";
case ARMISD::CMN: return "ARMISD::CMN";
case ARMISD::CMPZ: return "ARMISD::CMPZ";
case ARMISD::CMPFP: return "ARMISD::CMPFP";
case ARMISD::CMPFPE: return "ARMISD::CMPFPE";
case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
case ARMISD::CMPFPEw0: return "ARMISD::CMPFPEw0";
case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
case ARMISD::CMOV: return "ARMISD::CMOV";
case ARMISD::SUBS: return "ARMISD::SUBS";
case ARMISD::SSAT: return "ARMISD::SSAT";
case ARMISD::USAT: return "ARMISD::USAT";
case ARMISD::ASRL: return "ARMISD::ASRL";
case ARMISD::LSRL: return "ARMISD::LSRL";
case ARMISD::LSLL: return "ARMISD::LSLL";
case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
case ARMISD::RRX: return "ARMISD::RRX";
case ARMISD::ADDC: return "ARMISD::ADDC";
case ARMISD::ADDE: return "ARMISD::ADDE";
case ARMISD::SUBC: return "ARMISD::SUBC";
case ARMISD::SUBE: return "ARMISD::SUBE";
case ARMISD::LSLS: return "ARMISD::LSLS";
case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";
case ARMISD::VMOVhr: return "ARMISD::VMOVhr";
case ARMISD::VMOVrh: return "ARMISD::VMOVrh";
case ARMISD::VMOVSR: return "ARMISD::VMOVSR";
case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";
case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";
case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";
case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
case ARMISD::PRELOAD: return "ARMISD::PRELOAD";
case ARMISD::LDRD: return "ARMISD::LDRD";
case ARMISD::STRD: return "ARMISD::STRD";
case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";
case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK";
case ARMISD::PREDICATE_CAST: return "ARMISD::PREDICATE_CAST";
case ARMISD::VECTOR_REG_CAST: return "ARMISD::VECTOR_REG_CAST";
case ARMISD::VCMP: return "ARMISD::VCMP";
case ARMISD::VCMPZ: return "ARMISD::VCMPZ";
case ARMISD::VTST: return "ARMISD::VTST";
case ARMISD::VSHLs: return "ARMISD::VSHLs";
case ARMISD::VSHLu: return "ARMISD::VSHLu";
case ARMISD::VSHLIMM: return "ARMISD::VSHLIMM";
case ARMISD::VSHRsIMM: return "ARMISD::VSHRsIMM";
case ARMISD::VSHRuIMM: return "ARMISD::VSHRuIMM";
case ARMISD::VRSHRsIMM: return "ARMISD::VRSHRsIMM";
case ARMISD::VRSHRuIMM: return "ARMISD::VRSHRuIMM";
case ARMISD::VRSHRNIMM: return "ARMISD::VRSHRNIMM";
case ARMISD::VQSHLsIMM: return "ARMISD::VQSHLsIMM";
case ARMISD::VQSHLuIMM: return "ARMISD::VQSHLuIMM";
case ARMISD::VQSHLsuIMM: return "ARMISD::VQSHLsuIMM";
case ARMISD::VQSHRNsIMM: return "ARMISD::VQSHRNsIMM";
case ARMISD::VQSHRNuIMM: return "ARMISD::VQSHRNuIMM";
case ARMISD::VQSHRNsuIMM: return "ARMISD::VQSHRNsuIMM";
case ARMISD::VQRSHRNsIMM: return "ARMISD::VQRSHRNsIMM";
case ARMISD::VQRSHRNuIMM: return "ARMISD::VQRSHRNuIMM";
case ARMISD::VQRSHRNsuIMM: return "ARMISD::VQRSHRNsuIMM";
case ARMISD::VSLIIMM: return "ARMISD::VSLIIMM";
case ARMISD::VSRIIMM: return "ARMISD::VSRIIMM";
case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
case ARMISD::VDUP: return "ARMISD::VDUP";
case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
case ARMISD::VEXT: return "ARMISD::VEXT";
case ARMISD::VREV64: return "ARMISD::VREV64";
case ARMISD::VREV32: return "ARMISD::VREV32";
case ARMISD::VREV16: return "ARMISD::VREV16";
case ARMISD::VZIP: return "ARMISD::VZIP";
case ARMISD::VUZP: return "ARMISD::VUZP";
case ARMISD::VTRN: return "ARMISD::VTRN";
case ARMISD::VTBL1: return "ARMISD::VTBL1";
case ARMISD::VTBL2: return "ARMISD::VTBL2";
case ARMISD::VMOVN: return "ARMISD::VMOVN";
case ARMISD::VQMOVNs: return "ARMISD::VQMOVNs";
case ARMISD::VQMOVNu: return "ARMISD::VQMOVNu";
case ARMISD::VCVTN: return "ARMISD::VCVTN";
case ARMISD::VCVTL: return "ARMISD::VCVTL";
case ARMISD::VMULLs: return "ARMISD::VMULLs";
case ARMISD::VMULLu: return "ARMISD::VMULLu";
case ARMISD::VQDMULH: return "ARMISD::VQDMULH";
case ARMISD::VADDVs: return "ARMISD::VADDVs";
case ARMISD::VADDVu: return "ARMISD::VADDVu";
case ARMISD::VADDVps: return "ARMISD::VADDVps";
case ARMISD::VADDVpu: return "ARMISD::VADDVpu";
case ARMISD::VADDLVs: return "ARMISD::VADDLVs";
case ARMISD::VADDLVu: return "ARMISD::VADDLVu";
case ARMISD::VADDLVAs: return "ARMISD::VADDLVAs";
case ARMISD::VADDLVAu: return "ARMISD::VADDLVAu";
case ARMISD::VADDLVps: return "ARMISD::VADDLVps";
case ARMISD::VADDLVpu: return "ARMISD::VADDLVpu";
case ARMISD::VADDLVAps: return "ARMISD::VADDLVAps";
case ARMISD::VADDLVApu: return "ARMISD::VADDLVApu";
case ARMISD::VMLAVs: return "ARMISD::VMLAVs";
case ARMISD::VMLAVu: return "ARMISD::VMLAVu";
case ARMISD::VMLAVps: return "ARMISD::VMLAVps";
case ARMISD::VMLAVpu: return "ARMISD::VMLAVpu";
case ARMISD::VMLALVs: return "ARMISD::VMLALVs";
case ARMISD::VMLALVu: return "ARMISD::VMLALVu";
case ARMISD::VMLALVps: return "ARMISD::VMLALVps";
case ARMISD::VMLALVpu: return "ARMISD::VMLALVpu";
case ARMISD::VMLALVAs: return "ARMISD::VMLALVAs";
case ARMISD::VMLALVAu: return "ARMISD::VMLALVAu";
case ARMISD::VMLALVAps: return "ARMISD::VMLALVAps";
case ARMISD::VMLALVApu: return "ARMISD::VMLALVApu";
case ARMISD::VMINVu: return "ARMISD::VMINVu";
case ARMISD::VMINVs: return "ARMISD::VMINVs";
case ARMISD::VMAXVu: return "ARMISD::VMAXVu";
case ARMISD::VMAXVs: return "ARMISD::VMAXVs";
case ARMISD::UMAAL: return "ARMISD::UMAAL";
case ARMISD::UMLAL: return "ARMISD::UMLAL";
case ARMISD::SMLAL: return "ARMISD::SMLAL";
case ARMISD::SMLALBB: return "ARMISD::SMLALBB";
case ARMISD::SMLALBT: return "ARMISD::SMLALBT";
case ARMISD::SMLALTB: return "ARMISD::SMLALTB";
case ARMISD::SMLALTT: return "ARMISD::SMLALTT";
case ARMISD::SMULWB: return "ARMISD::SMULWB";
case ARMISD::SMULWT: return "ARMISD::SMULWT";
case ARMISD::SMLALD: return "ARMISD::SMLALD";
case ARMISD::SMLALDX: return "ARMISD::SMLALDX";
case ARMISD::SMLSLD: return "ARMISD::SMLSLD";
case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX";
case ARMISD::SMMLAR: return "ARMISD::SMMLAR";
case ARMISD::SMMLSR: return "ARMISD::SMMLSR";
case ARMISD::QADD16b: return "ARMISD::QADD16b";
case ARMISD::QSUB16b: return "ARMISD::QSUB16b";
case ARMISD::QADD8b: return "ARMISD::QADD8b";
case ARMISD::QSUB8b: return "ARMISD::QSUB8b";
case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
case ARMISD::BFI: return "ARMISD::BFI";
case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
case ARMISD::VBSP: return "ARMISD::VBSP";
case ARMISD::MEMCPY: return "ARMISD::MEMCPY";
case ARMISD::VLD1DUP: return "ARMISD::VLD1DUP";
case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
case ARMISD::VLD1DUP_UPD: return "ARMISD::VLD1DUP_UPD";
case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
case ARMISD::WLS: return "ARMISD::WLS";
case ARMISD::WLSSETUP: return "ARMISD::WLSSETUP";
case ARMISD::LE: return "ARMISD::LE";
case ARMISD::LOOP_DEC: return "ARMISD::LOOP_DEC";
case ARMISD::CSINV: return "ARMISD::CSINV";
case ARMISD::CSNEG: return "ARMISD::CSNEG";
case ARMISD::CSINC: return "ARMISD::CSINC";
case ARMISD::FIRST_NUMBER:
break;
MAKE_CASE(ARMISD::Wrapper)
MAKE_CASE(ARMISD::WrapperPIC)
MAKE_CASE(ARMISD::WrapperJT)
MAKE_CASE(ARMISD::COPY_STRUCT_BYVAL)
MAKE_CASE(ARMISD::CALL)
MAKE_CASE(ARMISD::CALL_PRED)
MAKE_CASE(ARMISD::CALL_NOLINK)
MAKE_CASE(ARMISD::tSECALL)
MAKE_CASE(ARMISD::BRCOND)
MAKE_CASE(ARMISD::BR_JT)
MAKE_CASE(ARMISD::BR2_JT)
MAKE_CASE(ARMISD::RET_FLAG)
MAKE_CASE(ARMISD::SERET_FLAG)
MAKE_CASE(ARMISD::INTRET_FLAG)
MAKE_CASE(ARMISD::PIC_ADD)
MAKE_CASE(ARMISD::CMP)
MAKE_CASE(ARMISD::CMN)
MAKE_CASE(ARMISD::CMPZ)
MAKE_CASE(ARMISD::CMPFP)
MAKE_CASE(ARMISD::CMPFPE)
MAKE_CASE(ARMISD::CMPFPw0)
MAKE_CASE(ARMISD::CMPFPEw0)
MAKE_CASE(ARMISD::BCC_i64)
MAKE_CASE(ARMISD::FMSTAT)
MAKE_CASE(ARMISD::CMOV)
MAKE_CASE(ARMISD::SUBS)
MAKE_CASE(ARMISD::SSAT)
MAKE_CASE(ARMISD::USAT)
MAKE_CASE(ARMISD::ASRL)
MAKE_CASE(ARMISD::LSRL)
MAKE_CASE(ARMISD::LSLL)
MAKE_CASE(ARMISD::SRL_FLAG)
MAKE_CASE(ARMISD::SRA_FLAG)
MAKE_CASE(ARMISD::RRX)
MAKE_CASE(ARMISD::ADDC)
MAKE_CASE(ARMISD::ADDE)
MAKE_CASE(ARMISD::SUBC)
MAKE_CASE(ARMISD::SUBE)
MAKE_CASE(ARMISD::LSLS)
MAKE_CASE(ARMISD::VMOVRRD)
MAKE_CASE(ARMISD::VMOVDRR)
MAKE_CASE(ARMISD::VMOVhr)
MAKE_CASE(ARMISD::VMOVrh)
MAKE_CASE(ARMISD::VMOVSR)
MAKE_CASE(ARMISD::EH_SJLJ_SETJMP)
MAKE_CASE(ARMISD::EH_SJLJ_LONGJMP)
MAKE_CASE(ARMISD::EH_SJLJ_SETUP_DISPATCH)
MAKE_CASE(ARMISD::TC_RETURN)
MAKE_CASE(ARMISD::THREAD_POINTER)
MAKE_CASE(ARMISD::DYN_ALLOC)
MAKE_CASE(ARMISD::MEMBARRIER_MCR)
MAKE_CASE(ARMISD::PRELOAD)
MAKE_CASE(ARMISD::LDRD)
MAKE_CASE(ARMISD::STRD)
MAKE_CASE(ARMISD::WIN__CHKSTK)
MAKE_CASE(ARMISD::WIN__DBZCHK)
MAKE_CASE(ARMISD::PREDICATE_CAST)
MAKE_CASE(ARMISD::VECTOR_REG_CAST)
MAKE_CASE(ARMISD::VCMP)
MAKE_CASE(ARMISD::VCMPZ)
MAKE_CASE(ARMISD::VTST)
MAKE_CASE(ARMISD::VSHLs)
MAKE_CASE(ARMISD::VSHLu)
MAKE_CASE(ARMISD::VSHLIMM)
MAKE_CASE(ARMISD::VSHRsIMM)
MAKE_CASE(ARMISD::VSHRuIMM)
MAKE_CASE(ARMISD::VRSHRsIMM)
MAKE_CASE(ARMISD::VRSHRuIMM)
MAKE_CASE(ARMISD::VRSHRNIMM)
MAKE_CASE(ARMISD::VQSHLsIMM)
MAKE_CASE(ARMISD::VQSHLuIMM)
MAKE_CASE(ARMISD::VQSHLsuIMM)
MAKE_CASE(ARMISD::VQSHRNsIMM)
MAKE_CASE(ARMISD::VQSHRNuIMM)
MAKE_CASE(ARMISD::VQSHRNsuIMM)
MAKE_CASE(ARMISD::VQRSHRNsIMM)
MAKE_CASE(ARMISD::VQRSHRNuIMM)
MAKE_CASE(ARMISD::VQRSHRNsuIMM)
MAKE_CASE(ARMISD::VSLIIMM)
MAKE_CASE(ARMISD::VSRIIMM)
MAKE_CASE(ARMISD::VGETLANEu)
MAKE_CASE(ARMISD::VGETLANEs)
MAKE_CASE(ARMISD::VMOVIMM)
MAKE_CASE(ARMISD::VMVNIMM)
MAKE_CASE(ARMISD::VMOVFPIMM)
MAKE_CASE(ARMISD::VDUP)
MAKE_CASE(ARMISD::VDUPLANE)
MAKE_CASE(ARMISD::VEXT)
MAKE_CASE(ARMISD::VREV64)
MAKE_CASE(ARMISD::VREV32)
MAKE_CASE(ARMISD::VREV16)
MAKE_CASE(ARMISD::VZIP)
MAKE_CASE(ARMISD::VUZP)
MAKE_CASE(ARMISD::VTRN)
MAKE_CASE(ARMISD::VTBL1)
MAKE_CASE(ARMISD::VTBL2)
MAKE_CASE(ARMISD::VMOVN)
MAKE_CASE(ARMISD::VQMOVNs)
MAKE_CASE(ARMISD::VQMOVNu)
MAKE_CASE(ARMISD::VCVTN)
MAKE_CASE(ARMISD::VCVTL)
MAKE_CASE(ARMISD::VMULLs)
MAKE_CASE(ARMISD::VMULLu)
MAKE_CASE(ARMISD::VQDMULH)
MAKE_CASE(ARMISD::VADDVs)
MAKE_CASE(ARMISD::VADDVu)
MAKE_CASE(ARMISD::VADDVps)
MAKE_CASE(ARMISD::VADDVpu)
MAKE_CASE(ARMISD::VADDLVs)
MAKE_CASE(ARMISD::VADDLVu)
MAKE_CASE(ARMISD::VADDLVAs)
MAKE_CASE(ARMISD::VADDLVAu)
MAKE_CASE(ARMISD::VADDLVps)
MAKE_CASE(ARMISD::VADDLVpu)
MAKE_CASE(ARMISD::VADDLVAps)
MAKE_CASE(ARMISD::VADDLVApu)
MAKE_CASE(ARMISD::VMLAVs)
MAKE_CASE(ARMISD::VMLAVu)
MAKE_CASE(ARMISD::VMLAVps)
MAKE_CASE(ARMISD::VMLAVpu)
MAKE_CASE(ARMISD::VMLALVs)
MAKE_CASE(ARMISD::VMLALVu)
MAKE_CASE(ARMISD::VMLALVps)
MAKE_CASE(ARMISD::VMLALVpu)
MAKE_CASE(ARMISD::VMLALVAs)
MAKE_CASE(ARMISD::VMLALVAu)
MAKE_CASE(ARMISD::VMLALVAps)
MAKE_CASE(ARMISD::VMLALVApu)
MAKE_CASE(ARMISD::VMINVu)
MAKE_CASE(ARMISD::VMINVs)
MAKE_CASE(ARMISD::VMAXVu)
MAKE_CASE(ARMISD::VMAXVs)
MAKE_CASE(ARMISD::UMAAL)
MAKE_CASE(ARMISD::UMLAL)
MAKE_CASE(ARMISD::SMLAL)
MAKE_CASE(ARMISD::SMLALBB)
MAKE_CASE(ARMISD::SMLALBT)
MAKE_CASE(ARMISD::SMLALTB)
MAKE_CASE(ARMISD::SMLALTT)
MAKE_CASE(ARMISD::SMULWB)
MAKE_CASE(ARMISD::SMULWT)
MAKE_CASE(ARMISD::SMLALD)
MAKE_CASE(ARMISD::SMLALDX)
MAKE_CASE(ARMISD::SMLSLD)
MAKE_CASE(ARMISD::SMLSLDX)
MAKE_CASE(ARMISD::SMMLAR)
MAKE_CASE(ARMISD::SMMLSR)
MAKE_CASE(ARMISD::QADD16b)
MAKE_CASE(ARMISD::QSUB16b)
MAKE_CASE(ARMISD::QADD8b)
MAKE_CASE(ARMISD::QSUB8b)
MAKE_CASE(ARMISD::BUILD_VECTOR)
MAKE_CASE(ARMISD::BFI)
MAKE_CASE(ARMISD::VORRIMM)
MAKE_CASE(ARMISD::VBICIMM)
MAKE_CASE(ARMISD::VBSP)
MAKE_CASE(ARMISD::MEMCPY)
MAKE_CASE(ARMISD::VLD1DUP)
MAKE_CASE(ARMISD::VLD2DUP)
MAKE_CASE(ARMISD::VLD3DUP)
MAKE_CASE(ARMISD::VLD4DUP)
MAKE_CASE(ARMISD::VLD1_UPD)
MAKE_CASE(ARMISD::VLD2_UPD)
MAKE_CASE(ARMISD::VLD3_UPD)
MAKE_CASE(ARMISD::VLD4_UPD)
MAKE_CASE(ARMISD::VLD2LN_UPD)
MAKE_CASE(ARMISD::VLD3LN_UPD)
MAKE_CASE(ARMISD::VLD4LN_UPD)
MAKE_CASE(ARMISD::VLD1DUP_UPD)
MAKE_CASE(ARMISD::VLD2DUP_UPD)
MAKE_CASE(ARMISD::VLD3DUP_UPD)
MAKE_CASE(ARMISD::VLD4DUP_UPD)
MAKE_CASE(ARMISD::VST1_UPD)
MAKE_CASE(ARMISD::VST2_UPD)
MAKE_CASE(ARMISD::VST3_UPD)
MAKE_CASE(ARMISD::VST4_UPD)
MAKE_CASE(ARMISD::VST2LN_UPD)
MAKE_CASE(ARMISD::VST3LN_UPD)
MAKE_CASE(ARMISD::VST4LN_UPD)
MAKE_CASE(ARMISD::WLS)
MAKE_CASE(ARMISD::WLSSETUP)
MAKE_CASE(ARMISD::LE)
MAKE_CASE(ARMISD::LOOP_DEC)
MAKE_CASE(ARMISD::CSINV)
MAKE_CASE(ARMISD::CSNEG)
MAKE_CASE(ARMISD::CSINC)
#undef MAKE_CASE
}
return nullptr;
}

llvm/lib/Target/ARM/ARMISelLowering.h

@@ -51,285 +51,289 @@ class VectorType;
namespace ARMISD {
// ARM Specific DAG Nodes
enum NodeType : unsigned {
// Start the numbering where the builtin ops and target ops leave off.
FIRST_NUMBER = ISD::BUILTIN_OP_END,
Wrapper, // Wrapper - A wrapper node for TargetConstantPool,
// TargetExternalSymbol, and TargetGlobalAddress.
WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
// PIC mode.
WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable
// Add pseudo op to model memcpy for struct byval.
COPY_STRUCT_BYVAL,
CALL, // Function call.
CALL_PRED, // Function call that's predicable.
CALL_NOLINK, // Function call with branch not branch-and-link.
tSECALL, // CMSE non-secure function call.
BRCOND, // Conditional branch.
BR_JT, // Jumptable branch.
BR2_JT, // Jumptable branch (2 level - jumptable entry is a jump).
RET_FLAG, // Return with a flag operand.
SERET_FLAG, // CMSE Entry function return with a flag operand.
INTRET_FLAG, // Interrupt return with an LR-offset and a flag operand.
PIC_ADD, // Add with a PC operand and a PIC label.
ASRL, // MVE long arithmetic shift right.
LSRL, // MVE long shift right.
LSLL, // MVE long shift left.
CMP, // ARM compare instructions.
CMN, // ARM CMN instructions.
CMPZ, // ARM compare that sets only Z flag.
CMPFP, // ARM VFP compare instruction, sets FPSCR.
CMPFPE, // ARM VFP signalling compare instruction, sets FPSCR.
CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR.
CMPFPEw0, // ARM VFP signalling compare against zero instruction, sets
// FPSCR.
FMSTAT, // ARM fmstat instruction.
CMOV, // ARM conditional move instructions.
SUBS, // Flag-setting subtraction.
SSAT, // Signed saturation
USAT, // Unsigned saturation
BCC_i64,
SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
SRA_FLAG, // V,Flag = sra_flag X -> sra X, 1 + save carry out.
RRX, // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
ADDC, // Add with carry
ADDE, // Add using carry
SUBC, // Sub with carry
SUBE, // Sub using carry
LSLS, // Shift left producing carry
VMOVRRD, // double to two gprs.
VMOVDRR, // Two gprs to double.
VMOVSR, // move gpr to single, used for f32 literal constructed in a gpr
EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.
TC_RETURN, // Tail call return pseudo.
THREAD_POINTER,
DYN_ALLOC, // Dynamic allocation on the stack.
MEMBARRIER_MCR, // Memory barrier (MCR)
PRELOAD, // Preload
WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
WIN__DBZCHK, // Windows' divide by zero check
WLS, // Low-overhead loops, While Loop Start branch. See t2WhileLoopStart
WLSSETUP, // Setup for the iteration count of a WLS. See t2WhileLoopSetup.
LOOP_DEC, // Really a part of LE, performs the sub
LE, // Low-overhead loops, Loop End
PREDICATE_CAST, // Predicate cast for MVE i1 types
VECTOR_REG_CAST, // Reinterpret the current contents of a vector register
VCMP, // Vector compare.
VCMPZ, // Vector compare to zero.
VTST, // Vector test bits.
// Vector shift by vector
VSHLs, // ...left/right by signed
VSHLu, // ...left/right by unsigned
// Vector shift by immediate:
VSHLIMM, // ...left
VSHRsIMM, // ...right (signed)
VSHRuIMM, // ...right (unsigned)
// Vector rounding shift by immediate:
VRSHRsIMM, // ...right (signed)
VRSHRuIMM, // ...right (unsigned)
VRSHRNIMM, // ...right narrow
// Vector saturating shift by immediate:
VQSHLsIMM, // ...left (signed)
VQSHLuIMM, // ...left (unsigned)
VQSHLsuIMM, // ...left (signed to unsigned)
VQSHRNsIMM, // ...right narrow (signed)
VQSHRNuIMM, // ...right narrow (unsigned)
VQSHRNsuIMM, // ...right narrow (signed to unsigned)
// Vector saturating rounding shift by immediate:
VQRSHRNsIMM, // ...right narrow (signed)
VQRSHRNuIMM, // ...right narrow (unsigned)
VQRSHRNsuIMM, // ...right narrow (signed to unsigned)
// Vector shift and insert:
VSLIIMM, // ...left
VSRIIMM, // ...right
// Vector get lane (VMOV scalar to ARM core register)
// (These are used for 8- and 16-bit element types only.)
VGETLANEu, // zero-extend vector extract element
VGETLANEs, // sign-extend vector extract element
// Vector move immediate and move negated immediate:
VMOVIMM,
VMVNIMM,
// Vector move f32 immediate:
VMOVFPIMM,
// Move H <-> R, clearing top 16 bits
VMOVrh,
VMOVhr,
// Vector duplicate:
VDUP,
VDUPLANE,
// Vector shuffles:
VEXT, // extract
VREV64, // reverse elements within 64-bit doublewords
VREV32, // reverse elements within 32-bit words
VREV16, // reverse elements within 16-bit halfwords
VZIP, // zip (interleave)
VUZP, // unzip (deinterleave)
VTRN, // transpose
VTBL1, // 1-register shuffle with mask
VTBL2, // 2-register shuffle with mask
VMOVN, // MVE vmovn
// MVE Saturating truncates
VQMOVNs, // Vector (V) Saturating (Q) Move and Narrow (N), signed (s)
VQMOVNu, // Vector (V) Saturating (Q) Move and Narrow (N), unsigned (u)
// MVE float <> half converts
VCVTN, // MVE vcvt f32 -> f16, truncating into either the bottom or top
// lanes
VCVTL, // MVE vcvt f16 -> f32, extending from either the bottom or top lanes
// Vector multiply long:
VMULLs, // ...signed
VMULLu, // ...unsigned
VQDMULH, // MVE vqdmulh instruction
// MVE reductions
VADDVs, // sign- or zero-extend the elements of a vector to i32,
VADDVu, // add them all together, and return an i32 of their sum
VADDVps, // Same as VADDV[su] but with a v4i1 predicate mask
VADDVpu,
VADDLVs, // sign- or zero-extend elements to i64 and sum, returning
VADDLVu, // the low and high 32-bit halves of the sum
VADDLVAs, // Same as VADDLV[su] but also add an input accumulator
VADDLVAu, // provided as low and high halves
VADDLVps, // Same as VADDLV[su] but with a v4i1 predicate mask
VADDLVpu,
VADDLVAps, // Same as VADDLVp[su] but with a v4i1 predicate mask
VADDLVApu,
VMLAVs, // sign- or zero-extend the elements of two vectors to i32, multiply
// them
VMLAVu, // and add the results together, returning an i32 of their sum
VMLAVps, // Same as VMLAV[su] with a v4i1 predicate mask
VMLAVpu,
VMLALVs, // Same as VMLAV but with i64, returning the low and
VMLALVu, // high 32-bit halves of the sum
VMLALVps, // Same as VMLALV[su] with a v4i1 predicate mask
VMLALVpu,
VMLALVAs, // Same as VMLALV but also add an input accumulator
VMLALVAu, // provided as low and high halves
VMLALVAps, // Same as VMLALVA[su] with a v4i1 predicate mask
VMLALVApu,
VMINVu, // Find minimum unsigned value of a vector and register
VMINVs, // Find minimum signed value of a vector and register
VMAXVu, // Find maximum unsigned value of a vector and register
VMAXVs, // Find maximum signed value of a vector and register
SMULWB, // Signed multiply word by half word, bottom
SMULWT, // Signed multiply word by half word, top
UMLAL, // 64bit Unsigned Accumulate Multiply
SMLAL, // 64bit Signed Accumulate Multiply
UMAAL, // 64-bit Unsigned Accumulate Accumulate Multiply
SMLALBB, // 64-bit signed accumulate multiply bottom, bottom 16
SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
SMLALTT, // 64-bit signed accumulate multiply top, top 16
SMLALD, // Signed multiply accumulate long dual
SMLALDX, // Signed multiply accumulate long dual exchange
SMLSLD, // Signed multiply subtract long dual
SMLSLDX, // Signed multiply subtract long dual exchange
SMMLAR, // Signed multiply long, round and add
SMMLSR, // Signed multiply long, subtract and round
// Single Lane QADD8 and QADD16. Only the bottom lane. That's what the b
// stands for.
QADD8b,
QSUB8b,
QADD16b,
QSUB16b,
// Operands of the standard BUILD_VECTOR node are not legalized, which
// is fine if BUILD_VECTORs are always lowered to shuffles or other
// operations, but for ARM some BUILD_VECTORs are legal as-is and their
// operands need to be legalized. Define an ARM-specific version of
// BUILD_VECTOR for this purpose.
BUILD_VECTOR,
// Bit-field insert
BFI,
// Vector OR with immediate
VORRIMM,
// Vector AND with NOT of immediate
VBICIMM,
// Pseudo vector bitwise select
VBSP,
// Pseudo-instruction representing a memory copy using ldm/stm
// instructions.
MEMCPY,
// V8.1MMainline condition select
CSINV, // Conditional select invert.
CSNEG, // Conditional select negate.
CSINC, // Conditional select increment.
// Vector load N-element structure to all lanes:
VLD1DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
VLD2DUP,
VLD3DUP,
VLD4DUP,
// NEON loads with post-increment base updates:
VLD1_UPD,
VLD2_UPD,
VLD3_UPD,
VLD4_UPD,
VLD2LN_UPD,
VLD3LN_UPD,
VLD4LN_UPD,
VLD1DUP_UPD,
VLD2DUP_UPD,
VLD3DUP_UPD,
VLD4DUP_UPD,
// NEON stores with post-increment base updates:
VST1_UPD,
VST2_UPD,
VST3_UPD,
VST4_UPD,
VST2LN_UPD,
VST3LN_UPD,
VST4LN_UPD,
// Load/Store of dual registers
LDRD,
STRD
};
} // end namespace ARMISD