From fdfb7d78f128943fb3f20296fd0dfdf73f62295a Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Sun, 27 Jan 2019 15:57:23 +0000 Subject: [PATCH] GlobalISel: Verify load/store has a pointer input I expected this to be automatically verified, but it seems nothing uses that the type index was declared as a "ptype" llvm-svn: 352319 --- llvm/lib/CodeGen/MachineVerifier.cpp | 7 ++++- .../inst-select-amdgcn.cvt.pkrtz.mir | 8 +++--- .../AMDGPU/GlobalISel/inst-select-ashr.mir | 4 +-- .../AMDGPU/GlobalISel/inst-select-bitcast.mir | 4 +-- .../GlobalISel/inst-select-constant.mir | 20 ++++++------- .../AMDGPU/GlobalISel/inst-select-copy.mir | 4 +-- .../AMDGPU/GlobalISel/inst-select-fadd.mir | 2 +- .../AMDGPU/GlobalISel/inst-select-fmul.mir | 8 +++--- .../AMDGPU/GlobalISel/inst-select-fptoui.mir | 2 +- .../GlobalISel/inst-select-implicit-def.mir | 14 +++++----- .../AMDGPU/GlobalISel/inst-select-maxnum.mir | 2 +- .../AMDGPU/GlobalISel/inst-select-minnum.mir | 2 +- .../AMDGPU/GlobalISel/inst-select-or.mir | 4 +-- .../AMDGPU/GlobalISel/inst-select-sitofp.mir | 2 +- .../CodeGen/MIR/AArch64/invalid-extload.mir | 23 --------------- llvm/test/Verifier/test_g_load.mir | 18 ++++++++++++ llvm/test/Verifier/test_g_sextload.mir | 28 +++++++++++++++++++ llvm/test/Verifier/test_g_store.mir | 19 +++++++++++++ llvm/test/Verifier/test_g_zextload.mir | 28 +++++++++++++++++++ 19 files changed, 137 insertions(+), 62 deletions(-) delete mode 100644 llvm/test/CodeGen/MIR/AArch64/invalid-extload.mir create mode 100644 llvm/test/Verifier/test_g_load.mir create mode 100644 llvm/test/Verifier/test_g_sextload.mir create mode 100644 llvm/test/Verifier/test_g_store.mir create mode 100644 llvm/test/Verifier/test_g_zextload.mir diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp index 879a3d33469c..54d2bb4a0325 100644 --- a/llvm/lib/CodeGen/MachineVerifier.cpp +++ b/llvm/lib/CodeGen/MachineVerifier.cpp @@ -1003,7 +1003,11 @@ void 
MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) { case TargetOpcode::G_LOAD: case TargetOpcode::G_STORE: case TargetOpcode::G_ZEXTLOAD: - case TargetOpcode::G_SEXTLOAD: + case TargetOpcode::G_SEXTLOAD: { + LLT PtrTy = MRI->getType(MI->getOperand(1).getReg()); + if (!PtrTy.isPointer()) + report("Generic memory instruction must access a pointer", MI); + // Generic loads and stores must have a single MachineMemOperand // describing that access. if (!MI->hasOneMemOperand()) { @@ -1021,6 +1025,7 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) { } break; + } case TargetOpcode::G_PHI: { LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); if (!DstTy.isValid() || diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir index f1c19f124ffa..f241c2dad1e9 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir @@ -21,7 +21,7 @@ body: | %1:vgpr(s32) = COPY $vgpr0 ; GCN: [[VGPR1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 %2:vgpr(s32) = COPY $vgpr1 - %3:vgpr(s64) = COPY $vgpr3_vgpr4 + %3:vgpr(p1) = COPY $vgpr3_vgpr4 ; cvt_pkrtz vs ; GCN: V_CVT_PKRTZ_F16_F32_e64 0, [[VGPR0]], 0, [[SGPR0]] @@ -38,8 +38,8 @@ body: | %7:vgpr(s32) = G_BITCAST %4 %8:vgpr(s32) = G_BITCAST %5 %9:vgpr(s32) = G_BITCAST %6 - G_STORE %7, %3 :: (store 4 into %ir.global0) - G_STORE %8, %3 :: (store 4 into %ir.global0) - G_STORE %9, %3 :: (store 4 into %ir.global0) + G_STORE %7, %3 :: (store 4 into %ir.global0, addrspace 1) + G_STORE %8, %3 :: (store 4 into %ir.global0, addrspace 1) + G_STORE %9, %3 :: (store 4 into %ir.global0, addrspace 1) ... 
--- diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir index 86692c3e1d62..a9126854c21b 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir @@ -20,7 +20,7 @@ body: | %0:sgpr(s32) = COPY $sgpr0 %1:sgpr(s32) = COPY $sgpr1 %2:vgpr(s32) = COPY $vgpr0 - %3:vgpr(s64) = COPY $vgpr3_vgpr4 + %3:vgpr(p1) = COPY $vgpr3_vgpr4 ; GCN: [[C1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1 ; GCN: [[C4096:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096 @@ -80,7 +80,7 @@ body: | %17:vgpr(s32) = G_ASHR %16, %5 - G_STORE %17, %3 :: (store 4 into %ir.global0) + G_STORE %17, %3 :: (store 4 into %ir.global0, addrspace 1) ... --- diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitcast.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitcast.mir index 88f811160c0d..157e7b703d78 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitcast.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitcast.mir @@ -18,9 +18,9 @@ body: | bb.0: liveins: $sgpr0, $vgpr3_vgpr4 %0:vgpr(s32) = COPY $vgpr0 - %1:vgpr(s64) = COPY $vgpr3_vgpr4 + %1:vgpr(p1) = COPY $vgpr3_vgpr4 %2:vgpr(<2 x s16>) = G_BITCAST %0 %3:vgpr(s32) = G_BITCAST %2 - G_STORE %3, %1 :: (store 4 into %ir.global0) + G_STORE %3, %1 :: (store 4 into %ir.global0, addrspace 1) ... 
--- diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir index f848edaf6675..19ad8729a02b 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir @@ -14,8 +14,8 @@ body: | bb.0: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 ; GCN-LABEL: name: constant - %0:vgpr(s64) = COPY $vgpr0_vgpr1 - %1:vgpr(s64) = COPY $vgpr2_vgpr3 + %0:vgpr(p1) = COPY $vgpr0_vgpr1 + %1:vgpr(p1) = COPY $vgpr2_vgpr3 ; GCN: %{{[0-9]+}}:sreg_32 = S_MOV_B32 1 %2:sreg_32(s32) = G_CONSTANT i32 1 @@ -49,13 +49,13 @@ body: | ; GCN: %{{[0-9]+}}:vreg_64 = REG_SEQUENCE [[LO3]], %subreg.sub0, [[HI3]], %subreg.sub1 %9:vgpr(s64) = G_FCONSTANT double 1.0 - G_STORE %2, %0 :: (volatile store 4 into %ir.global0) - G_STORE %4, %0 :: (volatile store 4 into %ir.global0) - G_STORE %6, %0 :: (volatile store 4 into %ir.global0) - G_STORE %8, %0 :: (volatile store 4 into %ir.global0) - G_STORE %3, %1 :: (volatile store 8 into %ir.global1) - G_STORE %5, %1 :: (volatile store 8 into %ir.global1) - G_STORE %7, %1 :: (volatile store 8 into %ir.global1) - G_STORE %9, %1 :: (volatile store 8 into %ir.global1) + G_STORE %2, %0 :: (volatile store 4 into %ir.global0, addrspace 1) + G_STORE %4, %0 :: (volatile store 4 into %ir.global0, addrspace 1) + G_STORE %6, %0 :: (volatile store 4 into %ir.global0, addrspace 1) + G_STORE %8, %0 :: (volatile store 4 into %ir.global0, addrspace 1) + G_STORE %3, %1 :: (volatile store 8 into %ir.global1, addrspace 1) + G_STORE %5, %1 :: (volatile store 8 into %ir.global1, addrspace 1) + G_STORE %7, %1 :: (volatile store 8 into %ir.global1, addrspace 1) + G_STORE %9, %1 :: (volatile store 8 into %ir.global1, addrspace 1) ... 
--- diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir index 495acf634726..5c169ca6b613 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir @@ -19,8 +19,8 @@ body: | ; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY [[COPY]] ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF ; GCN: FLAT_STORE_DWORD [[COPY1]], [[DEF]], 0, 0, 0, implicit $exec, implicit $flat_scr - %0:sgpr(s64) = COPY $sgpr2_sgpr3 - %1:vgpr(s64) = COPY %0 + %0:sgpr(p1) = COPY $sgpr2_sgpr3 + %1:vgpr(p1) = COPY %0 %2:vgpr(s32) = G_IMPLICIT_DEF G_STORE %2, %1 :: (store 4 into %ir.global0) ... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.mir index 01a59f05a610..74c83e76aec1 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.mir @@ -16,7 +16,7 @@ body: | %0:sgpr(s32) = COPY $sgpr0 %1:vgpr(s32) = COPY $vgpr0 %2:vgpr(s32) = COPY $vgpr1 - %3:vgpr(s64) = COPY $vgpr3_vgpr4 + %3:vgpr(p1) = COPY $vgpr3_vgpr4 ; fadd vs ; GCN: V_ADD_F32_e64 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir index b7e472abc93f..f28caf2de08b 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir @@ -16,7 +16,7 @@ body: | %0:sgpr(s32) = COPY $sgpr0 %1:vgpr(s32) = COPY $vgpr0 %2:vgpr(s32) = COPY $vgpr1 - %3:vgpr(s64) = COPY $vgpr3_vgpr4 + %3:vgpr(p1) = COPY $vgpr3_vgpr4 ; fmul vs ; GCN: V_MUL_F32_e64 @@ -30,8 +30,8 @@ body: | ; GCN: V_MUL_F32_e64 %6:vgpr(s32) = G_FMUL %1, %2 - G_STORE %4, %3 :: (store 4 into %ir.global0) - G_STORE %5, %3 :: (store 4 into %ir.global0) - G_STORE %6, %3 :: (store 4 into %ir.global0) + G_STORE %4, %3 :: (store 4 into %ir.global0, addrspace 1) + G_STORE %5, 
%3 :: (store 4 into %ir.global0, addrspace 1) + G_STORE %6, %3 :: (store 4 into %ir.global0, addrspace 1) ... --- diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir index 07f19c4b34d6..950e7a508977 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir @@ -20,7 +20,7 @@ body: | ; GCN: [[VGPR:%[0-9]+]]:vgpr_32 = COPY $vgpr0 %1:vgpr(s32) = COPY $vgpr0 - %2:vgpr(s64) = COPY $vgpr3_vgpr4 + %2:vgpr(p1) = COPY $vgpr3_vgpr4 ; fptoui s ; GCN: V_CVT_U32_F32_e64 0, [[SGPR]], 0, 0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir index 0596d8321a6a..d6ead4ba808d 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir @@ -14,9 +14,9 @@ body: | ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4 ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF ; GCN: FLAT_STORE_DWORD [[COPY]], [[DEF]], 0, 0, 0, implicit $exec, implicit $flat_scr - %0:vgpr(s64) = COPY $vgpr3_vgpr4 + %0:vgpr(p1) = COPY $vgpr3_vgpr4 %1:vgpr(s32) = G_IMPLICIT_DEF - G_STORE %1, %0 :: (store 4) + G_STORE %1, %0 :: (store 4, addrspace 1) ... 
--- @@ -31,9 +31,9 @@ body: | ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4 ; GCN: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF ; GCN: FLAT_STORE_DWORDX2 [[COPY]], [[DEF]], 0, 0, 0, implicit $exec, implicit $flat_scr - %0:vgpr(s64) = COPY $vgpr3_vgpr4 + %0:vgpr(p1) = COPY $vgpr3_vgpr4 %1:vgpr(s64) = G_IMPLICIT_DEF - G_STORE %1, %0 :: (store 8) + G_STORE %1, %0 :: (store 8, addrspace 1) --- --- @@ -63,7 +63,7 @@ body: | ; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr %0:vgpr(p1) = G_IMPLICIT_DEF %1:vgpr(s32) = G_CONSTANT 4 - G_STORE %1, %0 :: (store 4) + G_STORE %1, %0 :: (store 4, addrspace 1) ... --- @@ -79,7 +79,7 @@ body: | ; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr %0:vgpr(p3) = G_IMPLICIT_DEF %1:vgpr(s32) = G_CONSTANT 4 - G_STORE %1, %0 :: (store 4) + G_STORE %1, %0 :: (store 4, addrspace 1) ... --- @@ -95,5 +95,5 @@ body: | ; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr %0:vgpr(p4) = G_IMPLICIT_DEF %1:vgpr(s32) = G_CONSTANT 4 - G_STORE %1, %0 :: (store 4) + G_STORE %1, %0 :: (store 4, addrspace 1) ... 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-maxnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-maxnum.mir index a473259201dc..67fe61df3e3b 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-maxnum.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-maxnum.mir @@ -19,7 +19,7 @@ body: | %0:sgpr(s32) = COPY $sgpr0 %1:vgpr(s32) = COPY $vgpr0 %2:vgpr(s32) = COPY $vgpr1 - %3:vgpr(s64) = COPY $vgpr3_vgpr4 + %3:vgpr(p1) = COPY $vgpr3_vgpr4 ; GCN: [[SGPR64_0:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11 ; GCN: [[VGPR64_0:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-minnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-minnum.mir index 0bfe9bb7217b..f8132d7a46e6 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-minnum.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-minnum.mir @@ -19,7 +19,7 @@ body: | %0:sgpr(s32) = COPY $sgpr0 %1:vgpr(s32) = COPY $vgpr0 %2:vgpr(s32) = COPY $vgpr1 - %3:vgpr(s64) = COPY $vgpr3_vgpr4 + %3:vgpr(p1) = COPY $vgpr3_vgpr4 ; GCN: [[SGPR64_0:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11 ; GCN: [[VGPR64_0:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir index 60e1a4cdd5ce..4d0de1dff5a6 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir @@ -19,7 +19,7 @@ body: | %0:sgpr(s32) = COPY $sgpr0 %1:sgpr(s32) = COPY $sgpr1 %2:vgpr(s32) = COPY $vgpr0 - %3:vgpr(s64) = COPY $vgpr3_vgpr4 + %3:vgpr(p1) = COPY $vgpr3_vgpr4 %4:sgpr(s32) = G_CONSTANT i32 1 %5:sgpr(s32) = G_CONSTANT i32 4096 @@ -39,7 +39,7 @@ body: | ; GCN: [[VV:%[0-9]+]]:vgpr_32 = V_OR_B32_e32 [[SV]], [[VGPR0]] %9:vgpr(s32) = G_OR %8, %2 - G_STORE %9, %3 :: (store 4 into %ir.global0) + G_STORE %9, %3 :: (store 4 into %ir.global0, addrspace 1) ... 
--- diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir index 42fc095985ae..bedcaf01f36d 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir @@ -20,7 +20,7 @@ body: | ; GCN: [[VGPR:%[0-9]+]]:vgpr_32 = COPY $vgpr0 %1:vgpr(s32) = COPY $vgpr0 - %2:vgpr(s64) = COPY $vgpr3_vgpr4 + %2:vgpr(p1) = COPY $vgpr3_vgpr4 ; sitofp s ; GCN: V_CVT_F32_I32_e64 [[SGPR]], 0, 0 diff --git a/llvm/test/CodeGen/MIR/AArch64/invalid-extload.mir b/llvm/test/CodeGen/MIR/AArch64/invalid-extload.mir deleted file mode 100644 index cce2639dded9..000000000000 --- a/llvm/test/CodeGen/MIR/AArch64/invalid-extload.mir +++ /dev/null @@ -1,23 +0,0 @@ -# RUN: not llc -mtriple=aarch64-none-linux-gnu -run-pass none -o - %s 2>&1 | FileCheck %s - -# CHECK: *** Bad machine code: Generic extload must have a narrower memory type *** -# CHECK: *** Bad machine code: Generic extload must have a narrower memory type *** -# CHECK: *** Bad machine code: Generic extload must have a narrower memory type *** -# CHECK: *** Bad machine code: Generic extload must have a narrower memory type *** -# CHECK: *** Bad machine code: Generic instruction accessing memory must have one mem operand *** -# CHECK: *** Bad machine code: Generic instruction accessing memory must have one mem operand *** - ---- -name: invalid_extload_memory_sizes -body: | - bb.0: - - %0:_(p0) = COPY $x0 - %1:_(s64) = G_ZEXTLOAD %0(p0) :: (load 8) - %2:_(s64) = G_ZEXTLOAD %0(p0) :: (load 16) - %3:_(s64) = G_SEXTLOAD %0(p0) :: (load 8) - %4:_(s64) = G_SEXTLOAD %0(p0) :: (load 16) - %5:_(s64) = G_ZEXTLOAD %0(p0) - %6:_(s64) = G_SEXTLOAD %0(p0) - -... 
diff --git a/llvm/test/Verifier/test_g_load.mir b/llvm/test/Verifier/test_g_load.mir new file mode 100644 index 000000000000..40fa21a54ef4 --- /dev/null +++ b/llvm/test/Verifier/test_g_load.mir @@ -0,0 +1,18 @@ +# RUN: not llc -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s +# REQUIRES: global-isel, aarch64-registered-target + +--- +name: test_load +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: +body: | + bb.0: + + ; CHECK: Bad machine code: Generic memory instruction must access a pointer + %0:_(s64) = G_CONSTANT i32 0 + %1:_(s32) = G_LOAD %0 :: (load 4) + +... diff --git a/llvm/test/Verifier/test_g_sextload.mir b/llvm/test/Verifier/test_g_sextload.mir new file mode 100644 index 000000000000..b195481d1e60 --- /dev/null +++ b/llvm/test/Verifier/test_g_sextload.mir @@ -0,0 +1,28 @@ +# RUN: not llc -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s +# REQUIRES: global-isel, aarch64-registered-target + +--- +name: test_sextload +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: +body: | + bb.0: + + ; CHECK: Bad machine code: Generic memory instruction must access a pointer + %0:_(s64) = G_CONSTANT i32 0 + %1:_(s32) = G_SEXTLOAD %0 :: (load 1) + + ; CHECK: *** Bad machine code: Generic instruction accessing memory must have one mem operand *** + %2:_(p0) = G_IMPLICIT_DEF + %3:_(s64) = G_SEXTLOAD %2 + + ; CHECK: Bad machine code: Generic extload must have a narrower memory type + ; CHECK: Bad machine code: Generic extload must have a narrower memory type + + %4:_(s64) = G_SEXTLOAD %2 :: (load 8) + %5:_(s64) = G_SEXTLOAD %2 :: (load 16) + +...
diff --git a/llvm/test/Verifier/test_g_store.mir b/llvm/test/Verifier/test_g_store.mir new file mode 100644 index 000000000000..132e13cd623c --- /dev/null +++ b/llvm/test/Verifier/test_g_store.mir @@ -0,0 +1,19 @@ +# RUN: not llc -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s +# REQUIRES: global-isel, aarch64-registered-target + +--- +name: test_store +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: +body: | + bb.0: + + ; CHECK: Bad machine code: Generic memory instruction must access a pointer + %0:_(s64) = G_CONSTANT i32 0 + %1:_(s32) = G_CONSTANT i32 1 + G_STORE %1, %0 :: (store 4) + +... diff --git a/llvm/test/Verifier/test_g_zextload.mir b/llvm/test/Verifier/test_g_zextload.mir new file mode 100644 index 000000000000..a2bd321bfa3a --- /dev/null +++ b/llvm/test/Verifier/test_g_zextload.mir @@ -0,0 +1,28 @@ +# RUN: not llc -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s +# REQUIRES: global-isel, aarch64-registered-target + +--- +name: test_zextload +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: +body: | + bb.0: + + ; CHECK: Bad machine code: Generic memory instruction must access a pointer + %0:_(s64) = G_CONSTANT i32 0 + %1:_(s32) = G_ZEXTLOAD %0 :: (load 1) + + ; CHECK: *** Bad machine code: Generic instruction accessing memory must have one mem operand *** + %2:_(p0) = G_IMPLICIT_DEF + %3:_(s64) = G_ZEXTLOAD %2 + + ; CHECK: Bad machine code: Generic extload must have a narrower memory type + ; CHECK: Bad machine code: Generic extload must have a narrower memory type + + %4:_(s64) = G_ZEXTLOAD %2 :: (load 8) + %5:_(s64) = G_ZEXTLOAD %2 :: (load 16) + +...