mirror of
https://github.com/capstone-engine/llvm-capstone.git
synced 2025-01-15 20:51:35 +00:00
AMDGPU: Don't ignore carry out user when expanding add_co_pseudo
This was resulting in a missing vreg def in the select instruction that uses the carry out. The pseudo's output doesn't make sense as defined: it really shouldn't have the vreg output in the first place, but should instead have an implicit SCC def to match the real scalar behavior. The tests would be easier to understand if we selected scalar versions of the [us]{add|sub}.with.overflow intrinsics. This does still end up producing vector code in the end, since it gets moved later.
This commit is contained in:
parent
f9e24a563c
commit
c19c153e74
@ -3880,6 +3880,7 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
|
||||
MachineBasicBlock::iterator MII = MI;
|
||||
const DebugLoc &DL = MI.getDebugLoc();
|
||||
MachineOperand &Dest = MI.getOperand(0);
|
||||
MachineOperand &CarryDest = MI.getOperand(1);
|
||||
MachineOperand &Src0 = MI.getOperand(2);
|
||||
MachineOperand &Src1 = MI.getOperand(3);
|
||||
MachineOperand &Src2 = MI.getOperand(4);
|
||||
@ -3916,6 +3917,9 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
|
||||
}
|
||||
|
||||
BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg()).add(Src0).add(Src1);
|
||||
|
||||
BuildMI(*BB, MII, DL, TII->get(AMDGPU::COPY), CarryDest.getReg())
|
||||
.addReg(AMDGPU::SCC);
|
||||
MI.eraseFromParent();
|
||||
return BB;
|
||||
}
|
||||
|
121
llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
Normal file
121
llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
Normal file
@ -0,0 +1,121 @@
|
||||
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
||||
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
|
||||
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GCN,GFX10 %s
|
||||
|
||||
; NOTE(review): the GFX9/GFX10 check lines below are autogenerated by
; utils/update_llc_test_checks.py (per the note at the top of this file) --
; regenerate them rather than hand-editing.
; The carry out of the expanded scalar add is consumed by selects (%i7, %i9),
; i.e. the "carry out user" case the parent commit stops ignoring.
define i32 @s_add_co_select_user() {
|
||||
; GFX9-LABEL: s_add_co_select_user:
|
||||
; GFX9: ; %bb.0: ; %bb
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
||||
; GFX9-NEXT: s_mov_b64 s[4:5], 0
|
||||
; GFX9-NEXT: s_load_dword s6, s[4:5], 0x0
|
||||
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
||||
; GFX9-NEXT: v_add_co_u32_e64 v0, s[4:5], s6, s6
|
||||
; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
|
||||
; GFX9-NEXT: s_addc_u32 s4, s6, 0
|
||||
; GFX9-NEXT: s_cselect_b64 vcc, 1, 0
|
||||
; GFX9-NEXT: v_mov_b32_e32 v1, s4
|
||||
; GFX9-NEXT: s_cmp_gt_u32 s6, 31
|
||||
; GFX9-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
|
||||
; GFX9-NEXT: s_cselect_b64 vcc, -1, 0
|
||||
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
|
||||
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
||||
;
|
||||
; GFX10-LABEL: s_add_co_select_user:
|
||||
; GFX10: ; %bb.0: ; %bb
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
||||
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
|
||||
; GFX10-NEXT: s_mov_b64 s[4:5], 0
|
||||
; GFX10-NEXT: ; implicit-def: $vcc_hi
|
||||
; GFX10-NEXT: s_load_dword s4, s[4:5], 0x0
|
||||
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
||||
; GFX10-NEXT: v_add_co_u32_e64 v0, s5, s4, s4
|
||||
; GFX10-NEXT: s_cmpk_lg_u32 s5, 0x0
|
||||
; GFX10-NEXT: s_addc_u32 s5, s4, 0
|
||||
; GFX10-NEXT: s_cselect_b32 s6, 1, 0
|
||||
; GFX10-NEXT: s_cmp_gt_u32 s4, 31
|
||||
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, s5, s6
|
||||
; GFX10-NEXT: s_cselect_b32 vcc_lo, -1, 0
|
||||
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
|
||||
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
||||
; Unsigned add-with-overflow pattern: %i2 is the carry of %i1 = %i + %i; it is
; folded through %i3/%i4/%i5 and consumed by the selects %i7 and %i9.
bb:
|
||||
  %i = load volatile i32, i32 addrspace(4)* null, align 8
|
||||
  %i1 = add i32 %i, %i
|
||||
  %i2 = icmp ult i32 %i1, %i
|
||||
  %i3 = zext i1 %i2 to i32
|
||||
  %i4 = add nuw nsw i32 %i3, 0
|
||||
  %i5 = add i32 %i4, %i
|
||||
  %i6 = icmp ult i32 %i5, %i4
|
||||
  %i7 = select i1 %i6, i32 %i5, i32 0
|
||||
  %i8 = icmp ugt i32 %i, 31
|
||||
  %i9 = select i1 %i8, i32 %i1, i32 %i7
|
||||
  ret i32 %i9
|
||||
}
|
||||
|
||||
; NOTE(review): the GFX9/GFX10 check lines below are autogenerated by
; utils/update_llc_test_checks.py -- regenerate them rather than hand-editing.
; Same carry-out pattern as above, but here the carry-derived compare %i6 is
; consumed by a conditional branch instead of (only) a select.
define amdgpu_kernel void @s_add_co_br_user(i32 %i) {
|
||||
; GFX9-LABEL: s_add_co_br_user:
|
||||
; GFX9: ; %bb.0: ; %bb
|
||||
; GFX9-NEXT: s_load_dword s0, s[4:5], 0x0
|
||||
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
||||
; GFX9-NEXT: s_add_i32 s1, s0, s0
|
||||
; GFX9-NEXT: v_mov_b32_e32 v0, s0
|
||||
; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s1, v0
|
||||
; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
|
||||
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
|
||||
; GFX9-NEXT: s_addc_u32 s0, s0, 0
|
||||
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, s0, v0
|
||||
; GFX9-NEXT: s_and_b64 vcc, exec, vcc
|
||||
; GFX9-NEXT: s_cbranch_vccnz BB1_2
|
||||
; GFX9-NEXT: ; %bb.1: ; %bb0
|
||||
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
||||
; GFX9-NEXT: v_mov_b32_e32 v2, 9
|
||||
; GFX9-NEXT: v_mov_b32_e32 v1, 0
|
||||
; GFX9-NEXT: global_store_dword v[0:1], v2, off
|
||||
; GFX9-NEXT: BB1_2: ; %bb1
|
||||
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
||||
; GFX9-NEXT: v_mov_b32_e32 v2, 10
|
||||
; GFX9-NEXT: v_mov_b32_e32 v1, 0
|
||||
; GFX9-NEXT: global_store_dword v[0:1], v2, off
|
||||
; GFX9-NEXT: s_endpgm
|
||||
;
|
||||
; GFX10-LABEL: s_add_co_br_user:
|
||||
; GFX10: ; %bb.0: ; %bb
|
||||
; GFX10-NEXT: s_load_dword s0, s[4:5], 0x0
|
||||
; GFX10-NEXT: ; implicit-def: $vcc_hi
|
||||
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
||||
; GFX10-NEXT: s_add_i32 s1, s0, s0
|
||||
; GFX10-NEXT: v_cmp_lt_u32_e64 s1, s1, s0
|
||||
; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1
|
||||
; GFX10-NEXT: s_cmpk_lg_u32 s1, 0x0
|
||||
; GFX10-NEXT: s_addc_u32 s0, s0, 0
|
||||
; GFX10-NEXT: v_cmp_ge_u32_e32 vcc_lo, s0, v0
|
||||
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, vcc_lo
|
||||
; GFX10-NEXT: s_cbranch_vccnz BB1_2
|
||||
; GFX10-NEXT: ; %bb.1: ; %bb0
|
||||
; GFX10-NEXT: v_mov_b32_e32 v0, 0
|
||||
; GFX10-NEXT: v_mov_b32_e32 v2, 9
|
||||
; GFX10-NEXT: v_mov_b32_e32 v1, 0
|
||||
; GFX10-NEXT: global_store_dword v[0:1], v2, off
|
||||
; GFX10-NEXT: BB1_2: ; %bb1
|
||||
; GFX10-NEXT: v_mov_b32_e32 v0, 0
|
||||
; GFX10-NEXT: v_mov_b32_e32 v2, 10
|
||||
; GFX10-NEXT: v_mov_b32_e32 v1, 0
|
||||
; GFX10-NEXT: global_store_dword v[0:1], v2, off
|
||||
; GFX10-NEXT: s_endpgm
|
||||
; %i2 is the carry of %i1 = %i + %i; %i6 (derived from the carried sum %i5)
; drives both the select %i7 and the branch to %bb0/%bb1.
bb:
|
||||
  %i1 = add i32 %i, %i
|
||||
  %i2 = icmp ult i32 %i1, %i
|
||||
  %i3 = zext i1 %i2 to i32
|
||||
  %i4 = add nuw nsw i32 %i3, 0
|
||||
  %i5 = add i32 %i4, %i
|
||||
  %i6 = icmp ult i32 %i5, %i4
|
||||
  %i7 = select i1 %i6, i32 %i5, i32 0
|
||||
  br i1 %i6, label %bb0, label %bb1
|
||||
|
||||
bb0:
|
||||
  store volatile i32 9, i32 addrspace(1)* null
|
||||
  br label %bb1
|
||||
|
||||
bb1:
|
||||
  store volatile i32 10, i32 addrspace(1)* null
|
||||
  ret void
|
||||
}
|
Loading…
x
Reference in New Issue
Block a user