; NOTE: Provenance (scrape metadata, converted to comments so the file parses):
; mirror of https://github.com/RPCSX/llvm.git, synced 2024-12-13, commit 702b589510.
; Commit summary: AMDGPU operand folding for multi-dword constant loads —
; folds away unnecessary SGPR->VGPR moves (see http://reviews.llvm.org/D15875,
; r257074). The fsub.ll test explicitly checks that the move is elided.
; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s

; Scalar f64 round lowering; only check that the kernel compiles to completion.
; FUNC-LABEL: {{^}}round_f64:
; SI: s_endpgm
define void @round_f64(double addrspace(1)* %out, double %x) #0 {
  %result = call double @llvm.round.f64(double %x) #1
  store double %result, double addrspace(1)* %out
  ret void
}
; This is a pretty large function, so just test a few of the
; instructions that are necessary.

; FUNC-LABEL: {{^}}v_round_f64:
; SI: buffer_load_dwordx2

; Exponent field of the loaded f64 is extracted from the high dword (bits 20..30).
; SI: v_bfe_u32 [[EXP:v[0-9]+]], v{{[0-9]+}}, 20, 11

; SI-DAG: v_not_b32_e32
; SI-DAG: v_not_b32_e32

; SI-DAG: v_cmp_eq_i32

; Sign bit of the rounding increment is taken from the input via bitfield insert.
; SI-DAG: s_mov_b32 [[BFIMASK:s[0-9]+]], 0x7fffffff
; SI-DAG: v_cmp_gt_i32
; SI-DAG: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[BFIMASK]]

; SI: buffer_store_dwordx2
; SI: s_endpgm
define void @v_round_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
  %out.gep = getelementptr double, double addrspace(1)* %out, i32 %tid
  %x = load double, double addrspace(1)* %gep
  %result = call double @llvm.round.f64(double %x) #1
  store double %result, double addrspace(1)* %out.gep
  ret void
}
; Vector (<2 x double>) variant; only check successful compilation.
; FUNC-LABEL: {{^}}round_v2f64:
; SI: s_endpgm
define void @round_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) #0 {
  %result = call <2 x double> @llvm.round.v2f64(<2 x double> %in) #1
  store <2 x double> %result, <2 x double> addrspace(1)* %out
  ret void
}
; Vector (<4 x double>) variant; only check successful compilation.
; FUNC-LABEL: {{^}}round_v4f64:
; SI: s_endpgm
define void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) #0 {
  %result = call <4 x double> @llvm.round.v4f64(<4 x double> %in) #1
  store <4 x double> %result, <4 x double> addrspace(1)* %out
  ret void
}
; Vector (<8 x double>) variant; only check successful compilation.
; FUNC-LABEL: {{^}}round_v8f64:
; SI: s_endpgm
define void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %in) #0 {
  %result = call <8 x double> @llvm.round.v8f64(<8 x double> %in) #1
  store <8 x double> %result, <8 x double> addrspace(1)* %out
  ret void
}
; Intrinsic declarations used by the tests above.
declare i32 @llvm.r600.read.tidig.x() #1

declare double @llvm.round.f64(double) #1
declare <2 x double> @llvm.round.v2f64(<2 x double>) #1
declare <4 x double> @llvm.round.v4f64(<4 x double>) #1
declare <8 x double> @llvm.round.v8f64(<8 x double>) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }