Mirror of https://github.com/RPCSX/llvm.git (synced 2025-04-03 16:51:42 +00:00)

This can be done with only moves, which should theoretically optimize better later. Although this transform increases the instruction count, it should be code size / cycle count neutral in the worst VALU case. It also seems to slightly improve a couple of testcases due to other DAG combines this exposes. This is probably slightly worse for the SALU case, so it might be better to handle this during moveToVALU, although then you lose some simplifications, like the load-width reduction in the simple testcase.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@242177 91177308-0d34-0410-b5e6-96231b3b80d8
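The commit title is not part of this excerpt, so the sketch below is illustrative only: it is an assumption about the kind of pattern such a "rewrite as moves" transform targets, not necessarily the exact one this commit handles, and the function name @shl_i64_by_32 is hypothetical. A 64-bit shift-left by 32 has result halves that are individually trivial (low half zero, high half equal to the low half of the source), so it can be materialized with two 32-bit moves instead of a 64-bit shift, and the moves can then expose further combines such as narrowing the feeding load.

define i64 @shl_i64_by_32(i64 addrspace(1)* %p) {
  %x = load i64, i64 addrspace(1)* %p
  ; After rewriting the shift as moves, only the low 32 bits of %x are used,
  ; so the i64 load could shrink to i32; the shift itself becomes
  ; result.lo = 0 and result.hi = x.lo, i.e. two 32-bit moves on the VALU.
  %r = shl i64 %x, 32
  ret i64 %r
}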
19 lines · 731 B · LLVM
; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s

declare i32 @llvm.SI.tid() readnone

; The [1025 x i32] element stride is 4100 bytes, so forming the 64-bit byte
; offset from the 32-bit %tid uses a 32x32 multiply; both halves are checked.
; SI-LABEL: {{^}}test_array_ptr_calc:
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_hi_i32
; SI: s_endpgm
define void @test_array_ptr_calc(i32 addrspace(1)* noalias %out, [1025 x i32] addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
  %tid = call i32 @llvm.SI.tid() readnone
  %a_ptr = getelementptr [1025 x i32], [1025 x i32] addrspace(1)* %inA, i32 %tid, i32 0
  %b_ptr = getelementptr i32, i32 addrspace(1)* %inB, i32 %tid
  %a = load i32, i32 addrspace(1)* %a_ptr
  %b = load i32, i32 addrspace(1)* %b_ptr
  %result = add i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}