; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=BOTH %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=BOTH %s
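
; Each test builds a 64-bit rotate-right from the canonical shift idiom
; rotr(x, y) = (x >> y) | (x << (64 - y)). The i64 tests pin the expected
; shift/or expansion; SI is the default amdgcn subtarget, while VI (tonga)
; selects the operand-reversed v_lshlrev_b64/v_lshrrev_b64 shift forms.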
; BOTH-LABEL: {{^}}s_rotr_i64:
; BOTH-DAG: s_sub_i32
; BOTH-DAG: s_lshr_b64
; BOTH-DAG: s_lshl_b64
; BOTH: s_or_b64
define amdgpu_kernel void @s_rotr_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
entry:
  %tmp0 = sub i64 64, %y
  %tmp1 = shl i64 %x, %tmp0
  %tmp2 = lshr i64 %x, %y
  %tmp3 = or i64 %tmp1, %tmp2
  store i64 %tmp3, i64 addrspace(1)* %in
  ret void
}
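
; The operands are loaded from global memory, so the rotate is expanded
; with VALU shifts and a pair of 32-bit ORs.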
; BOTH-LABEL: {{^}}v_rotr_i64:
; BOTH-DAG: v_sub_i32
; SI-DAG: v_lshr_b64
; SI-DAG: v_lshl_b64
; VI-DAG: v_lshrrev_b64
; VI-DAG: v_lshlrev_b64
; BOTH: v_or_b32
; BOTH: v_or_b32
define amdgpu_kernel void @v_rotr_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
entry:
  %x = load i64, i64 addrspace(1)* %xptr, align 8
  %y = load i64, i64 addrspace(1)* %yptr, align 8
  %tmp0 = sub i64 64, %y
  %tmp1 = shl i64 %x, %tmp0
  %tmp2 = lshr i64 %x, %y
  %tmp3 = or i64 %tmp1, %tmp2
  store i64 %tmp3, i64 addrspace(1)* %in
  ret void
}
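
; For <2 x i64> only the label is checked: the test verifies that the
; per-element expansion compiles, without pinning an instruction sequence.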
; BOTH-LABEL: {{^}}s_rotr_v2i64:
define amdgpu_kernel void @s_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> %x, <2 x i64> %y) {
entry:
  %tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
  %tmp1 = shl <2 x i64> %x, %tmp0
  %tmp2 = lshr <2 x i64> %x, %y
  %tmp3 = or <2 x i64> %tmp1, %tmp2
  store <2 x i64> %tmp3, <2 x i64> addrspace(1)* %in
  ret void
}
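
; As above, only successful codegen is checked for the vector VGPR case.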
; BOTH-LABEL: {{^}}v_rotr_v2i64:
define amdgpu_kernel void @v_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
entry:
  %x = load <2 x i64>, <2 x i64> addrspace(1)* %xptr, align 8
  %y = load <2 x i64>, <2 x i64> addrspace(1)* %yptr, align 8
  %tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
  %tmp1 = shl <2 x i64> %x, %tmp0
  %tmp2 = lshr <2 x i64> %x, %y
  %tmp3 = or <2 x i64> %tmp1, %tmp2
  store <2 x i64> %tmp3, <2 x i64> addrspace(1)* %in
  ret void
}