llvm/test/CodeGen/AMDGPU/load-weird-sizes.ll
AMDGPU: Improve load/store of illegal types. (Matt Arsenault, commit df587174eb, 2016-07-01)

There was previously a combine to handle the simple copy case.
Split this into handling loads and stores separately.

We might want to change how this handles some of the vector
extloads, since this can result in large code-size increases.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@274394 91177308-0d34-0410-b5e6-96231b3b80d8

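As a sketch of the kind of vector extload the commit message refers to (this function is illustrative only, is not part of the test below, and is not covered by any CHECK lines), a small vector load that is zero-extended to a wider element type may be split or scalarized during legalization, which is where a code-size increase could come from:

define void @load_v4i8_zext_example(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) {
  ; Hypothetical example: a <4 x i8> extload that the backend may widen or
  ; split into multiple scalar loads when it is legalized.
  %val = load <4 x i8>, <4 x i8> addrspace(1)* %in
  %ext = zext <4 x i8> %val to <4 x i32>
  store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
  ret void
}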

; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NOHSA -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -mtriple=amdgcn-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=CI-HSA -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NOHSA -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=R600 -check-prefix=CM -check-prefix=FUNC %s

; FUNC-LABEL: {{^}}load_i24:
; SI: {{flat|buffer}}_load_ubyte
; SI: {{flat|buffer}}_load_ushort
; SI: {{flat|buffer}}_store_dword
define void @load_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) #0 {
  %1 = load i24, i24 addrspace(1)* %in
  %2 = zext i24 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}load_i25:
; SI-NOHSA: buffer_load_dword [[VAL:v[0-9]+]]
; SI-NOHSA: buffer_store_dword [[VAL]]
; CI-HSA: flat_load_dword [[VAL:v[0-9]+]]
; CI-HSA: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VAL]]
define void @load_i25(i32 addrspace(1)* %out, i25 addrspace(1)* %in) #0 {
  %1 = load i25, i25 addrspace(1)* %in
  %2 = zext i25 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }