Add vload* for addrspace(2) and use as constant load for R600

Signed-off-by: Aaron Watry <awatry@gmail.com>
Reviewed-by: Tom Stellard <thomas.stellard@amd.com>
llvm-svn: 188179
This commit is contained in:
Aaron Watry 2013-08-12 14:42:49 +00:00
parent 3841fa38da
commit 7d52565321
3 changed files with 42 additions and 4 deletions

View File

@ -1,4 +1,5 @@
; This provides optimized implementations of vload4/8/16 for 32-bit int/uint
; This provides optimized implementations of vload2/3/4/8/16 for 32-bit int/uint
; The address spaces get mapped to data types in target-specific usages
define <2 x i32> @__clc_vload2_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
%1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)*
@ -30,6 +31,36 @@ define <16 x i32> @__clc_vload16_i32__addr1(i32 addrspace(1)* nocapture %addr) n
ret <16 x i32> %2
}
define <2 x i32> @__clc_vload2_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline {
  ; Optimized vload2 from the constant address space (addrspace 2 maps to
  ; __constant in target usage): reinterpret the scalar pointer as a
  ; vector pointer and fetch both lanes in a single load.
  ; align 4 = element alignment only; the caller guarantees no more.
  %vptr = bitcast i32 addrspace(2)* %addr to <2 x i32> addrspace(2)*
  %vec = load <2 x i32> addrspace(2)* %vptr, align 4, !tbaa !3
  ret <2 x i32> %vec
}
define <3 x i32> @__clc_vload3_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline {
  ; Optimized vload3 from the constant address space: one <3 x i32>
  ; vector load through a bitcast pointer.
  ; align 4 = element alignment only; vector-width alignment is not assumed.
  %vptr = bitcast i32 addrspace(2)* %addr to <3 x i32> addrspace(2)*
  %vec = load <3 x i32> addrspace(2)* %vptr, align 4, !tbaa !3
  ret <3 x i32> %vec
}
define <4 x i32> @__clc_vload4_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline {
  ; Optimized vload4 from the constant address space: one <4 x i32>
  ; vector load through a bitcast pointer.
  ; align 4 = element alignment only; vector-width alignment is not assumed.
  %vptr = bitcast i32 addrspace(2)* %addr to <4 x i32> addrspace(2)*
  %vec = load <4 x i32> addrspace(2)* %vptr, align 4, !tbaa !3
  ret <4 x i32> %vec
}
define <8 x i32> @__clc_vload8_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline {
  ; Optimized vload8 from the constant address space: one <8 x i32>
  ; vector load through a bitcast pointer.
  ; align 4 = element alignment only; vector-width alignment is not assumed.
  %vptr = bitcast i32 addrspace(2)* %addr to <8 x i32> addrspace(2)*
  %vec = load <8 x i32> addrspace(2)* %vptr, align 4, !tbaa !3
  ret <8 x i32> %vec
}
define <16 x i32> @__clc_vload16_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline {
  ; Optimized vload16 from the constant address space: one <16 x i32>
  ; vector load through a bitcast pointer.
  ; align 4 = element alignment only; vector-width alignment is not assumed.
  %vptr = bitcast i32 addrspace(2)* %addr to <16 x i32> addrspace(2)*
  %vec = load <16 x i32> addrspace(2)* %vptr, align 4, !tbaa !3
  ret <16 x i32> %vec
}
; TBAA type descriptor nodes; the loads above tag their accesses with !3
; ("int"). !5 (presumably the TBAA root node) is defined elsewhere in
; this file — not visible here, TODO confirm.
!1 = metadata !{metadata !"char", metadata !5}
!2 = metadata !{metadata !"short", metadata !5}
!3 = metadata !{metadata !"int", metadata !5}

View File

@ -1,4 +1,5 @@
; This provides optimized implementations of vstore4/8/16 for 32-bit int/uint
; This provides optimized implementations of vstore2/3/4/8/16 for 32-bit int/uint
; The address spaces get mapped to data types in target-specific usages
define void @__clc_vstore2_i32__addr1(<2 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
%1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)*

View File

@ -48,10 +48,8 @@ VLOAD_TYPES()
VLOAD_VECTORIZE(int, __private)
VLOAD_VECTORIZE(int, __local)
VLOAD_VECTORIZE(int, __constant)
VLOAD_VECTORIZE(uint, __private)
VLOAD_VECTORIZE(uint, __local)
VLOAD_VECTORIZE(uint, __constant)
_CLC_OVERLOAD _CLC_DEF int3 vload3(size_t offset, const global int *x) {
return (int3)(vload2(0, &x[3*offset]), x[3*offset+2]);
@ -59,6 +57,12 @@ _CLC_OVERLOAD _CLC_DEF int3 vload3(size_t offset, const global int *x) {
_CLC_OVERLOAD _CLC_DEF uint3 vload3(size_t offset, const global uint *x) {
  /* No native 3-wide load exists: read lanes 0-1 with the optimized
     vload2 and lane 2 with a scalar load from global memory. */
  const global uint *base = &x[3 * offset];
  return (uint3)(vload2(0, base), base[2]);
}
_CLC_OVERLOAD _CLC_DEF int3 vload3(size_t offset, const constant int *x) {
  /* No native 3-wide load exists: read lanes 0-1 with the optimized
     vload2 and lane 2 with a scalar load from constant memory. */
  const constant int *base = &x[3 * offset];
  return (int3)(vload2(0, base), base[2]);
}
_CLC_OVERLOAD _CLC_DEF uint3 vload3(size_t offset, const constant uint *x) {
  /* No native 3-wide load exists: read lanes 0-1 with the optimized
     vload2 and lane 2 with a scalar load from constant memory. */
  const constant uint *base = &x[3 * offset];
  return (uint3)(vload2(0, base), base[2]);
}
// We only define functions for the signed typeN vloadN() overloads; the unsigned variants simply bitcast the result.
#define _CLC_VLOAD_ASM_DECL(PRIM_TYPE,LLVM_SCALAR_TYPE,ADDR_SPACE,ADDR_SPACE_ID) \
@ -83,9 +87,11 @@ _CLC_DECL PRIM_TYPE##16 __clc_vload16_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID
#define _CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(PRIM_TYPE,S_PRIM_TYPE,LLVM_TYPE) \
_CLC_VLOAD_ASM_OVERLOAD_SIZES(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, global, 1) \
_CLC_VLOAD_ASM_OVERLOAD_SIZES(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, constant, 2) \
#define _CLC_VLOAD_ASM_OVERLOADS() \
_CLC_VLOAD_ASM_DECL(int,i32,__global,1) \
_CLC_VLOAD_ASM_DECL(int,i32,__constant,2) \
_CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(int,int,i32) \
_CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(uint,int,i32) \