R600/SI: Add global atomicrmw xchg

v2: Add separate offset/no-offset tests

Signed-off-by: Aaron Watry <awatry@gmail.com>
Reviewed-by: Matt Arsenault <matthew.arsenault@amd.com>
llvm-svn: 220110
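
As a quick orientation: the point of this patch is that an atomicrmw xchg on a global (addrspace(1)) pointer can now be selected to BUFFER_ATOMIC_SWAP on SI. A minimal sketch of the IR shape being handled, written in the typed-pointer syntax of this era (the function name is illustrative, not from the patch):

; Sketch only: exchanges %val into %ptr atomically; the old value is returned
; but ignored here, so the no-return (no glc) form can be selected.
define void @xchg_sketch(i32 addrspace(1)* %ptr, i32 %val) {
entry:
%old = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %val seq_cst
ret void
}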
Aaron Watry 2014-10-17 23:33:03 +00:00
parent d852df8bcb
commit ea88a00c76
3 changed files with 84 additions and 0 deletions

@@ -386,6 +386,7 @@ class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
[{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]
>;
def atomic_swap_global : global_binary_atomic_op<atomic_swap>;
def atomic_add_global : global_binary_atomic_op<atomic_load_add>;
def atomic_and_global : global_binary_atomic_op<atomic_load_and>;
def atomic_max_global : global_binary_atomic_op<atomic_load_max>;
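
The global_binary_atomic_op fragment (context above) only matches when the node's address space is AMDGPUAS::GLOBAL_ADDRESS, so atomic_swap_global covers global memory only. As a hypothetical contrast (function names are illustrative), the first function below is eligible for the new pattern, while the second operates on local/LDS memory, fails the address-space predicate, and is not covered by this fragment:

; Global xchg: matches atomic_swap_global and can select to BUFFER_ATOMIC_SWAP.
define void @swap_global(i32 addrspace(1)* %p, i32 %v) {
entry:
%old = atomicrmw volatile xchg i32 addrspace(1)* %p, i32 %v seq_cst
ret void
}

; Local (addrspace(3)) xchg: does not match this fragment; local atomics go
; through the DS instructions instead.
define void @swap_local(i32 addrspace(3)* %p, i32 %v) {
entry:
%old = atomicrmw volatile xchg i32 addrspace(3)* %p, i32 %v seq_cst
ret void
}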

@@ -896,6 +896,9 @@ defm BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper <
0x0000001e, "BUFFER_STORE_DWORDX4", VReg_128, v4i32, global_store
>;
//def BUFFER_ATOMIC_SWAP : MUBUF_ <0x00000030, "BUFFER_ATOMIC_SWAP", []>;
defm BUFFER_ATOMIC_SWAP : MUBUF_Atomic <
0x00000030, "BUFFER_ATOMIC_SWAP", VReg_32, i32, atomic_swap_global
>;
//def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <0x00000031, "BUFFER_ATOMIC_CMPSWAP", []>;
defm BUFFER_ATOMIC_ADD : MUBUF_Atomic <
0x00000032, "BUFFER_ATOMIC_ADD", VReg_32, i32, atomic_add_global
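
For reference, MUBUF_Atomic is the same multiclass already used by BUFFER_ATOMIC_ADD just below; as I read it, it emits both the no-return form and the returning form (glc set, so the pre-op value comes back in the data register), each with offset and addr64 addressing. That is why the tests below exercise every combination of offset/no-offset, addr64, and returned/ignored result.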

@@ -640,6 +640,86 @@ entry:
ret void
}
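
A note on the offset checks that follow: each _offset test advances the pointer by four i32 elements, i.e. 4 * 4 bytes = 16 bytes, so the selected instruction is expected to carry the immediate offset:0x10 rather than a separate address computation.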
; FUNC-LABEL: {{^}}atomic_xchg_i32_offset:
; SI: BUFFER_ATOMIC_SWAP v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_offset:
; SI: BUFFER_ATOMIC_SWAP [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
; SI: BUFFER_ATOMIC_SWAP v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
define void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64_offset:
; SI: BUFFER_ATOMIC_SWAP [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i32:
; SI: BUFFER_ATOMIC_SWAP v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xchg_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret:
; SI: BUFFER_ATOMIC_SWAP [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_xchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%0 = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64:
; SI: BUFFER_ATOMIC_SWAP v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64:
; SI: BUFFER_ATOMIC_SWAP [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_xchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
}
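
For anyone rerunning just these checks: this is a standard llc + FileCheck test, so a RUN line along the following lines drives it. This is a sketch, not copied from the patch; the exact flags at the top of the test file may differ:

; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s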
; FUNC-LABEL: {{^}}atomic_xor_i32_offset:
; SI: BUFFER_ATOMIC_XOR v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {