Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-12-02 00:16:25 +00:00)
6f4137a9fc
We would emit a libcall for a 64-bit atomic on x86 after SVN r212119. This was due to the misuse of hasCmpxchg16b to indicate whether cmpxchg8b was supported on a 32-bit target. The two feature checks were added at different times, and the border condition ended up being mishandled.

This fixes the border case so that cmpxchg8b is emitted for 64-bit atomic operations on x86, at the cost of restoring a long-standing codegen bug: we emit cmpxchg8b on all x86 targets, even where the CPU does not support the instruction (pre-Pentium CPUs). Although that bug should be fixed, it was present prior to SVN r212119 and this change, so this is not really introducing a regression.

llvm-svn: 212956
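For context, a minimal sketch of what the corrected lowering looks like, written in the same style as the test below. This block is illustrative and not part of the commit; the function name @test_add_inline and the exact CHECK lines are assumptions. After this change, a seq_cst 64-bit atomicrmw on a 32-bit x86 target is expanded to an inline cmpxchg8b loop instead of a __sync_fetch_and_add_8 libcall:

; Hypothetical companion check, assuming the post-fix lowering described above.
; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s
define i64 @test_add_inline(i64* %addr, i64 %inc) {
; CHECK-LABEL: test_add_inline:
; CHECK-NOT: calll __sync_fetch_and_add_8
; CHECK: cmpxchg8b
  %old = atomicrmw add i64* %addr, i64 %inc seq_cst
  ret i64 %old
}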
45 lines
1.1 KiB
LLVM
; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s
; XFAIL: *

define i64 @test_add(i64* %addr, i64 %inc) {
; CHECK-LABEL: test_add:
; CHECK: calll __sync_fetch_and_add_8
  %old = atomicrmw add i64* %addr, i64 %inc seq_cst
  ret i64 %old
}

define i64 @test_sub(i64* %addr, i64 %inc) {
; CHECK-LABEL: test_sub:
; CHECK: calll __sync_fetch_and_sub_8
  %old = atomicrmw sub i64* %addr, i64 %inc seq_cst
  ret i64 %old
}

define i64 @test_and(i64* %andr, i64 %inc) {
; CHECK-LABEL: test_and:
; CHECK: calll __sync_fetch_and_and_8
  %old = atomicrmw and i64* %andr, i64 %inc seq_cst
  ret i64 %old
}

define i64 @test_or(i64* %orr, i64 %inc) {
; CHECK-LABEL: test_or:
; CHECK: calll __sync_fetch_and_or_8
  %old = atomicrmw or i64* %orr, i64 %inc seq_cst
  ret i64 %old
}

define i64 @test_xor(i64* %xorr, i64 %inc) {
; CHECK-LABEL: test_xor:
; CHECK: calll __sync_fetch_and_xor_8
  %old = atomicrmw xor i64* %xorr, i64 %inc seq_cst
  ret i64 %old
}

define i64 @test_nand(i64* %nandr, i64 %inc) {
; CHECK-LABEL: test_nand:
; CHECK: calll __sync_fetch_and_nand_8
  %old = atomicrmw nand i64* %nandr, i64 %inc seq_cst
  ret i64 %old
}