; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-T1
; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs -mcpu=cortex-m0 | FileCheck %s --check-prefix=CHECK-M0
; RUN: llc < %s -mtriple=thumbv7--none-eabi -thread-model single -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-BAREMETAL

target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"

define void @func(i32 %argc, i8** %argv) nounwind {
entry:
%argc.addr = alloca i32 ; <i32*> [#uses=1]
%argv.addr = alloca i8** ; <i8***> [#uses=1]
%val1 = alloca i32 ; <i32*> [#uses=2]
%val2 = alloca i32 ; <i32*> [#uses=15]
%andt = alloca i32 ; <i32*> [#uses=2]
%ort = alloca i32 ; <i32*> [#uses=2]
%xort = alloca i32 ; <i32*> [#uses=2]
%old = alloca i32 ; <i32*> [#uses=18]
%temp = alloca i32 ; <i32*> [#uses=2]
store i32 %argc, i32* %argc.addr
store i8** %argv, i8*** %argv.addr
store i32 0, i32* %val1
store i32 31, i32* %val2
store i32 3855, i32* %andt
store i32 3855, i32* %ort
store i32 3855, i32* %xort
store i32 4, i32* %temp
%tmp = load i32, i32* %temp
; CHECK: ldrex
; CHECK: add
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_add_4
; CHECK-M0: bl ___sync_fetch_and_add_4
; CHECK-BAREMETAL: add
; CHECK-BAREMETAL-NOT: __sync
%0 = atomicrmw add i32* %val1, i32 %tmp monotonic
store i32 %0, i32* %old
; CHECK: ldrex
; CHECK: sub
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_sub_4
; CHECK-M0: bl ___sync_fetch_and_sub_4
; CHECK-BAREMETAL: sub
; CHECK-BAREMETAL-NOT: __sync
%1 = atomicrmw sub i32* %val2, i32 30 monotonic
store i32 %1, i32* %old
; CHECK: ldrex
; CHECK: add
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_add_4
; CHECK-M0: bl ___sync_fetch_and_add_4
; CHECK-BAREMETAL: add
; CHECK-BAREMETAL-NOT: __sync
%2 = atomicrmw add i32* %val2, i32 1 monotonic
store i32 %2, i32* %old
; CHECK: ldrex
; CHECK: sub
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_sub_4
; CHECK-M0: bl ___sync_fetch_and_sub_4
; CHECK-BAREMETAL: sub
; CHECK-BAREMETAL-NOT: __sync
%3 = atomicrmw sub i32* %val2, i32 1 monotonic
store i32 %3, i32* %old
; CHECK: ldrex
; CHECK: and
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_and_4
; CHECK-M0: bl ___sync_fetch_and_and_4
; CHECK-BAREMETAL: and
; CHECK-BAREMETAL-NOT: __sync
%4 = atomicrmw and i32* %andt, i32 4080 monotonic
store i32 %4, i32* %old
; CHECK: ldrex
; CHECK: or
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_or_4
; CHECK-M0: bl ___sync_fetch_and_or_4
; CHECK-BAREMETAL: or
; CHECK-BAREMETAL-NOT: __sync
%5 = atomicrmw or i32* %ort, i32 4080 monotonic
store i32 %5, i32* %old
; CHECK: ldrex
; CHECK: eor
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_xor_4
; CHECK-M0: bl ___sync_fetch_and_xor_4
; CHECK-BAREMETAL: eor
; CHECK-BAREMETAL-NOT: __sync
%6 = atomicrmw xor i32* %xort, i32 4080 monotonic
store i32 %6, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_min_4
; CHECK-M0: bl ___sync_fetch_and_min_4
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%7 = atomicrmw min i32* %val2, i32 16 monotonic
store i32 %7, i32* %old
%neg = sub i32 0, 1
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_min_4
; CHECK-M0: bl ___sync_fetch_and_min_4
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%8 = atomicrmw min i32* %val2, i32 %neg monotonic
store i32 %8, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_max_4
; CHECK-M0: bl ___sync_fetch_and_max_4
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%9 = atomicrmw max i32* %val2, i32 1 monotonic
store i32 %9, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_max_4
; CHECK-M0: bl ___sync_fetch_and_max_4
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%10 = atomicrmw max i32* %val2, i32 0 monotonic
store i32 %10, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_4
; CHECK-M0: bl ___sync_fetch_and_umin_4
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%11 = atomicrmw umin i32* %val2, i32 16 monotonic
store i32 %11, i32* %old
%uneg = sub i32 0, 1
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_4
; CHECK-M0: bl ___sync_fetch_and_umin_4
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%12 = atomicrmw umin i32* %val2, i32 %uneg monotonic
store i32 %12, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_4
; CHECK-M0: bl ___sync_fetch_and_umax_4
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%13 = atomicrmw umax i32* %val2, i32 1 monotonic
store i32 %13, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_4
; CHECK-M0: bl ___sync_fetch_and_umax_4
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%14 = atomicrmw umax i32* %val2, i32 0 monotonic
store i32 %14, i32* %old

ret void
}

define void @func2() nounwind {
entry:
%val = alloca i16
%old = alloca i16
store i16 31, i16* %val
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_2
; CHECK-M0: bl ___sync_fetch_and_umin_2
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%0 = atomicrmw umin i16* %val, i16 16 monotonic
store i16 %0, i16* %old
%uneg = sub i16 0, 1
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_2
; CHECK-M0: bl ___sync_fetch_and_umin_2
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%1 = atomicrmw umin i16* %val, i16 %uneg monotonic
store i16 %1, i16* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_2
; CHECK-M0: bl ___sync_fetch_and_umax_2
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%2 = atomicrmw umax i16* %val, i16 1 monotonic
store i16 %2, i16* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_2
; CHECK-M0: bl ___sync_fetch_and_umax_2
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%3 = atomicrmw umax i16* %val, i16 0 monotonic
store i16 %3, i16* %old
ret void
}

define void @func3() nounwind {
entry:
%val = alloca i8
%old = alloca i8
store i8 31, i8* %val
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_1
; CHECK-M0: bl ___sync_fetch_and_umin_1
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%0 = atomicrmw umin i8* %val, i8 16 monotonic
store i8 %0, i8* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_1
; CHECK-M0: bl ___sync_fetch_and_umin_1
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%uneg = sub i8 0, 1
%1 = atomicrmw umin i8* %val, i8 %uneg monotonic
store i8 %1, i8* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_1
; CHECK-M0: bl ___sync_fetch_and_umax_1
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%2 = atomicrmw umax i8* %val, i8 1 monotonic
store i8 %2, i8* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_1
; CHECK-M0: bl ___sync_fetch_and_umax_1
; CHECK-BAREMETAL: cmp
; CHECK-BAREMETAL-NOT: __sync
%3 = atomicrmw umax i8* %val, i8 0 monotonic
store i8 %3, i8* %old
ret void
}

; CHECK: func4
; This function should not need to use callee-saved registers.
; rdar://problem/12203728
; CHECK-NOT: r4
define i32 @func4(i32* %p) nounwind optsize ssp {
entry:
%0 = atomicrmw add i32* %p, i32 1 monotonic
ret i32 %0
}
|
2014-04-03 13:06:54 +00:00
|
|
|
|
|
|
|
define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
|
|
|
|
; CHECK-LABEL: test_cmpxchg_fail_order:
|
%pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
%oldval = extractvalue { i32, i1 } %pair, 0
; CHECK: dmb ish
; CHECK: [[LOOP_BB:\.?LBB[0-9]+_1]]:
; CHECK: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
; CHECK: cmp [[OLDVAL]], r1
; CHECK: bxne lr
; CHECK: strex [[SUCCESS:r[0-9]+]], r2, [r[[ADDR]]]
; CHECK: cmp [[SUCCESS]], #0
; CHECK: bne [[LOOP_BB]]
; CHECK: dmb ish
; CHECK: bx lr

ret i32 %oldval
}

define i32 @test_cmpxchg_fail_order1(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-LABEL: test_cmpxchg_fail_order1:
%pair = cmpxchg i32* %addr, i32 %desired, i32 %new acquire acquire
%oldval = extractvalue { i32, i1 } %pair, 0
; CHECK-NOT: dmb ish
; CHECK: [[LOOP_BB:\.?LBB[0-9]+_1]]:
; CHECK: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
; CHECK: cmp [[OLDVAL]], r1
; CHECK: bne [[END_BB:\.?LBB[0-9]+_[0-9]+]]
; CHECK: strex [[SUCCESS:r[0-9]+]], r2, [r[[ADDR]]]
; CHECK: cmp [[SUCCESS]], #0
; CHECK: bne [[LOOP_BB]]
; CHECK: [[END_BB]]:
; CHECK: dmb ish
; CHECK: bx lr

ret i32 %oldval
}
|
2014-08-21 14:35:47 +00:00
|
|
|
|
|
|
|
define i32 @load_load_add_acquire(i32* %mem1, i32* %mem2) nounwind {
|
|
|
|
; CHECK-LABEL: load_load_add_acquire
|
2015-02-27 21:17:42 +00:00
|
|
|
%val1 = load atomic i32, i32* %mem1 acquire, align 4
|
|
|
|
%val2 = load atomic i32, i32* %mem2 acquire, align 4
|
2014-08-21 14:35:47 +00:00
|
|
|
%tmp = add i32 %val1, %val2
|
|
|
|
|
|
|
|
; CHECK: ldr {{r[0-9]}}, [r0]
|
|
|
|
; CHECK: dmb
|
|
|
|
; CHECK: ldr {{r[0-9]}}, [r1]
|
|
|
|
; CHECK: dmb
|
|
|
|
; CHECK: add r0,
|
|
|
|
|
|
|
|
; CHECK-M0: ___sync_val_compare_and_swap_4
|
|
|
|
; CHECK-M0: ___sync_val_compare_and_swap_4
|
|
|
|
|
|
|
|
; CHECK-BAREMETAL: ldr {{r[0-9]}}, [r0]
|
|
|
|
; CHECK-BAREMETAL-NOT: dmb
|
|
|
|
; CHECK-BAREMETAL: ldr {{r[0-9]}}, [r1]
|
|
|
|
; CHECK-BAREMETAL-NOT: dmb
|
|
|
|
; CHECK-BAREMETAL: add r0,
|
|
|
|
|
|
|
|
ret i32 %tmp
|
|
|
|
}

define void @store_store_release(i32* %mem1, i32 %val1, i32* %mem2, i32 %val2) {
; CHECK-LABEL: store_store_release
store atomic i32 %val1, i32* %mem1 release, align 4
store atomic i32 %val2, i32* %mem2 release, align 4

; CHECK: dmb
; CHECK: str r1, [r0]
; CHECK: dmb
; CHECK: str r3, [r2]

; CHECK-M0: ___sync_lock_test_and_set
; CHECK-M0: ___sync_lock_test_and_set

; CHECK-BAREMETAL-NOT: dmb
; CHECK-BAREMETAL: str r1, [r0]
; CHECK-BAREMETAL-NOT: dmb
; CHECK-BAREMETAL: str r3, [r2]

ret void
}

define void @load_fence_store_monotonic(i32* %mem1, i32* %mem2) {
; CHECK-LABEL: load_fence_store_monotonic
%val = load atomic i32, i32* %mem1 monotonic, align 4
fence seq_cst
store atomic i32 %val, i32* %mem2 monotonic, align 4

; CHECK: ldr [[R0:r[0-9]]], [r0]
; CHECK: dmb
; CHECK: str [[R0]], [r1]

; CHECK-M0: ldr [[R0:r[0-9]]], [r0]
; CHECK-M0: dmb
; CHECK-M0: str [[R0]], [r1]

; CHECK-BAREMETAL: ldr [[R0:r[0-9]]], [r0]
; CHECK-BAREMETAL-NOT: dmb
; CHECK-BAREMETAL: str [[R0]], [r1]

ret void
}