From ad2d46d0a5a3e8593a2754b618acd6e9cfe39453 Mon Sep 17 00:00:00 2001
From: Eli Friedman
Date: Mon, 26 Sep 2011 20:27:49 +0000
Subject: [PATCH] Convert more tests over to the new atomic instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@140559 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/ARM/atomic-cmp.ll         |  4 +--
 test/CodeGen/ARM/atomic-op.ll          | 44 +++++++-------------------
 test/CodeGen/Mips/atomic.ll            | 36 ++++++----------------
 test/Transforms/LowerAtomic/barrier.ll |  4 +--
 4 files changed, 23 insertions(+), 65 deletions(-)

diff --git a/test/CodeGen/ARM/atomic-cmp.ll b/test/CodeGen/ARM/atomic-cmp.ll
index e179017d10a..82726daebca 100644
--- a/test/CodeGen/ARM/atomic-cmp.ll
+++ b/test/CodeGen/ARM/atomic-cmp.ll
@@ -10,8 +10,6 @@ define i8 @t(i8* %a, i8 %b, i8 %c) nounwind {
 ; T2: t:
 ; T2: ldrexb
 ; T2: strexb
-  %tmp0 = tail call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %a, i8 %b, i8 %c)
+  %tmp0 = cmpxchg i8* %a, i8 %b, i8 %c monotonic
   ret i8 %tmp0
 }
-
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* nocapture, i8, i8) nounwind
diff --git a/test/CodeGen/ARM/atomic-op.ll b/test/CodeGen/ARM/atomic-op.ll
index b700eb1661d..02ce5a14691 100644
--- a/test/CodeGen/ARM/atomic-op.ll
+++ b/test/CodeGen/ARM/atomic-op.ll
@@ -24,80 +24,58 @@ entry:
   ; CHECK: ldrex
   ; CHECK: add
   ; CHECK: strex
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val1, i32 %tmp )   ; <i32>:0 [#uses=1]
+  %0 = atomicrmw add i32* %val1, i32 %tmp monotonic
   store i32 %0, i32* %old
   ; CHECK: ldrex
   ; CHECK: sub
   ; CHECK: strex
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 30 )   ; <i32>:1 [#uses=1]
+  %1 = atomicrmw sub i32* %val2, i32 30 monotonic
   store i32 %1, i32* %old
   ; CHECK: ldrex
   ; CHECK: add
   ; CHECK: strex
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val2, i32 1 )   ; <i32>:2 [#uses=1]
+  %2 = atomicrmw add i32* %val2, i32 1 monotonic
   store i32 %2, i32* %old
   ; CHECK: ldrex
   ; CHECK: sub
   ; CHECK: strex
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 1 )   ; <i32>:3 [#uses=1]
+  %3 = atomicrmw sub i32* %val2, i32 1 monotonic
   store i32 %3, i32* %old
   ; CHECK: ldrex
   ; CHECK: and
   ; CHECK: strex
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %andt, i32 4080 )   ; <i32>:4 [#uses=1]
+  %4 = atomicrmw and i32* %andt, i32 4080 monotonic
   store i32 %4, i32* %old
   ; CHECK: ldrex
   ; CHECK: or
   ; CHECK: strex
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ort, i32 4080 )   ; <i32>:5 [#uses=1]
+  %5 = atomicrmw or i32* %ort, i32 4080 monotonic
   store i32 %5, i32* %old
   ; CHECK: ldrex
   ; CHECK: eor
   ; CHECK: strex
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %xort, i32 4080 )   ; <i32>:6 [#uses=1]
+  %6 = atomicrmw xor i32* %xort, i32 4080 monotonic
   store i32 %6, i32* %old
   ; CHECK: ldrex
   ; CHECK: cmp
   ; CHECK: strex
-  call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 16 )   ; <i32>:7 [#uses=1]
+  %7 = atomicrmw min i32* %val2, i32 16 monotonic
   store i32 %7, i32* %old
   %neg = sub i32 0, 1   ; <i32> [#uses=1]
   ; CHECK: ldrex
   ; CHECK: cmp
   ; CHECK: strex
-  call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 %neg )   ; <i32>:8 [#uses=1]
+  %8 = atomicrmw min i32* %val2, i32 %neg monotonic
   store i32 %8, i32* %old
   ; CHECK: ldrex
   ; CHECK: cmp
   ; CHECK: strex
-  call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 1 )   ; <i32>:9 [#uses=1]
+  %9 = atomicrmw max i32* %val2, i32 1 monotonic
   store i32 %9, i32* %old
   ; CHECK: ldrex
   ; CHECK: cmp
   ; CHECK: strex
-  call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 0 )   ; <i32>:10 [#uses=1]
+  %10 = atomicrmw max i32* %val2, i32 0 monotonic
   store i32 %10, i32* %old
   ret void
 }
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.min.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.max.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.umax.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.umin.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
diff --git a/test/CodeGen/Mips/atomic.ll b/test/CodeGen/Mips/atomic.ll
index bc6bf5f2754..a4763b130d4 100644
--- a/test/CodeGen/Mips/atomic.ll
+++ b/test/CodeGen/Mips/atomic.ll
@@ -1,24 +1,10 @@
 ; RUN: llc -march=mipsel < %s | FileCheck %s
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32* nocapture, i32) nounwind
-declare i32 @llvm.atomic.swap.i32.p0i32(i32* nocapture, i32) nounwind
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* nocapture, i32, i32) nounwind
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8* nocapture, i8) nounwind
-declare i8 @llvm.atomic.swap.i8.p0i8(i8* nocapture, i8) nounwind
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* nocapture, i8, i8) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
-
 
 @x = common global i32 0, align 4
 
 define i32 @AtomicLoadAdd32(i32 %incr) nounwind {
 entry:
-  %0 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* @x, i32 %incr)
+  %0 = atomicrmw add i32* @x, i32 %incr monotonic
   ret i32 %0
 
 ; CHECK: AtomicLoadAdd32:
@@ -32,7 +18,7 @@ entry:
 
 define i32 @AtomicLoadNand32(i32 %incr) nounwind {
 entry:
-  %0 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* @x, i32 %incr)
+  %0 = atomicrmw nand i32* @x, i32 %incr monotonic
   ret i32 %0
 
 ; CHECK: AtomicLoadNand32:
@@ -50,7 +36,7 @@ entry:
   %newval.addr = alloca i32, align 4
   store i32 %newval, i32* %newval.addr, align 4
   %tmp = load i32* %newval.addr, align 4
-  %0 = call i32 @llvm.atomic.swap.i32.p0i32(i32* @x, i32 %tmp)
+  %0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
   ret i32 %0
 
 ; CHECK: AtomicSwap32:
@@ -66,7 +52,7 @@ entry:
   %newval.addr = alloca i32, align 4
   store i32 %newval, i32* %newval.addr, align 4
   %tmp = load i32* %newval.addr, align 4
-  %0 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* @x, i32 %oldval, i32 %tmp)
+  %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic
   ret i32 %0
 
 ; CHECK: AtomicCmpSwap32:
@@ -85,7 +71,7 @@ entry:
 
 define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
 entry:
-  %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @y, i8 %incr)
+  %0 = atomicrmw add i8* @y, i8 %incr monotonic
   ret i8 %0
 
 ; CHECK: AtomicLoadAdd8:
@@ -116,7 +102,7 @@ entry:
 
 define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
 entry:
-  %0 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @y, i8 %incr)
+  %0 = atomicrmw sub i8* @y, i8 %incr monotonic
   ret i8 %0
 
 ; CHECK: AtomicLoadSub8:
@@ -147,7 +133,7 @@ entry:
 
 define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
 entry:
-  %0 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @y, i8 %incr)
+  %0 = atomicrmw nand i8* @y, i8 %incr monotonic
   ret i8 %0
 
 ; CHECK: AtomicLoadNand8:
@@ -179,7 +165,7 @@ entry:
 
 define signext i8 @AtomicSwap8(i8 signext %newval) nounwind {
 entry:
-  %0 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @y, i8 %newval)
+  %0 = atomicrmw xchg i8* @y, i8 %newval monotonic
   ret i8 %0
 
 ; CHECK: AtomicSwap8:
@@ -208,7 +194,7 @@ entry:
 
 define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
 entry:
-  %0 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @y, i8 %oldval, i8 %newval)
+  %0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic
   ret i8 %0
 
 ; CHECK: AtomicCmpSwap8:
@@ -245,9 +231,7 @@ entry:
 
 define i32 @CheckSync(i32 %v) nounwind noinline {
 entry:
-  tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* @countsint, i32 %v)
-  tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  %0 = atomicrmw add i32* @countsint, i32 %v seq_cst
   ret i32 %0
 
 ; CHECK: CheckSync:
diff --git a/test/Transforms/LowerAtomic/barrier.ll b/test/Transforms/LowerAtomic/barrier.ll
index 218c5ba8d18..814d7afb5ff 100644
--- a/test/Transforms/LowerAtomic/barrier.ll
+++ b/test/Transforms/LowerAtomic/barrier.ll
@@ -1,10 +1,8 @@
 ; RUN: opt < %s -loweratomic -S | FileCheck %s
 
-declare void @llvm.memory.barrier(i1 %ll, i1 %ls, i1 %sl, i1 %ss, i1 %device)
-
 define void @barrier() {
 ; CHECK: @barrier
-  call void @llvm.memory.barrier(i1 0, i1 0, i1 0, i1 0, i1 0)
+  fence seq_cst
 ; CHECK-NEXT: ret
   ret void
 }
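--
For reference, the rewrite applied throughout this patch is mechanical: each
@llvm.atomic.load.* / @llvm.atomic.swap.* call becomes an atomicrmw
instruction, @llvm.atomic.cmp.swap.* becomes cmpxchg, and @llvm.memory.barrier
becomes fence, each carrying an explicit memory ordering (monotonic for the
plain operations above, seq_cst where the old code wrapped the operation in
barriers). A minimal sketch of the mapping in LLVM 3.0-era IR; the @g global
and @example function are illustrative only, not part of the patch:

@g = global i32 0

define i32 @example(i32 %v, i32 %old, i32 %new) nounwind {
entry:
  ; was: %r = call i32 @llvm.atomic.load.add.i32.p0i32(i32* @g, i32 %v)
  %r = atomicrmw add i32* @g, i32 %v monotonic
  ; was: %c = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* @g, i32 %old, i32 %new)
  ; cmpxchg takes the expected and replacement values and, in this era of the
  ; IR, returns the value that was actually loaded from memory
  %c = cmpxchg i32* @g, i32 %old, i32 %new monotonic
  ; was: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
  fence seq_cst
  %s = add i32 %r, %c
  ret i32 %s
}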