llvm/test/CodeGen/X86/atomic16.ll
Michael Liao c537f79dcd Fix PR15355
- Clear 'mayStore' flag when loading from the atomic variable before the
  spin loop
- Clear the kill flag when a register goes from a single use to multiple
  uses among the registers forming the address of that atomic variable
- Don't use a physical register as a live-in register in a BB (neither entry
  nor landing pad); copy it into a virtual register instead

(patch by Cameron Zwarich)



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@176538 91177308-0d34-0410-b5e6-96231b3b80d8
2013-03-06 00:17:04 +00:00

251 lines
5.7 KiB
LLVM

; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mcpu=corei7 -verify-machineinstrs -show-mc-encoding | FileCheck %s --check-prefix X64
; RUN: llc < %s -O0 -mtriple=i386-unknown-unknown -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32
@sc16 = external global i16
; atomicrmw add on an i16 global: adding constant 1 should select an inc,
; other immediates an add, and an add whose value comes from a prior fetch
; a register add/xadd -- each under a lock prefix. The 64-bit encoding
; checks pin the leading 0xf0 (lock) and 0x66 (operand-size) prefix bytes.
define void @atomic_fetch_add16() nounwind {
; X64: atomic_fetch_add16
; X32: atomic_fetch_add16
entry:
; 32-bit
%t1 = atomicrmw add i16* @sc16, i16 1 acquire
; X64: lock
; X64: incw
; X32: lock
; X32: incw
%t2 = atomicrmw add i16* @sc16, i16 3 acquire
; X64: lock
; X64: addw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: addw $3
%t3 = atomicrmw add i16* @sc16, i16 5 acquire
; X64: lock
; X64: xaddw {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: xaddw
%t4 = atomicrmw add i16* @sc16, i16 %t3 acquire
; X64: lock
; X64: addw {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: addw
ret void
; X64: ret
; X32: ret
}
; atomicrmw sub on an i16 global: subtracting constant 1 should select a
; dec, other immediates a sub, and a fetched (used) result an xadd/sub --
; each under a lock prefix, with the 0xf0/0x66 prefixes pinned on 64-bit.
define void @atomic_fetch_sub16() nounwind {
; X64: atomic_fetch_sub16
; X32: atomic_fetch_sub16
%t1 = atomicrmw sub i16* @sc16, i16 1 acquire
; X64: lock
; X64: decw
; X32: lock
; X32: decw
%t2 = atomicrmw sub i16* @sc16, i16 3 acquire
; X64: lock
; X64: subw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: subw $3
%t3 = atomicrmw sub i16* @sc16, i16 5 acquire
; X64: lock
; X64: xaddw {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: xaddw
%t4 = atomicrmw sub i16* @sc16, i16 %t3 acquire
; X64: lock
; X64: subw {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: subw
ret void
; X64: ret
; X32: ret
}
; atomicrmw and on an i16 global: an immediate and lowers to "lock andw";
; when the fetched result is needed (%t2 feeds %t3) the checks instead
; expect an and followed by a lock-cmpxchg loop.
define void @atomic_fetch_and16() nounwind {
; X64: atomic_fetch_and16
; X32: atomic_fetch_and16
%t1 = atomicrmw and i16* @sc16, i16 3 acquire
; X64: lock
; X64: andw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: andw $3
%t2 = atomicrmw and i16* @sc16, i16 5 acquire
; X64: andw
; X64: lock
; X64: cmpxchgw
; X32: andw
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw and i16* @sc16, i16 %t2 acquire
; X64: lock
; X64: andw {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: andw
ret void
; X64: ret
; X32: ret
}
; atomicrmw or on an i16 global: an immediate or lowers to "lock orw";
; when the fetched result is needed (%t2 feeds %t3) the checks instead
; expect an or followed by a lock-cmpxchg loop.
define void @atomic_fetch_or16() nounwind {
; X64: atomic_fetch_or16
; X32: atomic_fetch_or16
%t1 = atomicrmw or i16* @sc16, i16 3 acquire
; X64: lock
; X64: orw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: orw $3
%t2 = atomicrmw or i16* @sc16, i16 5 acquire
; X64: orw
; X64: lock
; X64: cmpxchgw
; X32: orw
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw or i16* @sc16, i16 %t2 acquire
; X64: lock
; X64: orw {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: orw
ret void
; X64: ret
; X32: ret
}
; atomicrmw xor on an i16 global: an immediate xor lowers to "lock xorw";
; when the fetched result is needed (%t2 feeds %t3) the checks instead
; expect a xor followed by a lock-cmpxchg loop.
define void @atomic_fetch_xor16() nounwind {
; X64: atomic_fetch_xor16
; X32: atomic_fetch_xor16
%t1 = atomicrmw xor i16* @sc16, i16 3 acquire
; X64: lock
; X64: xorw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: xorw $3
%t2 = atomicrmw xor i16* @sc16, i16 5 acquire
; X64: xorw
; X64: lock
; X64: cmpxchgw
; X32: xorw
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire
; X64: lock
; X64: xorw {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: xorw
ret void
; X64: ret
; X32: ret
}
; atomicrmw nand has no single x86 instruction; the checks expect the
; expanded and/not pair inside a lock-cmpxchg retry loop.
define void @atomic_fetch_nand16(i16 %x) nounwind {
; X64: atomic_fetch_nand16
; X32: atomic_fetch_nand16
%t1 = atomicrmw nand i16* @sc16, i16 %x acquire
; X64: andw
; X64: notw
; X64: lock
; X64: cmpxchgw
; X32: andw
; X32: notw
; X32: lock
; X32: cmpxchgw
ret void
; X64: ret
; X32: ret
}
; atomicrmw max (signed) expands to a compare + conditional move feeding a
; lock-cmpxchg retry loop on both 64-bit and 32-bit targets.
define void @atomic_fetch_max16(i16 %x) nounwind {
%t1 = atomicrmw max i16* @sc16, i16 %x acquire
; X64: cmpw
; X64: cmov
; X64: lock
; X64: cmpxchgw
; X32: cmpw
; X32: cmov
; X32: lock
; X32: cmpxchgw
ret void
; X64: ret
; X32: ret
}
; atomicrmw min (signed) expands to a compare + conditional move feeding a
; lock-cmpxchg retry loop on both 64-bit and 32-bit targets.
define void @atomic_fetch_min16(i16 %x) nounwind {
%t1 = atomicrmw min i16* @sc16, i16 %x acquire
; X64: cmpw
; X64: cmov
; X64: lock
; X64: cmpxchgw
; X32: cmpw
; X32: cmov
; X32: lock
; X32: cmpxchgw
ret void
; X64: ret
; X32: ret
}
; atomicrmw umax (unsigned) expands to a compare + conditional move feeding
; a lock-cmpxchg retry loop on both 64-bit and 32-bit targets.
define void @atomic_fetch_umax16(i16 %x) nounwind {
%t1 = atomicrmw umax i16* @sc16, i16 %x acquire
; X64: cmpw
; X64: cmov
; X64: lock
; X64: cmpxchgw
; X32: cmpw
; X32: cmov
; X32: lock
; X32: cmpxchgw
ret void
; X64: ret
; X32: ret
}
; atomicrmw umin (unsigned) expands to a compare + conditional move feeding
; a lock-cmpxchg retry loop on both 64-bit and 32-bit targets.
define void @atomic_fetch_umin16(i16 %x) nounwind {
%t1 = atomicrmw umin i16* @sc16, i16 %x acquire
; X64: cmpw
; X64: cmov
; X64: lock
; X64: cmpxchgw
; X32: cmpw
; X32: cmov
; X32: lock
; X32: cmpxchgw
ret void
; X64: ret
; X32: ret
}
; A cmpxchg on an i16 global maps directly to a single lock-prefixed
; cmpxchgw on both targets. (Pre-3.5 IR syntax: single ordering operand.)
define void @atomic_fetch_cmpxchg16() nounwind {
%t1 = cmpxchg i16* @sc16, i16 0, i16 1 acquire
; X64: lock
; X64: cmpxchgw
; X32: lock
; X32: cmpxchgw
ret void
; X64: ret
; X32: ret
}
; A release atomic store needs no lock prefix on x86 -- a plain movw
; suffices; the NOT checks assert no lock is emitted before it.
define void @atomic_fetch_store16(i16 %x) nounwind {
store atomic i16 %x, i16* @sc16 release, align 4
; X64-NOT: lock
; X64: movw
; X32-NOT: lock
; X32: movw
ret void
; X64: ret
; X32: ret
}
; atomicrmw xchg selects xchgw, which is implicitly locked when it has a
; memory operand; the NOT checks assert no explicit lock prefix is emitted.
define void @atomic_fetch_swap16(i16 %x) nounwind {
%t1 = atomicrmw xchg i16* @sc16, i16 %x acquire
; X64-NOT: lock
; X64: xchgw
; X32-NOT: lock
; X32: xchgw
ret void
; X64: ret
; X32: ret
}