mirror of
https://github.com/RPCSX/llvm.git
synced 2024-12-13 14:46:53 +00:00
94c82b281d
This patch makes MemorySSA recognize atomic/volatile loads, and makes MSSA treat said loads specially. This allows us to be a bit more aggressive in some cases. Administrative note: Revision was LGTM'ed by reames in person. Additionally, this doesn't include the `invariant.load` recognition in the differential revision, because I feel it's better to commit that separately. Will commit soon. Differential Revision: https://reviews.llvm.org/D16875 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@277637 91177308-0d34-0410-b5e6-96231b3b80d8
31 lines
874 B
LLVM
; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
;
; Currently, MemorySSA doesn't support invariant groups. So, we should ignore
; invariant.group.barrier intrinsics entirely. We'll need to pay attention to
; them when/if we decide to support invariant groups.
@g = external global i32
|
|
|
|
define i32 @foo(i32* %a) {
|
|
; CHECK: 1 = MemoryDef(liveOnEntry)
|
|
; CHECK-NEXT: store i32 0
|
|
store i32 0, i32* %a, align 4, !llvm.invariant.group !0
|
|
|
|
; CHECK: 2 = MemoryDef(1)
|
|
; CHECK-NEXT: store i32 1
|
|
store i32 1, i32* @g, align 4
|
|
|
|
%1 = bitcast i32* %a to i8*
|
|
%a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
|
|
%a32 = bitcast i8* %a8 to i32*
|
|
|
|
; CHECK: MemoryUse(2)
|
|
; CHECK-NEXT: %2 = load i32
|
|
%2 = load i32, i32* %a32, align 4, !llvm.invariant.group !0
|
|
ret i32 %2
|
|
}
|
|
|
|
declare i8* @llvm.invariant.group.barrier(i8*)
|
|
|
|
!0 = !{!"group1"}
|