Mirror of https://github.com/RPCS3/llvm.git, synced 2025-01-10 22:46:25 +00:00
Commit 43b54db2a4
When predicting use-list order, we visit functions in reverse order followed by `GlobalValue`s and write out use-lists at the first opportunity. In the reader, this will translate to *after* the last use has been added. For this to work, we actually need to descend into `GlobalValue`s.

Added a targeted test in `use-list-order.ll` and `RUN` lines to the newly passing tests in `test/Bitcode`.

There are two remaining failures in `test/Bitcode`:

- blockaddress.ll: I haven't thought through how to model the way block addresses change the order of use-lists (or how to work around it).
- metadata-2.ll: There's an old-style `@llvm.used` global array here that I suspect the .ll parser isn't upgrading properly. When it round-trips through bitcode, the .bc reader *does* upgrade it, so the extra variable (`i8* null`) has an extra use, and the shuffle vector doesn't match. I think the fix is to upgrade old-style global arrays (or reject them?) in the .ll parser.

This is part of PR5680.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@214321 91177308-0d34-0410-b5e6-96231b3b80d8
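For illustration only (not part of this commit), a minimal sketch of the kind of input `verify-uselistorder` exercises: a constant with uses spread across functions and another global's initializer, whose use-list order must survive a bitcode round trip. The names `@g`, `@p`, `@f`, and `@h` are hypothetical.

; RUN: verify-uselistorder < %s -preserve-bc-use-list-order -num-shuffles=5
; Hypothetical example: @g is used from two functions and from another
; global's initializer. The writer visits functions in reverse order, then
; GlobalValues, and emits each use-list at the first point where all of its
; uses have been created, which is where the reader can replay the order
; (i.e. after the last use has been added).
@g = global i32 0
@p = global i32* @g

define i32 @f() {
entry:
  %v = load i32* @g
  ret i32 %v
}

define i32 @h() {
entry:
  %v = load i32* @g
  ret i32 %v
}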
84 lines
4.0 KiB
LLVM
; RUN: opt < %s -S | FileCheck %s
; RUN: verify-uselistorder < %s -preserve-bc-use-list-order -num-shuffles=5
; CHECK-NOT: {@llvm\\.palign}

define <4 x i32> @align1(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <4 x i32> %b to <2 x i64> ; <<2 x i64>> [#uses=1]
  %1 = bitcast <4 x i32> %a to <2 x i64> ; <<2 x i64>> [#uses=1]
  %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 15) ; <<2 x i64>> [#uses=1]
  %3 = bitcast <2 x i64> %2 to <4 x i32> ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %3
}

define double @align8(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <2 x i32> %b to <1 x i64> ; <<1 x i64>> [#uses=1]
  %1 = bitcast <2 x i32> %a to <1 x i64> ; <<1 x i64>> [#uses=1]
  %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 7) ; <<1 x i64>> [#uses=1]
  %3 = extractelement <1 x i64> %2, i32 0 ; <i64> [#uses=1]
  %retval12 = bitcast i64 %3 to double ; <double> [#uses=1]
  ret double %retval12
}

declare <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64>, <1 x i64>, i8) nounwind readnone

define double @align7(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <2 x i32> %b to <1 x i64> ; <<1 x i64>> [#uses=1]
  %1 = bitcast <2 x i32> %a to <1 x i64> ; <<1 x i64>> [#uses=1]
  %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 16) ; <<1 x i64>> [#uses=1]
  %3 = extractelement <1 x i64> %2, i32 0 ; <i64> [#uses=1]
  %retval12 = bitcast i64 %3 to double ; <double> [#uses=1]
  ret double %retval12
}

define double @align6(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <2 x i32> %b to <1 x i64> ; <<1 x i64>> [#uses=1]
  %1 = bitcast <2 x i32> %a to <1 x i64> ; <<1 x i64>> [#uses=1]
  %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 9) ; <<1 x i64>> [#uses=1]
  %3 = extractelement <1 x i64> %2, i32 0 ; <i64> [#uses=1]
  %retval12 = bitcast i64 %3 to double ; <double> [#uses=1]
  ret double %retval12
}

define double @align5(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <2 x i32> %b to <1 x i64> ; <<1 x i64>> [#uses=1]
  %1 = bitcast <2 x i32> %a to <1 x i64> ; <<1 x i64>> [#uses=1]
  %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 8) ; <<1 x i64>> [#uses=1]
  %3 = extractelement <1 x i64> %2, i32 0 ; <i64> [#uses=1]
  %retval12 = bitcast i64 %3 to double ; <double> [#uses=1]
  ret double %retval12
}

define <4 x i32> @align4(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <4 x i32> %b to <2 x i64> ; <<2 x i64>> [#uses=1]
  %1 = bitcast <4 x i32> %a to <2 x i64> ; <<2 x i64>> [#uses=1]
  %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 32) ; <<2 x i64>> [#uses=1]
  %3 = bitcast <2 x i64> %2 to <4 x i32> ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %3
}

declare <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64>, <2 x i64>, i8) nounwind readnone

define <4 x i32> @align3(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <4 x i32> %b to <2 x i64> ; <<2 x i64>> [#uses=1]
  %1 = bitcast <4 x i32> %a to <2 x i64> ; <<2 x i64>> [#uses=1]
  %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 17) ; <<2 x i64>> [#uses=1]
  %3 = bitcast <2 x i64> %2 to <4 x i32> ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %3
}

define <4 x i32> @align2(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <4 x i32> %b to <2 x i64> ; <<2 x i64>> [#uses=1]
  %1 = bitcast <4 x i32> %a to <2 x i64> ; <<2 x i64>> [#uses=1]
  %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 16) ; <<2 x i64>> [#uses=1]
  %3 = bitcast <2 x i64> %2 to <4 x i32> ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %3
}