780a093afb
r223862 tried to also combine base-updating load/stores.

r224198 reverted it, as "it created a regression on the test-suite on test MultiSource/Benchmarks/Ptrdist/anagram by scrambling the order in which the words are shown."

Reapply, with a fix to ignore non-normal load/stores. Truncstores are handled elsewhere (you can actually write a pattern for those, whereas for postinc loads you can't, since they return two values), but it should be possible to also combine extload base updates, by checking that the memory (rather than result) type is of the same size as the addend.

Original commit message:

We used to only combine intrinsics, and turn them into VLD1_UPD/VST1_UPD when the base pointer is incremented after the load/store. We can do the same thing for generic load/stores.

Note that we can only combine the first load/store + adds pair in a sequence (as might be generated for a v16f32 load, for instance), because other combines turn the base pointer addition chain (each computing the address of the next load from the address of the last load) into independent additions (common base pointer + this load's offset).

Differential Revision: http://reviews.llvm.org/D6585

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224203 91177308-0d34-0410-b5e6-96231b3b80d8
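To make the pattern concrete, here is a minimal, hypothetical IR sketch (not part of this commit or of the test file below; the function and value names are made up) of the shape the combine looks for: a vector load whose base pointer is then advanced by the size of the access, which the ARM backend can now select as a single post-incremented vld1 (VLD1_UPD) rather than a separate load and add. It uses the typed-pointer syntax of the LLVM of that era, matching the test below.

; Illustrative sketch only: a load followed by an increment of the same base
; pointer by the access size (16 bytes). With this change, the add can be
; folded into a post-incremented vld1 (VLD1_UPD) during instruction selection.
define <4 x float>* @base_update_sketch(<4 x float>* %p, <4 x float>* %out) {
entry:
  %val = load <4 x float>* %p, align 16          ; 16-byte access through %p
  %next = getelementptr <4 x float>* %p, i32 1   ; %p + 16 bytes: the addend
  store <4 x float> %val, <4 x float>* %out, align 16
  ret <4 x float>* %next                         ; keep the updated pointer live
}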
75 lines
3.2 KiB
LLVM
; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=NO-REALIGN
; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=REALIGN

; rdar://12713765
; When realign-stack is set to false, make sure we are not creating stack
; objects that are assumed to be 64-byte aligned.
@T3_retval = common global <16 x float> zeroinitializer, align 16

define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" {
entry:
; NO-REALIGN-LABEL: test1
; NO-REALIGN: mov r[[R2:[0-9]+]], r[[R1:[0-9]+]]
; NO-REALIGN: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #48
; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]

; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]!
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]

; NO-REALIGN: add r[[R2:[0-9]+]], r[[R0:0]], #48
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: add r[[R2:[0-9]+]], r[[R0]], #32
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
}

define void @test2(<16 x float>* noalias sret %agg.result) nounwind ssp {
entry:
; REALIGN-LABEL: test2
; REALIGN: bic sp, sp, #63
; REALIGN: mov r[[R2:[0-9]+]], r[[R1:[0-9]+]]
; REALIGN: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; REALIGN: add r[[R2:[0-9]+]], r[[R1]], #48
; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]

; REALIGN: orr r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #32
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #16
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]

; REALIGN: add r[[R1:[0-9]+]], r[[R0:0]], #48
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; REALIGN: add r[[R1:[0-9]+]], r[[R0]], #32
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
}