mirror of
https://github.com/RPCS3/llvm.git
synced 2024-12-29 16:04:33 +00:00
02e4fa7d5f
- Avoid attempting stride-reuse in the case that there are users that aren't addresses. In that case, there will be places where the multiplications won't be folded away, so it's better to try to strength-reduce them. - Several SSE intrinsics have operands that strength-reduction can treat as addresses. The previous item makes this more visible, as any non-address use of an IV can inhibit stride-reuse. - Make ValidStride aware of whether there's likely to be a base register in the address computation. This prevents it from thinking that things like stride 9 are valid on x86 when the base register is already occupied. Also, XFAIL the 2007-08-10-LEA16Use32.ll test; the new logic to avoid stride-reuse eliminates the LEA in the loop, so the test is no longer testing what it was intended to test. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@43231 91177308-0d34-0410-b5e6-96231b3b80d8
28 lines
1.1 KiB
LLVM
28 lines
1.1 KiB
LLVM
; RUN: llvm-as < %s | llc -march=x86 | grep {leal}
; XFAIL: *

; This test is XFAIL'd because strength-reduction was improved to
; avoid emitting the lea, so it no longer tests whether the 16-bit
; lea is avoided.

; Global i16 scalars written by the loop in @_Z3fooi below.
@X = global i16 0 ; <i16*> [#uses=1]
@Y = global i16 0 ; <i16*> [#uses=1]

; void foo(int N): for i in [0, N), volatile-store (i16)i to @X and
; (i16)(i << 2) to @Y. The shl by 2 gives @Y a stride-4 IV, which is
; what previously tempted codegen into a 16-bit lea.
define void @_Z3fooi(i32 %N) {
entry:
	; Guard: skip the loop entirely when N <= 0.
	%tmp1019 = icmp sgt i32 %N, 0 ; <i1> [#uses=1]
	br i1 %tmp1019, label %bb, label %return

bb: ; preds = %bb, %entry
	%i.014.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
	%tmp1 = trunc i32 %i.014.0 to i16 ; <i16> [#uses=2]
	; Volatile stores keep the loop body from being optimized away.
	volatile store i16 %tmp1, i16* @X, align 2
	%tmp34 = shl i16 %tmp1, 2 ; <i16> [#uses=1]
	volatile store i16 %tmp34, i16* @Y, align 2
	%indvar.next = add i32 %i.014.0, 1 ; <i32> [#uses=2]
	%exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
	br i1 %exitcond, label %return, label %bb

return: ; preds = %bb, %entry
	ret void
}