
Shifts with a uniform but non-constant count were considered very expensive to vectorize, because the splat of the uniform count and the shift would tend to appear in different blocks. That made the splat invisible to ISel, and we'd scalarize the shift at codegen time. Since r201655, CodeGenPrepare sinks those splats to be next to their use, and we are able to select the appropriate vector shifts. This updates the cost model to take this into account by making shifts by a uniform value cheap again.

Differential Revision: https://reviews.llvm.org/D23049

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@277782 91177308-0d34-0410-b5e6-96231b3b80d8
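For illustration only (not part of the original commit), the sketch below shows the kind of IR this change assumes ISel will now see: the splat of the uniform count %k and the vector ashr live in the same block, so the backend can select a single SSE2 vector shift instead of scalarizing. The function name and values are hypothetical.

; Hypothetical sketch: a uniform but non-constant shift count, splatted and
; used by a vector ashr in the same basic block. With the splat adjacent to
; its use, SSE2 can lower this to one vector shift rather than four scalar
; shifts.
define <4 x i32> @uniform_ashr(<4 x i32> %vals, i32 %k) {
entry:
  %ins = insertelement <4 x i32> undef, i32 %k, i32 0
  %splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer
  %shift = ashr <4 x i32> %vals, %splat
  ret <4 x i32> %shift
}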
; RUN: opt -mtriple=x86_64-apple-darwin -mattr=+sse2 -loop-vectorize -debug-only=loop-vectorize -S < %s 2>&1 | FileCheck %s
; REQUIRES: asserts

; CHECK: "foo"
; CHECK: LV: Found an estimated cost of 1 for VF 4 For instruction: %shift = ashr i32 %val, %k
define void @foo(i32* nocapture %p, i32 %k) local_unnamed_addr #0 {
entry:
  br label %body

body:
  %i = phi i64 [ 0, %entry ], [ %next, %body ]
  %ptr = getelementptr inbounds i32, i32* %p, i64 %i
  %val = load i32, i32* %ptr, align 4
  %shift = ashr i32 %val, %k
  store i32 %shift, i32* %ptr, align 4
  %next = add nuw nsw i64 %i, 1
  %cmp = icmp eq i64 %next, 16
  br i1 %cmp, label %exit, label %body

exit:
  ret void

}