[LoopUnroll] Use LoopSize+1 as threshold, to allow unrolling loops matching LoopSize.

We use `< UP.Threshold` later on, so we should use LoopSize + 1 to
allow unrolling if the result won't exceed the loop size.

Fixes PR43305.
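
For illustration, a minimal self-contained sketch of the boundary condition (the helper, variable names and numbers below are hypothetical, not the in-tree code): assuming the later size gate has the form `UnrolledCost < Threshold`, a threshold of exactly LoopSize rejects a full unroll whose estimated cost equals the loop size, while LoopSize + 1 accepts it.

#include <algorithm>
#include <cassert>

// Hypothetical stand-in for the strict size check done later in the pass
// (the real code compares against UP.Threshold with '<').
static bool allowFullUnroll(unsigned UnrolledCost, unsigned Threshold) {
  return UnrolledCost < Threshold;
}

int main() {
  unsigned LoopSize = 12;     // estimated size of the rolled loop
  unsigned UnrolledCost = 12; // a full unroll that does not grow the code
  unsigned Threshold = 0;     // assume the optsize threshold starts lower

  // Old behaviour: Threshold = max(Threshold, LoopSize) -> 12 < 12 is false.
  assert(!allowFullUnroll(UnrolledCost, std::max(Threshold, LoopSize)));

  // New behaviour: Threshold = max(Threshold, LoopSize + 1) -> 12 < 13 is true.
  assert(allowFullUnroll(UnrolledCost, std::max(Threshold, LoopSize + 1)));
  return 0;
}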

Reviewers: efriedma, dmgreen, paquette

Reviewed By: dmgreen

Differential Revision: https://reviews.llvm.org/D67594

llvm-svn: 372084
Florian Hahn, 2019-09-17 09:02:48 +00:00
commit 9e1c9bd69d (parent 008671759e)
3 changed files with 63 additions and 5 deletions


@@ -1032,10 +1032,10 @@ static LoopUnrollResult tryToUnrollLoop(
     return LoopUnrollResult::Unmodified;
   }
 
-  // When optimizing for size, use LoopSize as threshold, to (fully) unroll
-  // loops, if it does not increase code size.
+  // When optimizing for size, use LoopSize + 1 as threshold (we use < Threshold
+  // later), to (fully) unroll loops, if it does not increase code size.
   if (OptForSize)
-    UP.Threshold = std::max(UP.Threshold, LoopSize);
+    UP.Threshold = std::max(UP.Threshold, LoopSize + 1);
 
   if (NumInlineCandidates != 0) {
     LLVM_DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n");


@@ -136,7 +136,7 @@ define void @fully_unrolled_bigger() #0 {
 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 [[INDVARS_IV]]
 ; CHECK-NEXT: store i32 [[OR]], i32* [[ARRAYIDX]], align 4
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 6
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 7
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
 ; CHECK: for.cond.cleanup:
 ; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
@@ -156,7 +156,7 @@ for.body: ; preds = %for.body, %entry
   %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
   store i32 %or, i32* %arrayidx, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %exitcond = icmp eq i64 %indvars.iv, 6
+  %exitcond = icmp eq i64 %indvars.iv, 7
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 
 for.cond.cleanup: ; preds = %for.cond


@@ -0,0 +1,58 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-unroll -S < %s | FileCheck %s
define i32 @test(i32 %a, i32 %b, i32 %c) optsize {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[REF_TMP:%.*]] = alloca [3 x i32], align 4
; CHECK-NEXT: [[TMP0:%.*]] = bitcast [3 x i32]* [[REF_TMP]] to i8*
; CHECK-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[REF_TMP]], i64 0, i64 0
; CHECK-NEXT: store i32 [[A:%.*]], i32* [[ARRAYINIT_BEGIN]], align 4
; CHECK-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[REF_TMP]], i64 0, i64 1
; CHECK-NEXT: store i32 [[B:%.*]], i32* [[ARRAYINIT_ELEMENT]], align 4
; CHECK-NEXT: [[ARRAYINIT_ELEMENT1:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[REF_TMP]], i64 0, i64 2
; CHECK-NEXT: store i32 [[C:%.*]], i32* [[ARRAYINIT_ELEMENT1]], align 4
; CHECK-NEXT: [[ADD_PTR_I_I:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[REF_TMP]], i64 0, i64 3
; CHECK-NEXT: [[CMP_I_I_I3:%.*]] = icmp slt i32 [[A]], [[B]]
; CHECK-NEXT: [[SPEC_SELECT_I_I4:%.*]] = select i1 [[CMP_I_I_I3]], i32* [[ARRAYINIT_ELEMENT]], i32* [[ARRAYINIT_BEGIN]]
; CHECK-NEXT: [[INCDEC_PTR_I_I5:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[REF_TMP]], i64 0, i64 2
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, i32* [[SPEC_SELECT_I_I4]], align 4
; CHECK-NEXT: [[DOTPRE2:%.*]] = load i32, i32* [[INCDEC_PTR_I_I5]], align 4
; CHECK-NEXT: [[CMP_I_I_I:%.*]] = icmp slt i32 [[DOTPRE]], [[DOTPRE2]]
; CHECK-NEXT: [[SPEC_SELECT_I_I:%.*]] = select i1 [[CMP_I_I_I]], i32* [[INCDEC_PTR_I_I5]], i32* [[SPEC_SELECT_I_I4]]
; CHECK-NEXT: [[INCDEC_PTR_I_I:%.*]] = getelementptr inbounds i32, i32* [[INCDEC_PTR_I_I5]], i64 1
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[SPEC_SELECT_I_I]], align 4
; CHECK-NEXT: ret i32 [[TMP1]]
;
entry:
%ref.tmp = alloca [3 x i32], align 4
%0 = bitcast [3 x i32]* %ref.tmp to i8*
%arrayinit.begin = getelementptr inbounds [3 x i32], [3 x i32]* %ref.tmp, i64 0, i64 0
store i32 %a, i32* %arrayinit.begin, align 4
%arrayinit.element = getelementptr inbounds [3 x i32], [3 x i32]* %ref.tmp, i64 0, i64 1
store i32 %b, i32* %arrayinit.element, align 4
%arrayinit.element1 = getelementptr inbounds [3 x i32], [3 x i32]* %ref.tmp, i64 0, i64 2
store i32 %c, i32* %arrayinit.element1, align 4
%add.ptr.i.i = getelementptr inbounds [3 x i32], [3 x i32]* %ref.tmp, i64 0, i64 3
%cmp.i.i.i3 = icmp slt i32 %a, %b
%spec.select.i.i4 = select i1 %cmp.i.i.i3, i32* %arrayinit.element, i32* %arrayinit.begin
%incdec.ptr.i.i5 = getelementptr inbounds [3 x i32], [3 x i32]* %ref.tmp, i64 0, i64 2
br label %loop
loop: ; preds = %entry, %loop
%incdec.ptr.i.i7 = phi i32* [ %incdec.ptr.i.i5, %entry ], [ %incdec.ptr.i.i, %loop ]
%spec.select.i.i6 = phi i32* [ %spec.select.i.i4, %entry ], [ %spec.select.i.i, %loop ]
%.pre = load i32, i32* %spec.select.i.i6, align 4
%.pre2 = load i32, i32* %incdec.ptr.i.i7, align 4
%cmp.i.i.i = icmp slt i32 %.pre, %.pre2
%spec.select.i.i = select i1 %cmp.i.i.i, i32* %incdec.ptr.i.i7, i32* %spec.select.i.i6
%incdec.ptr.i.i = getelementptr inbounds i32, i32* %incdec.ptr.i.i7, i64 1
%cmp1.i.i = icmp eq i32* %incdec.ptr.i.i, %add.ptr.i.i
br i1 %cmp1.i.i, label %exit, label %loop
exit: ; preds = %loop
%1 = load i32, i32* %spec.select.i.i, align 4
ret i32 %1
}