LoopDistribute/LAA: Add tests to catch regressions

I broke 2 of these with a patch, but they were not covered by existing
tests.

https://reviews.llvm.org/D63035

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@363158 91177308-0d34-0410-b5e6-96231b3b80d8
commit e04248b7f0 (parent 7da998a80a)
Author: Matt Arsenault
Date:   2019-06-12 13:15:59 +00:00

3 changed files with 118 additions and 0 deletions
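For context: loop distribution splits a loop whose body mixes dependent and independent statements into separate loops, so that the independent part can later be vectorized. Both transforms reorder and widen memory accesses, which is generally not sound for volatile accesses, so the new tests check that the passes leave such loops alone. A minimal C sketch of the transform (illustration only, not output of the pass; the function and variable names are made up):

/* Before distribution: one loop mixing a dependent and an
   independent statement. */
void before(float *x, float *y, long n) {
  for (long i = 0; i < n; ++i) {
    x[i + 1] = x[i] + 1.0f; /* loop-carried dependence: stays scalar */
    y[i] = y[i] * 2.0f;     /* independent: vectorizable once split out */
  }
}

/* After distribution: the second loop can be vectorized on its own. */
void after(float *x, float *y, long n) {
  for (long i = 0; i < n; ++i)
    x[i + 1] = x[i] + 1.0f;
  for (long i = 0; i < n; ++i)
    y[i] = y[i] * 2.0f;
}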


@@ -23,6 +23,7 @@ target triple = "x86_64-apple-macosx10.10.0"
@D = common global i32* null, align 8
@E = common global i32* null, align 8
; CHECK-LABEL: @f(
define void @f() {
entry:
  %a = load i32*, i32** @A, align 8
@@ -108,3 +109,67 @@ for.body: ; preds = %for.body, %entry
for.end:                                          ; preds = %for.body
  ret void
}

; Make sure there's no "Multiple reports generated" assert with a
; volatile load, and that the loop is not distributed.
;
; TODO: Distribution of loops with volatile accesses may be possible
; under some circumstances, but the current implementation does not
; touch them.
; CHECK-LABEL: @f_volatile_load(
; CHECK: br label %for.body{{$}}
; CHECK-NOT: load
; CHECK: {{^}}for.body:
; CHECK: load i32
; CHECK: load i32
; CHECK: load volatile i32
; CHECK: load i32
; CHECK: br i1 %exitcond, label %for.end, label %for.body{{$}}
; CHECK-NOT: load
; VECTORIZE-NOT: load <4 x i32>
; VECTORIZE-NOT: mul <4 x i32>
define void @f_volatile_load() {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  %c = load i32*, i32** @C, align 8
  %d = load i32*, i32** @D, align 8
  %e = load i32*, i32** @E, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
  %loadA = load i32, i32* %arrayidxA, align 4
  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
  %loadB = load i32, i32* %arrayidxB, align 4
  %mulA = mul i32 %loadB, %loadA
  %add = add nuw nsw i64 %ind, 1
  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4
  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
  %loadD = load volatile i32, i32* %arrayidxD, align 4
  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
  %loadE = load i32, i32* %arrayidxE, align 4
  %mulC = mul i32 %loadD, %loadE
  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
  store i32 %mulC, i32* %arrayidxC, align 4
  %exitcond = icmp eq i64 %add, 20
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}
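A rough C equivalent of @f_volatile_load, for readers who prefer C to IR (illustration only; the extern declarations and the _c suffix are assumptions standing in for the globals @A..@E):

extern int *A, *B, *C, *E;
extern volatile int *D;

/* Without the volatile load of D[i], the second statement could be
   split into its own loop and vectorized; with it, LoopDistribute
   must bail out instead of asserting. */
void f_volatile_load_c(void) {
  int *a = A, *b = B, *c = C, *e = E;
  volatile int *d = D;
  for (long i = 0; i < 20; ++i) {
    a[i + 1] = a[i] * b[i]; /* loop-carried dependence */
    c[i] = d[i] * e[i];     /* volatile read */
  }
}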


@@ -29,3 +29,33 @@ define i32 @read_only_func(i32* nocapture %A, i32* nocapture %B, i32 %n) nounwind
  %sum.0.lcssa = phi i32 [ 0, %0 ], [ %9, %.lr.ph ]
  ret i32 %sum.0.lcssa
}

; Ensure that volatile loads are not vectorized in a read-only loop.
;CHECK-LABEL: @read_only_func_volatile(
;CHECK-NOT: load <4 x i32>
;CHECK: ret i32
define i32 @read_only_func_volatile(i32* nocapture %A, i32* nocapture %B, i32 %n) nounwind uwtable readonly ssp {
  %1 = icmp sgt i32 %n, 0
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
  %sum.02 = phi i32 [ %9, %.lr.ph ], [ 0, %0 ]
  %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %3 = load volatile i32, i32* %2, align 4
  %4 = add nsw i64 %indvars.iv, 13
  %5 = getelementptr inbounds i32, i32* %B, i64 %4
  %6 = load i32, i32* %5, align 4
  %7 = shl i32 %6, 1
  %8 = add i32 %3, %sum.02
  %9 = add i32 %8, %7
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  %sum.0.lcssa = phi i32 [ 0, %0 ], [ %9, %.lr.ph ]
  ret i32 %sum.0.lcssa
}
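Roughly the same loop in C (illustration only; the _c-suffixed name is made up): a read-only reduction where only the load from A is volatile, which must keep the whole loop scalar:

/* Rough C equivalent of @read_only_func_volatile. */
int read_only_func_volatile_c(volatile int *A, int *B, int n) {
  int sum = 0;
  for (int i = 0; i < n; ++i)
    sum += A[i] + (B[i + 13] << 1); /* volatile A[i] blocks <4 x i32> loads */
  return sum;
}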


@@ -23,3 +23,26 @@ define i32 @read_mod_write_single_ptr(float* nocapture %a, i32 %n) nounwind uwtable
._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}

; Ensure that volatile stores are not vectorized.
; CHECK-LABEL: @read_mod_write_single_ptr_volatile_store(
; CHECK-NOT: store <4 x float>
; CHECK: ret i32
define i32 @read_mod_write_single_ptr_volatile_store(float* nocapture %a, i32 %n) nounwind uwtable ssp {
  %1 = icmp sgt i32 %n, 0
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
  %2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %3 = load float, float* %2, align 4
  %4 = fmul float %3, 3.000000e+00
  store volatile float %4, float* %2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}
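And the store-side counterpart in C (illustration only; the _c-suffixed name is made up): the load is ordinary, but the store is volatile, so no <4 x float> store may be emitted:

/* Rough C equivalent of @read_mod_write_single_ptr_volatile_store. */
int read_mod_write_single_ptr_volatile_store_c(float *a, int n) {
  for (int i = 0; i < n; ++i)
    *(volatile float *)&a[i] = a[i] * 3.0f; /* volatile store stays scalar */
  return 0; /* the IR returns undef */
}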