[LV] Fix PR26600: avoid out of bounds loads for interleaved access vectorization

Summary:
If we don't have the first and last access of an interleaved load group,
the first and last wide load in the loop can perform an out-of-bounds
access. Even though we discard the results of such speculative loads,
they can still cause problems, since they can technically generate page
faults (or worse).

We now discard interleaved load groups that don't have the first and last
load in the group.
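
To make the failure mode concrete, here is a minimal C++ sketch of the kind of
loop that forms a load group with a gap; the function name and bounds are
illustrative only, loosely mirroring the @even_load test updated below:

// Only the even elements of A are read, so the factor-2 interleave group
// built for these loads has no last member (slot 1 is a gap).
void even_only(const int *A, int *B, int N) {
  for (int i = 0; i < N; ++i)
    B[i] = A[2 * i] << 1;
}
// With interleaved vectorization, each vector iteration would issue one wide
// load covering A[2*i] .. A[2*i + 2*VF - 1]. The last of these wide loads
// ends one element past the last address the scalar loop ever reads, which
// may fall outside the underlying object even though that lane is discarded.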

Reviewers: hfinkel, rengolin

Subscribers: rengolin, llvm-commits, mzolotukhin, anemet

Differential Revision: http://reviews.llvm.org/D17332

llvm-svn: 261331
Author: Silviu Baranga
Date:   2016-02-19 15:46:10 +00:00
Parent: db5af50c55
Commit: 7a8e19daa2
3 changed files with 19 additions and 5 deletions


@@ -4719,6 +4719,8 @@ void InterleavedAccessInfo::analyzeInterleaving(
   // Holds all interleaved store groups temporarily.
   SmallSetVector<InterleaveGroup *, 4> StoreGroups;
+  // Holds all interleaved load groups temporarily.
+  SmallSetVector<InterleaveGroup *, 4> LoadGroups;
 
   // Search the load-load/write-write pair B-A in bottom-up order and try to
   // insert B into the interleave group of A according to 3 rules:
@@ -4746,6 +4748,8 @@ void InterleavedAccessInfo::analyzeInterleaving(
     if (A->mayWriteToMemory())
       StoreGroups.insert(Group);
+    else
+      LoadGroups.insert(Group);
 
     for (auto II = std::next(I); II != E; ++II) {
       Instruction *B = II->first;
@@ -4793,6 +4797,12 @@ void InterleavedAccessInfo::analyzeInterleaving(
   for (InterleaveGroup *Group : StoreGroups)
     if (Group->getNumMembers() != Group->getFactor())
       releaseGroup(Group);
+
+  // Remove interleaved load groups that don't have the first and last member.
+  // This guarantees that we won't do speculative out of bounds loads.
+  for (InterleaveGroup *Group : LoadGroups)
+    if (!Group->getMember(0) || !Group->getMember(Group->getFactor() - 1))
+      releaseGroup(Group);
 }
 
 LoopVectorizationCostModel::VectorizationFactor

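For reference, the shape of the new first/last-member condition can be
sketched in isolation. ToyGroup and keepLoadGroup below are hypothetical
stand-ins used for illustration, not LLVM's InterleaveGroup API; only the
form of the condition mirrors the code above:

#include <cstdio>
#include <map>
#include <string>

// Hypothetical, simplified stand-in for an interleave group: Factor slots,
// of which some may be empty (a gap).
struct ToyGroup {
  unsigned Factor;                          // interleave factor (stride)
  std::map<unsigned, std::string> Members;  // slot index -> access; missing key = gap

  bool hasMember(unsigned Index) const { return Members.count(Index) != 0; }
};

// Mirrors the condition added above: keep a load group only if both the
// first and the last slot are occupied, so the widest load never starts
// before the first scalar access or ends after the last one.
static bool keepLoadGroup(const ToyGroup &G) {
  return G.hasMember(0) && G.hasMember(G.Factor - 1);
}

int main() {
  ToyGroup Full{2, {{0, "load A[2*i]"}, {1, "load A[2*i+1]"}}};
  ToyGroup Gapped{2, {{0, "load A[2*i]"}}};  // slot 1 missing
  std::printf("full: %d, gapped: %d\n", keepLoadGroup(Full), keepLoadGroup(Gapped));
  // Prints "full: 1, gapped: 0": the gapped group is the one the patch releases.
  return 0;
}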

@@ -16,9 +16,15 @@ for.cond.cleanup: ; preds = %for.body
 for.body: ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %0 = shl nsw i64 %indvars.iv, 1
+  %odd.idx = add nsw i64 %0, 1
   %arrayidx = getelementptr inbounds double, double* %b, i64 %0
+  %arrayidx.odd = getelementptr inbounds double, double* %b, i64 %odd.idx
   %1 = load double, double* %arrayidx, align 8
-  %add = fadd double %1, 1.000000e+00
+  %2 = load double, double* %arrayidx.odd, align 8
+  %add = fadd double %1, %2
   %arrayidx2 = getelementptr inbounds double, double* %a, i64 %indvars.iv
   store double %add, double* %arrayidx2, align 8
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1


@@ -292,10 +292,8 @@ for.body: ; preds = %for.body, %entry
 ; }
 ; CHECK-LABEL: @even_load(
-; CHECK: %wide.vec = load <8 x i32>, <8 x i32>* %{{.*}}, align 4
-; CHECK: %strided.vec = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-; CHECK-NOT: shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-; CHECK: shl nsw <4 x i32> %strided.vec, <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NOT: %wide.vec = load <8 x i32>, <8 x i32>* %{{.*}}, align 4
+; CHECK-NOT: %strided.vec = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
 define void @even_load(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) {
 entry: