[ARM] Use reduction intrinsics for larger than legal reductions

The codegen for splitting an llvm.experimental.vector.reduce intrinsic into
parts is better than the codegen for the generic shuffle-based reductions.
This will only directly affect cases where vectorization factors are
specified by the user.

Also added tests to make sure the codegen for larger reductions is OK.
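
As an illustration (a hand-written sketch, not code from this patch): MVE
vectors are 128 bits wide, so a v8i32 reduction can be split into two legal
halves that are added together before a single legal reduction, which is
exactly the vadd.i32 + vaddv.u32 pattern the updated tests check for:

; Sketch only; the function name is illustrative.
declare i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32>)

define i32 @reduce_v8i32(<8 x i32> %v) {
  ; Split the illegal 256-bit input into two legal 128-bit halves.
  %lo = shufflevector <8 x i32> %v, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %hi = shufflevector <8 x i32> %v, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  ; One vector add (vadd.i32), then one legal reduction (vaddv.u32).
  %sum = add <4 x i32> %lo, %hi
  %r = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> %sum)
  ret i32 %r
}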

Differential Revision: https://reviews.llvm.org/D72257
Author: David Green
Date:   2020-01-24 14:21:45 +00:00
Parent: d0e4729521
Commit: 40a053e4e5
3 changed files with 138 additions and 3 deletions

llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp

@@ -1364,7 +1364,8 @@ bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
     return false;
   case Instruction::ICmp:
   case Instruction::Add:
-    return ScalarBits < 64 && ScalarBits * Ty->getVectorNumElements() == 128;
+    return ScalarBits < 64 &&
+           (ScalarBits * Ty->getVectorNumElements()) % 128 == 0;
   default:
     llvm_unreachable("Unhandled reduction opcode");
   }
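
In other words, the hook previously fired only for reductions totalling
exactly one 128-bit MVE vector; it now fires for any multiple of 128 bits,
while i64 elements remain excluded by the ScalarBits < 64 check. Worked
through on the intrinsics used in the tests below:

; 32 * 4 = 128 and 128 % 128 == 0  -> intrinsic used (unchanged behaviour)
declare i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32>)
; 32 * 8 = 256 and 256 % 128 == 0  -> intrinsic now used as well
declare i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32>)
; 64 * 2 = 128, but ScalarBits < 64 is false -> generic reduction kept
declare i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64>)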

llvm/test/CodeGen/Thumb2/mve-vaddv.ll

@@ -3,8 +3,11 @@
 declare i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64>)
 declare i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32>)
 declare i16 @llvm.experimental.vector.reduce.add.i16.v8i16(<8 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.i16.v16i16(<16 x i16>)
 declare i8 @llvm.experimental.vector.reduce.add.i8.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.i8.v32i8(<32 x i8>)

 define arm_aapcs_vfpcc i64 @vaddv_v2i64_i64(<2 x i64> %s1) {
 ; CHECK-LABEL: vaddv_v2i64_i64:
 ; CHECK:       @ %bb.0: @ %entry
@@ -31,8 +34,19 @@ entry:
   ret i32 %r
 }

-define arm_aapcs_vfpcc i16 @vaddv_v16i16_i16(<8 x i16> %s1) {
-; CHECK-LABEL: vaddv_v16i16_i16:
+define arm_aapcs_vfpcc i32 @vaddv_v8i32_i32(<8 x i32> %s1) {
+; CHECK-LABEL: vaddv_v8i32_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %r = call i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32> %s1)
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i16 @vaddv_v8i16_i16(<8 x i16> %s1) {
+; CHECK-LABEL: vaddv_v8i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vaddv.u16 r0, q0
 ; CHECK-NEXT:    bx lr
@@ -41,6 +55,17 @@ entry:
   ret i16 %r
 }

+define arm_aapcs_vfpcc i16 @vaddv_v16i16_i16(<16 x i16> %s1) {
+; CHECK-LABEL: vaddv_v16i16_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %r = call i16 @llvm.experimental.vector.reduce.add.i16.v16i16(<16 x i16> %s1)
+  ret i16 %r
+}
+
 define arm_aapcs_vfpcc i8 @vaddv_v16i8_i8(<16 x i8> %s1) {
 ; CHECK-LABEL: vaddv_v16i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
@@ -51,6 +76,17 @@ entry:
   ret i8 %r
 }

+define arm_aapcs_vfpcc i8 @vaddv_v32i8_i8(<32 x i8> %s1) {
+; CHECK-LABEL: vaddv_v32i8_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vadd.i8 q0, q0, q1
+; CHECK-NEXT:    vaddv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %r = call i8 @llvm.experimental.vector.reduce.add.i8.v32i8(<32 x i8> %s1)
+  ret i8 %r
+}
+
 define arm_aapcs_vfpcc i64 @vaddva_v2i64_i64(<2 x i64> %s1, i64 %x) {
 ; CHECK-LABEL: vaddva_v2i64_i64:
 ; CHECK:       @ %bb.0: @ %entry
@@ -82,6 +118,18 @@ entry:
   ret i32 %r
 }

+define arm_aapcs_vfpcc i32 @vaddva_v8i32_i32(<8 x i32> %s1, i32 %x) {
+; CHECK-LABEL: vaddva_v8i32_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %t = call i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32> %s1)
+  %r = add i32 %t, %x
+  ret i32 %r
+}
+
 define arm_aapcs_vfpcc i16 @vaddva_v8i16_i16(<8 x i16> %s1, i16 %x) {
 ; CHECK-LABEL: vaddva_v8i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
@@ -93,6 +141,18 @@ entry:
   ret i16 %r
 }

+define arm_aapcs_vfpcc i16 @vaddva_v16i16_i16(<16 x i16> %s1, i16 %x) {
+; CHECK-LABEL: vaddva_v16i16_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %t = call i16 @llvm.experimental.vector.reduce.add.i16.v16i16(<16 x i16> %s1)
+  %r = add i16 %t, %x
+  ret i16 %r
+}
+
 define arm_aapcs_vfpcc i8 @vaddva_v16i8_i8(<16 x i8> %s1, i8 %x) {
 ; CHECK-LABEL: vaddva_v16i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
@@ -103,3 +163,15 @@ entry:
   %r = add i8 %t, %x
   ret i8 %r
 }
+
+define arm_aapcs_vfpcc i8 @vaddva_v32i8_i8(<32 x i8> %s1, i8 %x) {
+; CHECK-LABEL: vaddva_v32i8_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vadd.i8 q0, q0, q1
+; CHECK-NEXT:    vaddva.u8 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %t = call i8 @llvm.experimental.vector.reduce.add.i8.v32i8(<32 x i8> %s1)
+  %r = add i8 %t, %x
+  ret i8 %r
+}
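
The RUN line of this test file falls outside the diff context. Based on
sibling MVE codegen tests it is likely of the form below (an assumption,
not taken from this commit; the flags themselves are standard llc options):

; (assumed RUN line, based on sibling MVE tests; not part of this diff)
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s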

llvm/test/Transforms/LoopVectorize/ARM/ (new test file)

@@ -0,0 +1,62 @@
; RUN: opt -loop-vectorize < %s -S -o - | FileCheck %s

target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m.main-arm-none-eabi"

; CHECK-LABEL: check4
; CHECK: call i32 @llvm.experimental.vector.reduce.add.v4i32
define i32 @check4(i8* noalias nocapture readonly %A, i8* noalias nocapture readonly %B, i32 %n) #0 {
entry:
  %cmp9 = icmp sgt i32 %n, 0
  br i1 %cmp9, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body, %entry
  %res.0.lcssa = phi i32 [ undef, %entry ], [ %add, %for.body ]
  ret i32 %res.0.lcssa

for.body:                                         ; preds = %entry, %for.body
  %i.011 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %res.010 = phi i32 [ %add, %for.body ], [ undef, %entry ]
  %arrayidx = getelementptr inbounds i8, i8* %A, i32 %i.011
  %0 = load i8, i8* %arrayidx, align 1
  %conv = sext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8, i8* %B, i32 %i.011
  %1 = load i8, i8* %arrayidx1, align 1
  %conv2 = sext i8 %1 to i32
  %mul = mul nsw i32 %conv2, %conv
  %add = add nsw i32 %mul, %res.010
  %inc = add nuw nsw i32 %i.011, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

; CHECK-LABEL: check16
; CHECK: call i32 @llvm.experimental.vector.reduce.add.v16i32
define i32 @check16(i8* noalias nocapture readonly %A, i8* noalias nocapture readonly %B, i32 %n) #0 {
entry:
  %cmp9 = icmp sgt i32 %n, 0
  br i1 %cmp9, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body, %entry
  %res.0.lcssa = phi i32 [ undef, %entry ], [ %add, %for.body ]
  ret i32 %res.0.lcssa

for.body:                                         ; preds = %entry, %for.body
  %i.011 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %res.010 = phi i32 [ %add, %for.body ], [ undef, %entry ]
  %arrayidx = getelementptr inbounds i8, i8* %A, i32 %i.011
  %0 = load i8, i8* %arrayidx, align 1
  %conv = sext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8, i8* %B, i32 %i.011
  %1 = load i8, i8* %arrayidx1, align 1
  %conv2 = sext i8 %1 to i32
  %mul = mul nsw i32 %conv2, %conv
  %add = add nsw i32 %mul, %res.010
  %inc = add nuw nsw i32 %i.011, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !6
}

attributes #0 = { "target-features"="+mve" }
!6 = distinct !{!6, !7}
!7 = !{!"llvm.loop.vectorize.width", i32 16}
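
check4 carries no loop metadata, so the vectorizer picks the legal 128-bit
factor (VF = 4) on its own, while the !6/!7 metadata on check16 forces
VF = 16 and hence a 512-bit v16i32 reduction: the user-specified case the
commit message refers to. A width-8 variant (hypothetical, not part of this
patch) would be requested the same way:

; Hypothetical width-8 request: 32 * 8 = 256 bits, also handled now.
!8 = distinct !{!8, !9}
!9 = !{!"llvm.loop.vectorize.width", i32 8}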