[X86] Clean up patterns for using VMOVDDUP for broadcasts.

-Remove OptForSize. Not all of the backend follows the same rules for creating broadcasts, and there is no conflicting pattern.
-Don't stop selecting the VEX VMOVDDUP when AVX512 is supported; the EVEX VMOVDDUP requires VLX.
-Only use VMOVDDUP for v2i64 broadcasts if AVX2 is not supported.
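For example (a minimal sketch; the function name and IR shape are illustrative, not taken from this commit), this is the kind of 64-bit broadcast load the patterns below match:

; Sketch of a v2f64 broadcast load (illustrative only).
define <2 x double> @bcast_f64(double* %p) {
  %ld = load double, double* %p
  %vec = insertelement <2 x double> undef, double %ld, i32 0
  %splat = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> zeroinitializer
  ret <2 x double> %splat
}
; With these patterns, an AVX target without VLX selects the VEX-encoded
; "vmovddup (%rdi), %xmm0" here, optsize or not; with VLX the EVEX form takes over.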

llvm-svn: 283020
Craig Topper 2016-10-01 07:11:24 +00:00
parent 422da84ace
commit 6d5567d561
3 changed files with 18 additions and 13 deletions


@@ -5162,12 +5162,12 @@ let Predicates = [HasAVX] in {
             (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
 }
 
-let Predicates = [UseAVX, OptForSize] in {
-  def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
-            (VMOVDDUPrm addr:$src)>;
-  def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
-            (VMOVDDUPrm addr:$src)>;
-}
+let Predicates = [HasAVX, NoVLX] in
+def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
+          (VMOVDDUPrm addr:$src)>;
+let Predicates = [HasAVX1Only] in
+def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
+          (VMOVDDUPrm addr:$src)>;
 
 let Predicates = [UseSSE3] in {
   def : Pat<(X86Movddup (memopv2f64 addr:$src)),
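For reference, the predicates used in the new patterns are defined elsewhere in the backend; paraphrased below (the exact spelling is an assumption, not part of this diff):

// Assumed definitions, paraphrased from X86InstrInfo.td of this era:
def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
def NoVLX       : Predicate<"!Subtarget->hasVLX()">;

This is why the v2i64 pattern drops out once AVX2 provides VPBROADCASTQ, while the v2f64 pattern only yields to the EVEX form when VLX is present.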


@@ -250,8 +250,7 @@ define <8 x i16> @broadcast_mem_v4i16_v8i16(<4 x i16>* %ptr) {
 ; X32-AVX2-LABEL: broadcast_mem_v4i16_v8i16:
 ; X32-AVX2:       ## BB#0:
 ; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; X32-AVX2-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: broadcast_mem_v4i16_v8i16:


@@ -49,11 +49,17 @@ define <8 x float> @splat_v8f32(<8 x float> %x) #1 {
 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
 define <2 x i64> @splat_v2i64(<2 x i64> %x) #1 {
-; CHECK-LABEL: splat_v2i64:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
-; CHECK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; AVX-LABEL: splat_v2i64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: splat_v2i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %xmm1
+; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %add = add <2 x i64> %x, <i64 1, i64 1>
   ret <2 x i64> %add
 }
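On the "one byte smaller" remark in the comment above: for the register forms, the VEX encodings differ by one byte because VMOVDDUP lives in the 0F opcode map (2-byte VEX prefix) while VPBROADCASTQ needs the 0F38 map (3-byte VEX prefix). An assumed breakdown, not from the commit:

vmovddup     %xmm0, %xmm0    # c5 fb 12 c0      (4 bytes, 2-byte VEX)
vpbroadcastq %xmm0, %xmm0    # c4 e2 79 59 c0   (5 bytes, 3-byte VEX)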