[AVX-512] Teach isel lowering that a subvector broadcast being inserted into both halves of a 512-bit vector can be combined into a larger subvector broadcast.

Summary:
This allows us to create broadcasts of 128-bit vector loads into 512-bit vectors.
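As a rough illustration (mirroring the existing broadcast tests updated below; the function name is made up), IR like the following can now be selected to a single 512-bit broadcast from memory (vbroadcastf32x4, or vbroadcastf64x2 with DQI) instead of a ymm broadcast followed by a vinsertf64x4:

  define <8 x double> @bcast_2f64_8f64(<2 x double> *%p) nounwind {
    ; Broadcast a 128-bit load into all four 128-bit lanes of a zmm register.
    %v = load <2 x double>, <2 x double> *%p
    %b = shufflevector <2 x double> %v, <2 x double> undef,
                       <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
    ret <8 x double> %b
  }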

New patterns have been added to support 8-bit and 16-bit element vector types, and v2f64/v2i64->v8f64/v8i64 broadcasts without DQI instructions.

There are also fallback patterns for when the load can't be folded. These patterns are a little complex: we first insert the lower 128 bits into the second 128 bits using a zmm subvector insert instruction (a zmm insert is needed in case VLX isn't available). Then we use another zmm subvector insert to place those 256 bits into the upper bits. Since we used a zmm insert to create the 256 bits, we also need an EXTRACT_SUBREG to get just the lower 256 bits to pass to the second insert.
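A sketch of when the fallback patterns fire (modeled on the _chain tests below; names are illustrative): the loaded value has a second user, so the load can't be folded into the broadcast and the register-based patterns expand to the two zmm subvector inserts described above:

  define <16 x i32> @bcast_4i32_16i32_extra_use(<4 x i32> *%p0, <4 x i32> *%p1) nounwind {
    ; %v has a second use, so the load can't be folded into the broadcast.
    %v = load <4 x i32>, <4 x i32> *%p0
    store <4 x i32> %v, <4 x i32> *%p1
    ; Expected selection is roughly:
    ;   vinserti32x4 $1, %xmm0, %zmm0, %zmm0
    ;   vinserti64x4 $1, %ymm0, %zmm0, %zmm0   (vinserti32x8 with DQI)
    %b = shufflevector <4 x i32> %v, <4 x i32> undef,
         <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
    ret <16 x i32> %b
  }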

The outer insert for the fallback patterns should have the correct type because eventually we should also support masked operations here, so we need both a DQI and a NoDQI version of the v16f32/v16i32 patterns.

Reviewers: RKSimon, delena, igorb

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D25651

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@284567 91177308-0d34-0410-b5e6-96231b3b80d8
Craig Topper 2016-10-19 04:44:17 +00:00
parent 2e4381ef79
commit cdb220aad5
4 changed files with 152 additions and 168 deletions

@ -13048,6 +13048,12 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT, SubVec);
}
}
// If this is subv_broadcast insert into both halves, use a larger
// subv_broadcast.
if (SubVec.getOpcode() == X86ISD::SUBV_BROADCAST && SubVec == SubVec2) {
return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT,
SubVec.getOperand(0));
}
}
}

@ -1063,6 +1063,51 @@ def : Pat<(v32i16 (X86SubVBroadcast (v16i16 VR256X:$src))),
def : Pat<(v64i8 (X86SubVBroadcast (v32i8 VR256X:$src))),
(VINSERTI64x4Zrr (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
(v32i8 VR256X:$src), 1)>;
def : Pat<(v32i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
(VBROADCASTI32X4rm addr:$src)>;
def : Pat<(v64i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
(VBROADCASTI32X4rm addr:$src)>;
// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
def : Pat<(v8f64 (X86SubVBroadcast (v2f64 VR128X:$src))),
(VINSERTF64x4Zrr
(VINSERTF32x4Zrr (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1),
(EXTRACT_SUBREG
(v8f64 (VINSERTF32x4Zrr (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1)), sub_ymm), 1)>;
def : Pat<(v8i64 (X86SubVBroadcast (v2i64 VR128X:$src))),
(VINSERTI64x4Zrr
(VINSERTI32x4Zrr (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1),
(EXTRACT_SUBREG
(v8i64 (VINSERTI32x4Zrr (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1)), sub_ymm), 1)>;
def : Pat<(v32i16 (X86SubVBroadcast (v8i16 VR128X:$src))),
(VINSERTI64x4Zrr
(VINSERTI32x4Zrr (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1),
(EXTRACT_SUBREG
(v32i16 (VINSERTI32x4Zrr (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1)), sub_ymm), 1)>;
def : Pat<(v64i8 (X86SubVBroadcast (v16i8 VR128X:$src))),
(VINSERTI64x4Zrr
(VINSERTI32x4Zrr (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1),
(EXTRACT_SUBREG
(v64i8 (VINSERTI32x4Zrr (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1)), sub_ymm), 1)>;
}
let Predicates = [HasVLX] in {
@ -1129,6 +1174,30 @@ def : Pat<(v4i64 (X86SubVBroadcast (v2i64 VR128X:$src))),
}
let Predicates = [HasAVX512, NoDQI] in {
def : Pat<(v8f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
(VBROADCASTF32X4rm addr:$src)>;
def : Pat<(v8i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
(VBROADCASTI32X4rm addr:$src)>;
def : Pat<(v16f32 (X86SubVBroadcast (v4f32 VR128X:$src))),
(VINSERTF64x4Zrr
(VINSERTF32x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1),
(EXTRACT_SUBREG
(v16f32 (VINSERTF32x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1)), sub_ymm), 1)>;
def : Pat<(v16i32 (X86SubVBroadcast (v4i32 VR128X:$src))),
(VINSERTI64x4Zrr
(VINSERTI32x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1),
(EXTRACT_SUBREG
(v16i32 (VINSERTI32x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1)), sub_ymm), 1)>;
def : Pat<(v16f32 (X86SubVBroadcast (loadv8f32 addr:$src))),
(VBROADCASTF64X4rm addr:$src)>;
def : Pat<(v16i32 (X86SubVBroadcast (bc_v8i32 (loadv4i64 addr:$src)))),
@ -1166,6 +1235,25 @@ def : Pat<(v16f32 (X86SubVBroadcast (v8f32 VR256X:$src))),
def : Pat<(v16i32 (X86SubVBroadcast (v8i32 VR256X:$src))),
(VINSERTI32x8Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
(v8i32 VR256X:$src), 1)>;
def : Pat<(v16f32 (X86SubVBroadcast (v4f32 VR128X:$src))),
(VINSERTF32x8Zrr
(VINSERTF32x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1),
(EXTRACT_SUBREG
(v16f32 (VINSERTF32x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1)), sub_ymm), 1)>;
def : Pat<(v16i32 (X86SubVBroadcast (v4i32 VR128X:$src))),
(VINSERTI32x8Zrr
(VINSERTI32x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1),
(EXTRACT_SUBREG
(v16i32 (VINSERTI32x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
VR128X:$src, sub_xmm),
VR128X:$src, 1)), sub_ymm), 1)>;
}
multiclass avx512_common_broadcast_32x2<bits<8> opc, string OpcodeStr,

@ -110,22 +110,19 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 = mem[0,1,0,1,0,1,0,1]
; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
@ -137,22 +134,19 @@ define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 = mem[0,1,0,1,0,1,0,1]
; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
@ -162,26 +156,11 @@ define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
}
define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQVL-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
; X64-AVX512-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512: ## BB#0:
; X64-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
%2 = shufflevector <4 x float> %1, <4 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
%3 = fadd <16 x float> %2, <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>
@ -189,26 +168,11 @@ define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
}
define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512VL: ## BB#0:
; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512DQVL: ## BB#0:
; X64-AVX512DQVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQVL-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
; X64-AVX512-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512: ## BB#0:
; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
%3 = add <16 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
@ -225,8 +189,7 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
;
; X64-AVX512BWVL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
@ -252,8 +215,7 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
;
; X64-AVX512BWVL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512BWVL: ## BB#0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;

@ -74,22 +74,19 @@ define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
; X32-AVX512F-LABEL: test_broadcast_2f64_8f64:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_2f64_8f64:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_2f64_8f64:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512DQ-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512DQ-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512DQ-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 = mem[0,1,0,1,0,1,0,1]
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_2f64_8f64:
@ -100,20 +97,17 @@ define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
;
; X64-AVX512F-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512F: ## BB#0:
; X64-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512BW: ## BB#0:
; X64-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512DQ: ## BB#0:
; X64-AVX512DQ-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQ-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQ-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 = mem[0,1,0,1,0,1,0,1]
; X64-AVX512DQ-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
%2 = shufflevector <2 x double> %1, <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@ -216,22 +210,19 @@ define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
; X32-AVX512F-LABEL: test_broadcast_2i64_8i64:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_2i64_8i64:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_2i64_8i64:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512DQ-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512DQ-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 = mem[0,1,0,1,0,1,0,1]
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX1-LABEL: test_broadcast_2i64_8i64:
@ -248,20 +239,17 @@ define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
;
; X64-AVX512F-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512F: ## BB#0:
; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512BW: ## BB#0:
; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512DQ: ## BB#0:
; X64-AVX512DQ-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQ-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 = mem[0,1,0,1,0,1,0,1]
; X64-AVX512DQ-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@ -332,26 +320,11 @@ define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
; X32-AVX-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: test_broadcast_4f32_16f32:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_4f32_16f32:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_4f32_16f32:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512DQ-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512DQ-NEXT: retl
; X32-AVX512-LABEL: test_broadcast_4f32_16f32:
; X32-AVX512: ## BB#0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4f32_16f32:
; X64-AVX: ## BB#0:
@ -359,23 +332,10 @@ define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
; X64-AVX-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512F: ## BB#0:
; X64-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512BW: ## BB#0:
; X64-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512DQ: ## BB#0:
; X64-AVX512DQ-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQ-NEXT: retq
; X64-AVX512-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512: ## BB#0:
; X64-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
%2 = shufflevector <4 x float> %1, <4 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
ret <16 x float> %2
@ -474,26 +434,11 @@ define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
; X32-AVX2-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-AVX512F-LABEL: test_broadcast_4i32_16i32:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_4i32_16i32:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_4i32_16i32:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512DQ-NEXT: retl
; X32-AVX512-LABEL: test_broadcast_4i32_16i32:
; X32-AVX512: ## BB#0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX1-LABEL: test_broadcast_4i32_16i32:
; X64-AVX1: ## BB#0:
@ -507,23 +452,10 @@ define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
; X64-AVX2-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-AVX512F-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512F: ## BB#0:
; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512BW: ## BB#0:
; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512DQ: ## BB#0:
; X64-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQ-NEXT: retq
; X64-AVX512-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512: ## BB#0:
; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
ret <16 x i32> %2
@ -632,8 +564,7 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
; X32-AVX512BW-LABEL: test_broadcast_8i16_32i16:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_8i16_32i16:
@ -663,8 +594,7 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
;
; X64-AVX512BW-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512BW: ## BB#0:
; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_8i16_32i16:
@ -784,8 +714,7 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
; X32-AVX512BW-LABEL: test_broadcast_16i8_64i8:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_16i8_64i8:
@ -815,8 +744,7 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
;
; X64-AVX512BW-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512BW: ## BB#0:
; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_16i8_64i8:
@ -1345,7 +1273,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X32-AVX512F-NEXT: vmovdqa64 (%ecx), %xmm0
; X32-AVX512F-NEXT: vpxord %xmm1, %xmm1, %xmm1
; X32-AVX512F-NEXT: vmovdqa32 %xmm1, (%eax)
; X32-AVX512F-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512F-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
; X32-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
;
@ -1356,7 +1284,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X32-AVX512BW-NEXT: vmovdqa64 (%ecx), %xmm0
; X32-AVX512BW-NEXT: vpxord %xmm1, %xmm1, %xmm1
; X32-AVX512BW-NEXT: vmovdqa32 %xmm1, (%eax)
; X32-AVX512BW-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512BW-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
@ -1367,7 +1295,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X32-AVX512DQ-NEXT: vmovdqa64 (%ecx), %xmm0
; X32-AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX512DQ-NEXT: vmovaps %xmm1, (%eax)
; X32-AVX512DQ-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512DQ-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
; X32-AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512DQ-NEXT: retl
;
@ -1385,7 +1313,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X64-AVX512F-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512F-NEXT: vpxord %xmm1, %xmm1, %xmm1
; X64-AVX512F-NEXT: vmovdqa32 %xmm1, (%rsi)
; X64-AVX512F-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512F-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
; X64-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512F-NEXT: retq
;
@ -1394,7 +1322,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X64-AVX512BW-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512BW-NEXT: vpxord %xmm1, %xmm1, %xmm1
; X64-AVX512BW-NEXT: vmovdqa32 %xmm1, (%rsi)
; X64-AVX512BW-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BW-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
@ -1403,7 +1331,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X64-AVX512DQ-NEXT: vmovdqa64 (%rdi), %xmm0
; X64-AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX512DQ-NEXT: vmovaps %xmm1, (%rsi)
; X64-AVX512DQ-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQ-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
; X64-AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQ-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %p0