diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll new file mode 100644 index 00000000000..9b6401d1a76 --- /dev/null +++ b/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll @@ -0,0 +1,3483 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512 + +; +; 128-bit vectors +; + +define <2 x i64> @ext_i2_2i64(i2 %a0) { +; SSE2-SSSE3-LABEL: ext_i2_2i64: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: andb $3, %dil +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shlq $62, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movq %rcx, %xmm1 +; SSE2-SSSE3-NEXT: shlq $63, %rax +; SSE2-SSSE3-NEXT: sarq $63, %rax +; SSE2-SSSE3-NEXT: movq %rax, %xmm0 +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: ext_i2_2i64: +; AVX12: # BB#0: +; AVX12-NEXT: andb $3, %dil +; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $62, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vmovq %rcx, %xmm0 +; AVX12-NEXT: shlq $63, %rax +; AVX12-NEXT: sarq $63, %rax +; AVX12-NEXT: vmovq %rax, %xmm1 +; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX12-NEXT: retq +; +; AVX512-LABEL: ext_i2_2i64: +; AVX512: # BB#0: +; AVX512-NEXT: andb $3, %dil +; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: # kill: %XMM0 %XMM0 %ZMM0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %1 = bitcast i2 %a0 to <2 x i1> + %2 = sext <2 x i1> %1 to <2 x i64> + ret <2 x i64> %2 +} + +define <4 x i32> @ext_i4_4i32(i4 %a0) { +; SSE2-SSSE3-LABEL: ext_i4_4i32: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: andb $15, %dil +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shlq $60, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shlq $61, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shlq $62, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: shlq $63, %rax +; SSE2-SSSE3-NEXT: sarq $63, %rax +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: ext_i4_4i32: +; AVX12: # BB#0: +; AVX12-NEXT: andb $15, %dil +; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; 
AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $62, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: movq %rax, %rdx +; AVX12-NEXT: shlq $63, %rdx +; AVX12-NEXT: sarq $63, %rdx +; AVX12-NEXT: vmovd %edx, %xmm0 +; AVX12-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $61, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: shlq $60, %rax +; AVX12-NEXT: sarq $63, %rax +; AVX12-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 +; AVX12-NEXT: retq +; +; AVX512-LABEL: ext_i4_4i32: +; AVX512: # BB#0: +; AVX512-NEXT: andb $15, %dil +; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; AVX512-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %1 = bitcast i4 %a0 to <4 x i1> + %2 = sext <4 x i1> %1 to <4 x i32> + ret <4 x i32> %2 +} + +define <8 x i16> @ext_i8_8i16(i8 %a0) { +; SSE2-SSSE3-LABEL: ext_i8_8i16: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shrq $7, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shlq $57, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shlq $58, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shlq $59, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shlq $60, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shlq $61, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: shlq $62, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: shlq $63, %rax +; SSE2-SSSE3-NEXT: sarq $63, %rax +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: ext_i8_8i16: +; AVX12: # BB#0: +; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX12-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $62, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: movq %rax, %rdx +; AVX12-NEXT: shlq $63, %rdx +; AVX12-NEXT: sarq $63, %rdx +; AVX12-NEXT: vmovd %edx, %xmm0 +; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $61, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 +; 
AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $60, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $59, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $58, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $57, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: shrq $7, %rax +; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 +; AVX12-NEXT: retq +; +; AVX512-LABEL: ext_i8_8i16: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k0 +; AVX512-NEXT: vpmovm2w %k0, %xmm0 +; AVX512-NEXT: retq + %1 = bitcast i8 %a0 to <8 x i1> + %2 = sext <8 x i1> %1 to <8 x i16> + ret <8 x i16> %2 +} + +define <16 x i8> @ext_i16_16i8(i16 %a0) { +; SSE2-SSSE3-LABEL: ext_i16_16i8: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: pushq %rbp +; SSE2-SSSE3-NEXT: .Lcfi0: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16 +; SSE2-SSSE3-NEXT: pushq %r15 +; SSE2-SSSE3-NEXT: .Lcfi1: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24 +; SSE2-SSSE3-NEXT: pushq %r14 +; SSE2-SSSE3-NEXT: .Lcfi2: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32 +; SSE2-SSSE3-NEXT: pushq %r13 +; SSE2-SSSE3-NEXT: .Lcfi3: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40 +; SSE2-SSSE3-NEXT: pushq %r12 +; SSE2-SSSE3-NEXT: .Lcfi4: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48 +; SSE2-SSSE3-NEXT: pushq %rbx +; SSE2-SSSE3-NEXT: .Lcfi5: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56 +; SSE2-SSSE3-NEXT: .Lcfi6: +; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56 +; SSE2-SSSE3-NEXT: .Lcfi7: +; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48 +; SSE2-SSSE3-NEXT: .Lcfi8: +; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40 +; SSE2-SSSE3-NEXT: .Lcfi9: +; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32 +; SSE2-SSSE3-NEXT: .Lcfi10: +; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24 +; SSE2-SSSE3-NEXT: .Lcfi11: +; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16 +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rax +; SSE2-SSSE3-NEXT: movq %rax, %r8 +; SSE2-SSSE3-NEXT: movq %rax, %r9 +; SSE2-SSSE3-NEXT: movq %rax, %r10 +; SSE2-SSSE3-NEXT: movq %rax, %r11 +; SSE2-SSSE3-NEXT: movq %rax, %r14 +; SSE2-SSSE3-NEXT: movq %rax, %r15 +; SSE2-SSSE3-NEXT: movq %rax, %r12 +; SSE2-SSSE3-NEXT: movq %rax, %r13 +; SSE2-SSSE3-NEXT: movq %rax, %rbx +; SSE2-SSSE3-NEXT: movq %rax, %rcx +; SSE2-SSSE3-NEXT: movq %rax, %rdx +; SSE2-SSSE3-NEXT: movq %rax, %rsi +; SSE2-SSSE3-NEXT: movq %rax, %rdi +; SSE2-SSSE3-NEXT: movq %rax, %rbp +; SSE2-SSSE3-NEXT: shrq $15, %rbp +; SSE2-SSSE3-NEXT: movd %ebp, %xmm0 +; SSE2-SSSE3-NEXT: movq %rax, %rbp +; SSE2-SSSE3-NEXT: movsbq %al, %rax +; SSE2-SSSE3-NEXT: shlq $49, %r8 +; SSE2-SSSE3-NEXT: sarq $63, %r8 +; SSE2-SSSE3-NEXT: movd %r8d, %xmm1 +; SSE2-SSSE3-NEXT: shlq $50, %r9 +; SSE2-SSSE3-NEXT: sarq $63, %r9 +; SSE2-SSSE3-NEXT: movd %r9d, %xmm2 +; SSE2-SSSE3-NEXT: shlq $51, %r10 +; SSE2-SSSE3-NEXT: sarq $63, %r10 +; SSE2-SSSE3-NEXT: movd %r10d, %xmm3 +; SSE2-SSSE3-NEXT: shlq $52, %r11 +; SSE2-SSSE3-NEXT: sarq $63, %r11 +; SSE2-SSSE3-NEXT: movd %r11d, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: shlq $53, %r14 +; SSE2-SSSE3-NEXT: sarq $63, %r14 +; SSE2-SSSE3-NEXT: movd %r14d, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = 
xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-SSSE3-NEXT: shlq $54, %r15 +; SSE2-SSSE3-NEXT: sarq $63, %r15 +; SSE2-SSSE3-NEXT: movd %r15d, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] +; SSE2-SSSE3-NEXT: shlq $55, %r12 +; SSE2-SSSE3-NEXT: sarq $63, %r12 +; SSE2-SSSE3-NEXT: movd %r12d, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-SSSE3-NEXT: shlq $60, %r13 +; SSE2-SSSE3-NEXT: sarq $63, %r13 +; SSE2-SSSE3-NEXT: movd %r13d, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE2-SSSE3-NEXT: shlq $61, %rbx +; SSE2-SSSE3-NEXT: sarq $63, %rbx +; SSE2-SSSE3-NEXT: movd %ebx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-SSSE3-NEXT: shlq $62, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-SSSE3-NEXT: shlq $63, %rdx +; SSE2-SSSE3-NEXT: sarq $63, %rdx +; SSE2-SSSE3-NEXT: movd %edx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-SSSE3-NEXT: shlq $58, %rsi +; SSE2-SSSE3-NEXT: sarq $63, %rsi +; SSE2-SSSE3-NEXT: movd %esi, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] +; SSE2-SSSE3-NEXT: shlq $59, %rdi +; SSE2-SSSE3-NEXT: sarq $63, %rdi +; SSE2-SSSE3-NEXT: movd %edi, %xmm4 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] +; SSE2-SSSE3-NEXT: shlq $57, %rbp +; SSE2-SSSE3-NEXT: sarq $63, %rbp +; SSE2-SSSE3-NEXT: movd %ebp, %xmm2 +; SSE2-SSSE3-NEXT: shrq $7, %rax +; SSE2-SSSE3-NEXT: movd %eax, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: popq %rbx +; SSE2-SSSE3-NEXT: popq %r12 +; SSE2-SSSE3-NEXT: popq %r13 +; SSE2-SSSE3-NEXT: popq %r14 +; SSE2-SSSE3-NEXT: popq %r15 +; SSE2-SSSE3-NEXT: popq %rbp +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: ext_i16_16i8: +; AVX12: # BB#0: +; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; AVX12-NEXT: movswq -{{[0-9]+}}(%rsp), %rax +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $62, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: movq %rax, %rdx +; AVX12-NEXT: shlq $63, %rdx +; AVX12-NEXT: sarq $63, %rdx +; AVX12-NEXT: vmovd %edx, %xmm0 +; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $61, %rcx +; AVX12-NEXT: sarq 
$63, %rcx +; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $60, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $59, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $58, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $57, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movsbq %al, %rcx +; AVX12-NEXT: shrq $7, %rcx +; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $55, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $54, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $53, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $52, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $51, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $50, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movq %rax, %rcx +; AVX12-NEXT: shlq $49, %rcx +; AVX12-NEXT: sarq $63, %rcx +; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: shrq $15, %rax +; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX12-NEXT: retq +; +; AVX512-LABEL: ext_i16_16i8: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k0 +; AVX512-NEXT: vpmovm2b %k0, %xmm0 +; AVX512-NEXT: retq + %1 = bitcast i16 %a0 to <16 x i1> + %2 = sext <16 x i1> %1 to <16 x i8> + ret <16 x i8> %2 +} + +; +; 256-bit vectors +; + +define <4 x i64> @ext_i4_4i64(i4 %a0) { +; SSE2-SSSE3-LABEL: ext_i4_4i64: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: andb $15, %dil +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-SSSE3-NEXT: movd %eax, %xmm2 +; SSE2-SSSE3-NEXT: shrl %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3] +; SSE2-SSSE3-NEXT: psllq $63, %xmm0 +; SSE2-SSSE3-NEXT: psrad $31, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3] +; SSE2-SSSE3-NEXT: psllq $63, %xmm1 +; SSE2-SSSE3-NEXT: psrad $31, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i4_4i64: +; AVX1: # BB#0: +; AVX1-NEXT: andb $15, %dil +; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shlq $60, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm0 +; AVX1-NEXT: 
movq %rax, %rcx +; AVX1-NEXT: shlq $61, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shlq $62, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm1 +; AVX1-NEXT: shlq $63, %rax +; AVX1-NEXT: sarq $63, %rax +; AVX1-NEXT: vmovq %rax, %xmm2 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i4_4i64: +; AVX2: # BB#0: +; AVX2-NEXT: andb $15, %dil +; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shlq $60, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm0 +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shlq $61, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm1 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shlq $62, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm1 +; AVX2-NEXT: shlq $63, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vmovq %rax, %xmm2 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i4_4i64: +; AVX512: # BB#0: +; AVX512-NEXT: andb $15, %dil +; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: # kill: %YMM0 %YMM0 %ZMM0 +; AVX512-NEXT: retq + %1 = bitcast i4 %a0 to <4 x i1> + %2 = sext <4 x i1> %1 to <4 x i64> + ret <4 x i64> %2 +} + +define <8 x i32> @ext_i8_8i32(i8 %a0) { +; SSE2-SSSE3-LABEL: ext_i8_8i32: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: shrl $7, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm3 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = 
xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-SSSE3-NEXT: pslld $31, %xmm0 +; SSE2-SSSE3-NEXT: psrad $31, %xmm0 +; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: pslld $31, %xmm1 +; SSE2-SSSE3-NEXT: psrad $31, %xmm1 +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i8_8i32: +; AVX1: # BB#0: +; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shlq $58, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: movq %rax, %rdx +; AVX1-NEXT: shlq $59, %rdx +; AVX1-NEXT: sarq $63, %rdx +; AVX1-NEXT: vmovd %edx, %xmm0 +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shlq $57, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shrq $7, %rcx +; AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shlq $62, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: movq %rax, %rdx +; AVX1-NEXT: shlq $63, %rdx +; AVX1-NEXT: sarq $63, %rdx +; AVX1-NEXT: vmovd %edx, %xmm1 +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shlq $61, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: shlq $60, %rax +; AVX1-NEXT: sarq $63, %rax +; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i8_8i32: +; AVX2: # BB#0: +; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shlq $58, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: movq %rax, %rdx +; AVX2-NEXT: shlq $59, %rdx +; AVX2-NEXT: sarq $63, %rdx +; AVX2-NEXT: vmovd %edx, %xmm0 +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shlq $57, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shrq $7, %rcx +; AVX2-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shlq $62, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: movq %rax, %rdx +; AVX2-NEXT: shlq $63, %rdx +; AVX2-NEXT: sarq $63, %rdx +; AVX2-NEXT: vmovd %edx, %xmm1 +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shlq $61, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $60, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i8_8i32: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k1 +; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; AVX512-NEXT: retq + %1 = bitcast i8 %a0 to <8 x i1> + %2 = sext <8 x i1> %1 to <8 x i32> + ret <8 x i32> %2 +} + +define <16 x i16> @ext_i16_16i16(i16 %a0) { +; SSE2-SSSE3-LABEL: ext_i16_16i16: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, 
%ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; 
SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-SSSE3-NEXT: psllw $15, %xmm0 +; SSE2-SSSE3-NEXT: psraw $15, %xmm0 +; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; SSE2-SSSE3-NEXT: psllw $15, %xmm1 +; SSE2-SSSE3-NEXT: psraw $15, %xmm1 +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i16_16i16: +; AVX1: # BB#0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: .Lcfi0: +; AVX1-NEXT: .cfi_def_cfa_offset 16 +; AVX1-NEXT: pushq %r15 +; AVX1-NEXT: .Lcfi1: +; AVX1-NEXT: .cfi_def_cfa_offset 24 +; AVX1-NEXT: pushq %r14 +; AVX1-NEXT: .Lcfi2: +; AVX1-NEXT: .cfi_def_cfa_offset 32 +; AVX1-NEXT: pushq %r13 +; AVX1-NEXT: .Lcfi3: +; AVX1-NEXT: .cfi_def_cfa_offset 40 +; AVX1-NEXT: pushq %r12 +; AVX1-NEXT: .Lcfi4: +; AVX1-NEXT: .cfi_def_cfa_offset 48 +; AVX1-NEXT: pushq %rbx +; AVX1-NEXT: .Lcfi5: +; AVX1-NEXT: .cfi_def_cfa_offset 56 +; AVX1-NEXT: .Lcfi6: +; AVX1-NEXT: .cfi_offset %rbx, -56 +; AVX1-NEXT: .Lcfi7: +; AVX1-NEXT: .cfi_offset %r12, -48 +; AVX1-NEXT: .Lcfi8: +; AVX1-NEXT: .cfi_offset %r13, -40 +; AVX1-NEXT: .Lcfi9: +; AVX1-NEXT: .cfi_offset %r14, -32 +; AVX1-NEXT: .Lcfi10: +; AVX1-NEXT: .cfi_offset %r15, -24 +; AVX1-NEXT: .Lcfi11: +; AVX1-NEXT: .cfi_offset %rbp, -16 +; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movswq -{{[0-9]+}}(%rsp), %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: shlq $55, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vmovd %ecx, %xmm0 +; AVX1-NEXT: movq %rax, %r8 +; AVX1-NEXT: movq %rax, %r10 +; AVX1-NEXT: movq %rax, %r11 +; AVX1-NEXT: movq %rax, %r14 +; AVX1-NEXT: movq %rax, %r15 +; AVX1-NEXT: movq %rax, %r9 +; AVX1-NEXT: movq %rax, %r12 +; AVX1-NEXT: movq %rax, %r13 +; AVX1-NEXT: movq %rax, %rbx +; AVX1-NEXT: movq %rax, %rdi +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: movq %rax, %rdx +; AVX1-NEXT: movq %rax, %rsi +; AVX1-NEXT: movsbq %al, %rbp +; AVX1-NEXT: shlq $54, %rax +; AVX1-NEXT: sarq $63, %rax +; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 +; AVX1-NEXT: shlq $53, %r8 +; AVX1-NEXT: sarq $63, %r8 +; AVX1-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0 +; AVX1-NEXT: shlq $52, %r10 +; AVX1-NEXT: sarq $63, %r10 +; AVX1-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0 +; AVX1-NEXT: shlq $51, %r11 +; AVX1-NEXT: sarq $63, %r11 +; AVX1-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0 +; AVX1-NEXT: shlq $50, %r14 +; AVX1-NEXT: sarq $63, %r14 +; AVX1-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 +; AVX1-NEXT: shlq $49, %r15 +; AVX1-NEXT: sarq $63, %r15 +; AVX1-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0 +; AVX1-NEXT: shrq $15, %r9 +; AVX1-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0 +; AVX1-NEXT: shlq $63, %r13 +; AVX1-NEXT: sarq $63, %r13 +; AVX1-NEXT: vmovd %r13d, %xmm1 +; AVX1-NEXT: shlq $62, %r12 +; AVX1-NEXT: sarq $63, %r12 +; AVX1-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $61, %rbx +; AVX1-NEXT: sarq $63, %rbx +; AVX1-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1 +; AVX1-NEXT: shlq $60, %rdi +; AVX1-NEXT: sarq $63, 
%rdi +; AVX1-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1 +; AVX1-NEXT: shlq $59, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: shlq $58, %rdx +; AVX1-NEXT: sarq $63, %rdx +; AVX1-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1 +; AVX1-NEXT: shlq $57, %rsi +; AVX1-NEXT: sarq $63, %rsi +; AVX1-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1 +; AVX1-NEXT: shrq $7, %rbp +; AVX1-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: popq %rbx +; AVX1-NEXT: popq %r12 +; AVX1-NEXT: popq %r13 +; AVX1-NEXT: popq %r14 +; AVX1-NEXT: popq %r15 +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i16_16i16: +; AVX2: # BB#0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .Lcfi0: +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: .Lcfi1: +; AVX2-NEXT: .cfi_def_cfa_offset 24 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: .Lcfi2: +; AVX2-NEXT: .cfi_def_cfa_offset 32 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: .Lcfi3: +; AVX2-NEXT: .cfi_def_cfa_offset 40 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: .Lcfi4: +; AVX2-NEXT: .cfi_def_cfa_offset 48 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: .Lcfi5: +; AVX2-NEXT: .cfi_def_cfa_offset 56 +; AVX2-NEXT: .Lcfi6: +; AVX2-NEXT: .cfi_offset %rbx, -56 +; AVX2-NEXT: .Lcfi7: +; AVX2-NEXT: .cfi_offset %r12, -48 +; AVX2-NEXT: .Lcfi8: +; AVX2-NEXT: .cfi_offset %r13, -40 +; AVX2-NEXT: .Lcfi9: +; AVX2-NEXT: .cfi_offset %r14, -32 +; AVX2-NEXT: .Lcfi10: +; AVX2-NEXT: .cfi_offset %r15, -24 +; AVX2-NEXT: .Lcfi11: +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movswq -{{[0-9]+}}(%rsp), %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: shlq $55, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vmovd %ecx, %xmm0 +; AVX2-NEXT: movq %rax, %r8 +; AVX2-NEXT: movq %rax, %r10 +; AVX2-NEXT: movq %rax, %r11 +; AVX2-NEXT: movq %rax, %r14 +; AVX2-NEXT: movq %rax, %r15 +; AVX2-NEXT: movq %rax, %r9 +; AVX2-NEXT: movq %rax, %r12 +; AVX2-NEXT: movq %rax, %r13 +; AVX2-NEXT: movq %rax, %rbx +; AVX2-NEXT: movq %rax, %rdi +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: movq %rax, %rdx +; AVX2-NEXT: movq %rax, %rsi +; AVX2-NEXT: movsbq %al, %rbp +; AVX2-NEXT: shlq $54, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 +; AVX2-NEXT: shlq $53, %r8 +; AVX2-NEXT: sarq $63, %r8 +; AVX2-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0 +; AVX2-NEXT: shlq $52, %r10 +; AVX2-NEXT: sarq $63, %r10 +; AVX2-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0 +; AVX2-NEXT: shlq $51, %r11 +; AVX2-NEXT: sarq $63, %r11 +; AVX2-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0 +; AVX2-NEXT: shlq $50, %r14 +; AVX2-NEXT: sarq $63, %r14 +; AVX2-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 +; AVX2-NEXT: shlq $49, %r15 +; AVX2-NEXT: sarq $63, %r15 +; AVX2-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0 +; AVX2-NEXT: shrq $15, %r9 +; AVX2-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0 +; AVX2-NEXT: shlq $63, %r13 +; AVX2-NEXT: sarq $63, %r13 +; AVX2-NEXT: vmovd %r13d, %xmm1 +; AVX2-NEXT: shlq $62, %r12 +; AVX2-NEXT: sarq $63, %r12 +; AVX2-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $61, %rbx +; AVX2-NEXT: sarq $63, %rbx +; AVX2-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $60, %rdi +; AVX2-NEXT: sarq $63, %rdi +; AVX2-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1 +; AVX2-NEXT: shlq $59, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $58, %rdx +; AVX2-NEXT: sarq $63, %rdx +; AVX2-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $57, %rsi +; AVX2-NEXT: sarq $63, %rsi +; AVX2-NEXT: vpinsrw 
$6, %esi, %xmm1, %xmm1 +; AVX2-NEXT: shrq $7, %rbp +; AVX2-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i16_16i16: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k0 +; AVX512-NEXT: vpmovm2w %k0, %ymm0 +; AVX512-NEXT: retq + %1 = bitcast i16 %a0 to <16 x i1> + %2 = sext <16 x i1> %1 to <16 x i16> + ret <16 x i16> %2 +} + +define <32 x i8> @ext_i32_32i8(i32 %a0) { +; SSE2-SSSE3-LABEL: ext_i32_32i8: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: pushq %rbp +; SSE2-SSSE3-NEXT: .Lcfi12: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16 +; SSE2-SSSE3-NEXT: pushq %r15 +; SSE2-SSSE3-NEXT: .Lcfi13: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24 +; SSE2-SSSE3-NEXT: pushq %r14 +; SSE2-SSSE3-NEXT: .Lcfi14: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32 +; SSE2-SSSE3-NEXT: pushq %r13 +; SSE2-SSSE3-NEXT: .Lcfi15: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40 +; SSE2-SSSE3-NEXT: pushq %r12 +; SSE2-SSSE3-NEXT: .Lcfi16: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48 +; SSE2-SSSE3-NEXT: pushq %rbx +; SSE2-SSSE3-NEXT: .Lcfi17: +; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56 +; SSE2-SSSE3-NEXT: .Lcfi18: +; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56 +; SSE2-SSSE3-NEXT: .Lcfi19: +; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48 +; SSE2-SSSE3-NEXT: .Lcfi20: +; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40 +; SSE2-SSSE3-NEXT: .Lcfi21: +; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32 +; SSE2-SSSE3-NEXT: .Lcfi22: +; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24 +; SSE2-SSSE3-NEXT: .Lcfi23: +; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16 +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: shrl $16, %edi +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rbx +; SSE2-SSSE3-NEXT: movq %rbx, %r8 +; SSE2-SSSE3-NEXT: movq %rbx, %r9 +; SSE2-SSSE3-NEXT: movq %rbx, %r10 +; SSE2-SSSE3-NEXT: movq %rbx, %r11 +; SSE2-SSSE3-NEXT: movq %rbx, %r14 +; SSE2-SSSE3-NEXT: movq %rbx, %r15 +; SSE2-SSSE3-NEXT: movq %rbx, %r12 +; SSE2-SSSE3-NEXT: movq %rbx, %r13 +; SSE2-SSSE3-NEXT: movq %rbx, %rdi +; SSE2-SSSE3-NEXT: movq %rbx, %rcx +; SSE2-SSSE3-NEXT: movq %rbx, %rdx +; SSE2-SSSE3-NEXT: movq %rbx, %rbp +; SSE2-SSSE3-NEXT: movq %rbx, %rsi +; SSE2-SSSE3-NEXT: movq %rbx, %rax +; SSE2-SSSE3-NEXT: shrq $15, %rax +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: movq %rbx, %rax +; SSE2-SSSE3-NEXT: movsbq %bl, %rbx +; SSE2-SSSE3-NEXT: shlq $49, %r8 +; SSE2-SSSE3-NEXT: sarq $63, %r8 +; SSE2-SSSE3-NEXT: movd %r8d, %xmm15 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7] +; SSE2-SSSE3-NEXT: shlq $50, %r9 +; SSE2-SSSE3-NEXT: sarq $63, %r9 +; SSE2-SSSE3-NEXT: movd %r9d, %xmm8 +; SSE2-SSSE3-NEXT: shlq $51, %r10 +; SSE2-SSSE3-NEXT: sarq $63, %r10 +; SSE2-SSSE3-NEXT: movd %r10d, %xmm3 +; SSE2-SSSE3-NEXT: shlq $52, %r11 +; SSE2-SSSE3-NEXT: sarq $63, %r11 +; SSE2-SSSE3-NEXT: movd %r11d, %xmm9 +; SSE2-SSSE3-NEXT: shlq $53, %r14 +; SSE2-SSSE3-NEXT: sarq $63, %r14 +; SSE2-SSSE3-NEXT: movd %r14d, %xmm6 +; SSE2-SSSE3-NEXT: shlq $54, %r15 +; SSE2-SSSE3-NEXT: sarq $63, %r15 +; SSE2-SSSE3-NEXT: movd %r15d, %xmm10 +; SSE2-SSSE3-NEXT: shlq $55, %r12 +; SSE2-SSSE3-NEXT: sarq $63, %r12 +; SSE2-SSSE3-NEXT: movd %r12d, %xmm1 +; SSE2-SSSE3-NEXT: shlq $60, %r13 +; SSE2-SSSE3-NEXT: sarq $63, %r13 +; SSE2-SSSE3-NEXT: 
movd %r13d, %xmm11 +; SSE2-SSSE3-NEXT: shlq $61, %rdi +; SSE2-SSSE3-NEXT: sarq $63, %rdi +; SSE2-SSSE3-NEXT: movd %edi, %xmm5 +; SSE2-SSSE3-NEXT: shlq $62, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm12 +; SSE2-SSSE3-NEXT: shlq $63, %rdx +; SSE2-SSSE3-NEXT: sarq $63, %rdx +; SSE2-SSSE3-NEXT: movd %edx, %xmm0 +; SSE2-SSSE3-NEXT: shlq $58, %rbp +; SSE2-SSSE3-NEXT: sarq $63, %rbp +; SSE2-SSSE3-NEXT: movd %ebp, %xmm13 +; SSE2-SSSE3-NEXT: shlq $59, %rsi +; SSE2-SSSE3-NEXT: sarq $63, %rsi +; SSE2-SSSE3-NEXT: movd %esi, %xmm7 +; SSE2-SSSE3-NEXT: shlq $57, %rax +; SSE2-SSSE3-NEXT: sarq $63, %rax +; SSE2-SSSE3-NEXT: movd %eax, %xmm4 +; SSE2-SSSE3-NEXT: shrq $7, %rbx +; SSE2-SSSE3-NEXT: movd %ebx, %xmm14 +; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi +; SSE2-SSSE3-NEXT: movq %rsi, %r8 +; SSE2-SSSE3-NEXT: movq %rsi, %r9 +; SSE2-SSSE3-NEXT: movq %rsi, %r10 +; SSE2-SSSE3-NEXT: movq %rsi, %r11 +; SSE2-SSSE3-NEXT: movq %rsi, %r14 +; SSE2-SSSE3-NEXT: movq %rsi, %r15 +; SSE2-SSSE3-NEXT: movq %rsi, %r12 +; SSE2-SSSE3-NEXT: movq %rsi, %r13 +; SSE2-SSSE3-NEXT: movq %rsi, %rbx +; SSE2-SSSE3-NEXT: movq %rsi, %rax +; SSE2-SSSE3-NEXT: movq %rsi, %rcx +; SSE2-SSSE3-NEXT: movq %rsi, %rdx +; SSE2-SSSE3-NEXT: movq %rsi, %rdi +; SSE2-SSSE3-NEXT: movq %rsi, %rbp +; SSE2-SSSE3-NEXT: shrq $15, %rbp +; SSE2-SSSE3-NEXT: movd %ebp, %xmm2 +; SSE2-SSSE3-NEXT: movq %rsi, %rbp +; SSE2-SSSE3-NEXT: movsbq %sil, %rsi +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3] +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7] +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7] +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3] +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7] +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7] +; SSE2-SSSE3-NEXT: shlq $49, %r8 +; SSE2-SSSE3-NEXT: sarq $63, %r8 +; SSE2-SSSE3-NEXT: movd %r8d, %xmm3 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3] +; SSE2-SSSE3-NEXT: shlq $50, %r9 +; SSE2-SSSE3-NEXT: sarq $63, %r9 +; SSE2-SSSE3-NEXT: movd %r9d, %xmm4 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1] +; SSE2-SSSE3-NEXT: shlq $51, %r10 +; 
SSE2-SSSE3-NEXT: sarq $63, %r10 +; SSE2-SSSE3-NEXT: movd %r10d, %xmm5 +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: shlq $52, %r11 +; SSE2-SSSE3-NEXT: sarq $63, %r11 +; SSE2-SSSE3-NEXT: movd %r11d, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-SSSE3-NEXT: shlq $53, %r14 +; SSE2-SSSE3-NEXT: sarq $63, %r14 +; SSE2-SSSE3-NEXT: movd %r14d, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] +; SSE2-SSSE3-NEXT: shlq $54, %r15 +; SSE2-SSSE3-NEXT: sarq $63, %r15 +; SSE2-SSSE3-NEXT: movd %r15d, %xmm4 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3] +; SSE2-SSSE3-NEXT: shlq $55, %r12 +; SSE2-SSSE3-NEXT: sarq $63, %r12 +; SSE2-SSSE3-NEXT: movd %r12d, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-SSSE3-NEXT: shlq $60, %r13 +; SSE2-SSSE3-NEXT: sarq $63, %r13 +; SSE2-SSSE3-NEXT: movd %r13d, %xmm6 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] +; SSE2-SSSE3-NEXT: shlq $61, %rbx +; SSE2-SSSE3-NEXT: sarq $63, %rbx +; SSE2-SSSE3-NEXT: movd %ebx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-SSSE3-NEXT: shlq $62, %rax +; SSE2-SSSE3-NEXT: sarq $63, %rax +; SSE2-SSSE3-NEXT: movd %eax, %xmm2 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1] +; SSE2-SSSE3-NEXT: shlq $63, %rcx +; SSE2-SSSE3-NEXT: sarq $63, %rcx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7] +; SSE2-SSSE3-NEXT: shlq $58, %rdx +; SSE2-SSSE3-NEXT: sarq $63, %rdx +; SSE2-SSSE3-NEXT: movd %edx, %xmm5 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE2-SSSE3-NEXT: shlq $59, %rdi +; SSE2-SSSE3-NEXT: sarq $63, %rdi +; SSE2-SSSE3-NEXT: movd %edi, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7] +; SSE2-SSSE3-NEXT: shlq $57, %rbp +; SSE2-SSSE3-NEXT: sarq $63, %rbp +; SSE2-SSSE3-NEXT: movd %ebp, %xmm4 +; SSE2-SSSE3-NEXT: shrq $7, %rsi +; SSE2-SSSE3-NEXT: movd %esi, %xmm5 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] +; SSE2-SSSE3-NEXT: popq %rbx +; SSE2-SSSE3-NEXT: popq %r12 +; SSE2-SSSE3-NEXT: popq %r13 +; SSE2-SSSE3-NEXT: popq 
%r14 +; SSE2-SSSE3-NEXT: popq %r15 +; SSE2-SSSE3-NEXT: popq %rbp +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i32_32i8: +; AVX1: # BB#0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: .Lcfi12: +; AVX1-NEXT: .cfi_def_cfa_offset 16 +; AVX1-NEXT: .Lcfi13: +; AVX1-NEXT: .cfi_offset %rbp, -16 +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: .Lcfi14: +; AVX1-NEXT: .cfi_def_cfa_register %rbp +; AVX1-NEXT: pushq %r15 +; AVX1-NEXT: pushq %r14 +; AVX1-NEXT: pushq %r13 +; AVX1-NEXT: pushq %r12 +; AVX1-NEXT: pushq %rbx +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $64, %rsp +; AVX1-NEXT: .Lcfi15: +; AVX1-NEXT: .cfi_offset %rbx, -56 +; AVX1-NEXT: .Lcfi16: +; AVX1-NEXT: .cfi_offset %r12, -48 +; AVX1-NEXT: .Lcfi17: +; AVX1-NEXT: .cfi_offset %r13, -40 +; AVX1-NEXT: .Lcfi18: +; AVX1-NEXT: .cfi_offset %r14, -32 +; AVX1-NEXT: .Lcfi19: +; AVX1-NEXT: .cfi_offset %r15, -24 +; AVX1-NEXT: movl %edi, (%rsp) +; AVX1-NEXT: movslq (%rsp), %rdx +; AVX1-NEXT: movq %rdx, %rcx +; AVX1-NEXT: shlq $47, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vmovd %ecx, %xmm0 +; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq %rdx, %r8 +; AVX1-NEXT: movq %rdx, %rcx +; AVX1-NEXT: movq %rdx, %rdi +; AVX1-NEXT: movq %rdx, %r13 +; AVX1-NEXT: movq %rdx, %rsi +; AVX1-NEXT: movq %rdx, %r10 +; AVX1-NEXT: movq %rdx, %r11 +; AVX1-NEXT: movq %rdx, %r9 +; AVX1-NEXT: movq %rdx, %rbx +; AVX1-NEXT: movq %rdx, %r14 +; AVX1-NEXT: movq %rdx, %r15 +; AVX1-NEXT: movq %rdx, %r12 +; AVX1-NEXT: movq %rdx, %rax +; AVX1-NEXT: shlq $46, %rax +; AVX1-NEXT: sarq $63, %rax +; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shlq $45, %rax +; AVX1-NEXT: sarq $63, %rax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: shlq $44, %r8 +; AVX1-NEXT: sarq $63, %r8 +; AVX1-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %r8 +; AVX1-NEXT: shlq $43, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %rcx +; AVX1-NEXT: shlq $42, %rdi +; AVX1-NEXT: sarq $63, %rdi +; AVX1-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %rdi +; AVX1-NEXT: shlq $41, %r13 +; AVX1-NEXT: sarq $63, %r13 +; AVX1-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %r13 +; AVX1-NEXT: shlq $40, %rsi +; AVX1-NEXT: sarq $63, %rsi +; AVX1-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %rsi +; AVX1-NEXT: shlq $39, %r10 +; AVX1-NEXT: sarq $63, %r10 +; AVX1-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %r10 +; AVX1-NEXT: shlq $38, %r11 +; AVX1-NEXT: sarq $63, %r11 +; AVX1-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0 +; AVX1-NEXT: movsbq %dl, %rax +; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: shlq $37, %r9 +; AVX1-NEXT: sarq $63, %r9 +; AVX1-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %r9 +; AVX1-NEXT: shlq $36, %rbx +; AVX1-NEXT: sarq $63, %rbx +; AVX1-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %rbx +; AVX1-NEXT: shlq $35, %r14 +; AVX1-NEXT: sarq $63, %r14 +; AVX1-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %r14 +; AVX1-NEXT: shlq $34, %r15 +; AVX1-NEXT: sarq $63, %r15 +; AVX1-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %r15 +; AVX1-NEXT: shlq $33, %r12 +; AVX1-NEXT: sarq $63, %r12 +; AVX1-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %r12 +; AVX1-NEXT: movq 
{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shrq $31, %rax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, %rax +; AVX1-NEXT: shlq $63, %r8 +; AVX1-NEXT: sarq $63, %r8 +; AVX1-NEXT: vmovd %r8d, %xmm1 +; AVX1-NEXT: movq %rdx, %r8 +; AVX1-NEXT: movswq %dx, %rdx +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; AVX1-NEXT: shlq $62, %r11 +; AVX1-NEXT: sarq $63, %r11 +; AVX1-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $61, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: shlq $60, %rdi +; AVX1-NEXT: sarq $63, %rdi +; AVX1-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1 +; AVX1-NEXT: shlq $59, %r13 +; AVX1-NEXT: sarq $63, %r13 +; AVX1-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $58, %rsi +; AVX1-NEXT: sarq $63, %rsi +; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1 +; AVX1-NEXT: shlq $57, %r10 +; AVX1-NEXT: sarq $63, %r10 +; AVX1-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1 +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX1-NEXT: shrq $7, %rcx +; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: shlq $55, %r9 +; AVX1-NEXT: sarq $63, %r9 +; AVX1-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $54, %rbx +; AVX1-NEXT: sarq $63, %rbx +; AVX1-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1 +; AVX1-NEXT: shlq $53, %r14 +; AVX1-NEXT: sarq $63, %r14 +; AVX1-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $52, %r15 +; AVX1-NEXT: sarq $63, %r15 +; AVX1-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $51, %r12 +; AVX1-NEXT: sarq $63, %r12 +; AVX1-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $50, %rax +; AVX1-NEXT: sarq $63, %rax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX1-NEXT: shlq $49, %r8 +; AVX1-NEXT: sarq $63, %r8 +; AVX1-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1 +; AVX1-NEXT: shrq $15, %rdx +; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: leaq -40(%rbp), %rsp +; AVX1-NEXT: popq %rbx +; AVX1-NEXT: popq %r12 +; AVX1-NEXT: popq %r13 +; AVX1-NEXT: popq %r14 +; AVX1-NEXT: popq %r15 +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i32_32i8: +; AVX2: # BB#0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .Lcfi12: +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: .Lcfi13: +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: .Lcfi14: +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $64, %rsp +; AVX2-NEXT: .Lcfi15: +; AVX2-NEXT: .cfi_offset %rbx, -56 +; AVX2-NEXT: .Lcfi16: +; AVX2-NEXT: .cfi_offset %r12, -48 +; AVX2-NEXT: .Lcfi17: +; AVX2-NEXT: .cfi_offset %r13, -40 +; AVX2-NEXT: .Lcfi18: +; AVX2-NEXT: .cfi_offset %r14, -32 +; AVX2-NEXT: .Lcfi19: +; AVX2-NEXT: .cfi_offset %r15, -24 +; AVX2-NEXT: movl %edi, (%rsp) +; AVX2-NEXT: movslq (%rsp), %rdx +; AVX2-NEXT: movq %rdx, %rcx +; AVX2-NEXT: shlq $47, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vmovd %ecx, %xmm0 +; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq %rdx, %r8 +; AVX2-NEXT: movq %rdx, %rcx +; AVX2-NEXT: movq %rdx, %rdi +; AVX2-NEXT: movq %rdx, %r13 +; AVX2-NEXT: movq %rdx, %rsi +; AVX2-NEXT: movq %rdx, %r10 +; AVX2-NEXT: movq %rdx, %r11 +; AVX2-NEXT: movq %rdx, %r9 +; AVX2-NEXT: movq %rdx, %rbx +; AVX2-NEXT: movq %rdx, %r14 +; AVX2-NEXT: movq %rdx, %r15 +; AVX2-NEXT: movq %rdx, %r12 +; AVX2-NEXT: movq %rdx, %rax +; 
AVX2-NEXT: shlq $46, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: shlq $45, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: shlq $44, %r8 +; AVX2-NEXT: sarq $63, %r8 +; AVX2-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r8 +; AVX2-NEXT: shlq $43, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %rcx +; AVX2-NEXT: shlq $42, %rdi +; AVX2-NEXT: sarq $63, %rdi +; AVX2-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %rdi +; AVX2-NEXT: shlq $41, %r13 +; AVX2-NEXT: sarq $63, %r13 +; AVX2-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r13 +; AVX2-NEXT: shlq $40, %rsi +; AVX2-NEXT: sarq $63, %rsi +; AVX2-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %rsi +; AVX2-NEXT: shlq $39, %r10 +; AVX2-NEXT: sarq $63, %r10 +; AVX2-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r10 +; AVX2-NEXT: shlq $38, %r11 +; AVX2-NEXT: sarq $63, %r11 +; AVX2-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0 +; AVX2-NEXT: movsbq %dl, %rax +; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: shlq $37, %r9 +; AVX2-NEXT: sarq $63, %r9 +; AVX2-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r9 +; AVX2-NEXT: shlq $36, %rbx +; AVX2-NEXT: sarq $63, %rbx +; AVX2-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %rbx +; AVX2-NEXT: shlq $35, %r14 +; AVX2-NEXT: sarq $63, %r14 +; AVX2-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r14 +; AVX2-NEXT: shlq $34, %r15 +; AVX2-NEXT: sarq $63, %r15 +; AVX2-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r15 +; AVX2-NEXT: shlq $33, %r12 +; AVX2-NEXT: sarq $63, %r12 +; AVX2-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r12 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: shrq $31, %rax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %rax +; AVX2-NEXT: shlq $63, %r8 +; AVX2-NEXT: sarq $63, %r8 +; AVX2-NEXT: vmovd %r8d, %xmm1 +; AVX2-NEXT: movq %rdx, %r8 +; AVX2-NEXT: movswq %dx, %rdx +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; AVX2-NEXT: shlq $62, %r11 +; AVX2-NEXT: sarq $63, %r11 +; AVX2-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $61, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $60, %rdi +; AVX2-NEXT: sarq $63, %rdi +; AVX2-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1 +; AVX2-NEXT: shlq $59, %r13 +; AVX2-NEXT: sarq $63, %r13 +; AVX2-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $58, %rsi +; AVX2-NEXT: sarq $63, %rsi +; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1 +; AVX2-NEXT: shlq $57, %r10 +; AVX2-NEXT: sarq $63, %r10 +; AVX2-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: shrq $7, %rcx +; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $55, %r9 +; AVX2-NEXT: sarq $63, %r9 +; AVX2-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $54, %rbx +; AVX2-NEXT: sarq $63, %rbx +; AVX2-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $53, %r14 +; AVX2-NEXT: sarq $63, %r14 +; AVX2-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $52, %r15 +; AVX2-NEXT: sarq $63, %r15 +; AVX2-NEXT: vpinsrb $11, 
%r15d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $51, %r12 +; AVX2-NEXT: sarq $63, %r12 +; AVX2-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $50, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX2-NEXT: shlq $49, %r8 +; AVX2-NEXT: sarq $63, %r8 +; AVX2-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1 +; AVX2-NEXT: shrq $15, %rdx +; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: leaq -40(%rbp), %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i32_32i8: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k0 +; AVX512-NEXT: vpmovm2b %k0, %ymm0 +; AVX512-NEXT: retq + %1 = bitcast i32 %a0 to <32 x i1> + %2 = sext <32 x i1> %1 to <32 x i8> + ret <32 x i8> %2 +} + +; +; 512-bit vectors +; + +define <8 x i64> @ext_i8_8i64(i8 %a0) { +; SSE2-SSSE3-LABEL: ext_i8_8i64: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: shrl $7, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3] +; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7] +; SSE2-SSSE3-NEXT: psllq $63, %xmm0 +; SSE2-SSSE3-NEXT: psrad $31, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3] +; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7] +; SSE2-SSSE3-NEXT: psllq $63, %xmm1 +; SSE2-SSSE3-NEXT: psrad $31, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3] +; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7] +; SSE2-SSSE3-NEXT: psllq $63, %xmm2 +; SSE2-SSSE3-NEXT: psrad $31, %xmm2 +; 
+define <8 x i64> @ext_i8_8i64(i8 %a0) {
+; SSE2-SSSE3-LABEL: ext_i8_8i64:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: shrl $7, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: psllq $63, %xmm0
+; SSE2-SSSE3-NEXT: psrad $31, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: psllq $63, %xmm1
+; SSE2-SSSE3-NEXT: psrad $31, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: psllq $63, %xmm2
+; SSE2-SSSE3-NEXT: psrad $31, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3]
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: psllq $63, %xmm3
+; SSE2-SSSE3-NEXT: psrad $31, %xmm3
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i8_8i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: movl %eax, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $5, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $6, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $7, %eax
+; AVX1-NEXT: movzwl %ax, %eax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i8_8i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: movl %eax, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $3, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $5, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $6, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $7, %eax
+; AVX2-NEXT: movzwl %ax, %eax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
+; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i8_8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: retq
+  %1 = bitcast i8 %a0 to <8 x i1>
+  %2 = sext <8 x i1> %1 to <8 x i64>
+  ret <8 x i64> %2
+}
+
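+; Bitcast i16 to <16 x i1>, then sign-extend to <16 x i32>. AVX512 uses
+; kmovd + masked vpternlogd; SSE/AVX insert the bits one at a time and
+; splat them with a shift-left-by-31 / arithmetic-shift-right-by-31 pair.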
+define <16 x i32> @ext_i16_16i32(i16 %a0) {
+; SSE2-SSSE3-LABEL: ext_i16_16i32:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-SSSE3-NEXT: pslld $31, %xmm0
+; SSE2-SSSE3-NEXT: psrad $31, %xmm0
+; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pslld $31, %xmm1
+; SSE2-SSSE3-NEXT: psrad $31, %xmm1
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-SSSE3-NEXT: pslld $31, %xmm2
+; SSE2-SSSE3-NEXT: psrad $31, %xmm2
+; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pslld $31, %xmm3
+; SSE2-SSSE3-NEXT: psrad $31, %xmm3
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i16_16i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: movl %eax, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $5, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $6, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $7, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $8, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $9, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $10, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $11, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $12, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $13, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $14, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $15, %eax
+; AVX1-NEXT: movzwl %ax, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i16_16i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: movl %eax, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $3, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $5, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $6, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $7, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $8, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $9, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $10, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $11, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $12, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $13, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $14, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $15, %eax
+; AVX2-NEXT: movzwl %ax, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
+; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i16_16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: retq
+  %1 = bitcast i16 %a0 to <16 x i1>
+  %2 = sext <16 x i1> %1 to <16 x i32>
+  ret <16 x i32> %2
+}
+
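+; Bitcast i32 to <32 x i1>, then sign-extend to <32 x i16>. AVX512 lowers
+; this to kmovd + vpmovm2w; the other targets test all 32 bits of the
+; scalar individually, which currently produces very long code.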
+define <32 x i16> @ext_i32_32i16(i32 %a0) {
+; SSE2-SSSE3-LABEL: ext_i32_32i16:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: movl %edi, %eax
+; SSE2-SSSE3-NEXT: shrl $16, %eax
+; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $7, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $6, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $5, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $4, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $3, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $11, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $10, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $9, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $8, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $13, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $12, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $14, %ecx
+; SSE2-SSSE3-NEXT: andl $1, %ecx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shrl $15, %eax
+; SSE2-SSSE3-NEXT: movzwl %ax, %eax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: psllw $15, %xmm0
+; SSE2-SSSE3-NEXT: psraw $15, %xmm0
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-SSSE3-NEXT: psllw $15, %xmm1
+; SSE2-SSSE3-NEXT: psraw $15, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-SSSE3-NEXT: psllw $15, %xmm2
+; SSE2-SSSE3-NEXT: psraw $15, %xmm2
+; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-SSSE3-NEXT: psllw $15, %xmm3
+; SSE2-SSSE3-NEXT: psraw $15, %xmm3
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i32_32i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi20:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi21:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi22:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $128, %rsp
+; AVX1-NEXT: .Lcfi23:
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .Lcfi24:
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .Lcfi25:
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .Lcfi26:
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .Lcfi27:
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX1-NEXT: movl %edi, %r13d
+; AVX1-NEXT: movl %edi, %r12d
+; AVX1-NEXT: movl %edi, %r15d
+; AVX1-NEXT: movl %edi, %r14d
+; AVX1-NEXT: movl %edi, %ebx
+; AVX1-NEXT: movl %edi, %r11d
+; AVX1-NEXT: movl %edi, %r10d
+; AVX1-NEXT: movl %edi, %r9d
+; AVX1-NEXT: movl %edi, %r8d
+; AVX1-NEXT: movl %edi, %esi
+; AVX1-NEXT: movl %edi, %edx
+; AVX1-NEXT: movl %edi, %ecx
+; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: andl $1, %edi
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: shrl %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $3, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $4, %esi
+; AVX1-NEXT: andl $1, %esi
+; AVX1-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
+; AVX1-NEXT: shrl $5, %r8d
+; AVX1-NEXT: andl $1, %r8d
+; AVX1-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $6, %r9d
+; AVX1-NEXT: andl $1, %r9d
+; AVX1-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $7, %r10d
+; AVX1-NEXT: andl $1, %r10d
+; AVX1-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $8, %r11d
+; AVX1-NEXT: andl $1, %r11d
+; AVX1-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $9, %ebx
+; AVX1-NEXT: andl $1, %ebx
+; AVX1-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; AVX1-NEXT: shrl $10, %r14d
+; AVX1-NEXT: andl $1, %r14d
+; AVX1-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $11, %r15d
+; AVX1-NEXT: andl $1, %r15d
+; AVX1-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $12, %r12d
+; AVX1-NEXT: andl $1, %r12d
+; AVX1-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
+; AVX1-NEXT: shrl $13, %r13d
+; AVX1-NEXT: andl $1, %r13d
+; AVX1-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $14, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $15, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $16, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $17, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $18, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $19, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $20, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $21, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $22, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $23, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $25, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $26, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $27, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $28, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $29, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $30, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX1-NEXT: shrl $31, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2
+; AVX1-NEXT: vpsraw $15, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2
+; AVX1-NEXT: vpsraw $15, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1
+; AVX1-NEXT: vpsraw $15, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: leaq -40(%rbp), %rsp
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i32_32i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: .Lcfi20:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: .Lcfi21:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: .Lcfi22:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $128, %rsp
+; AVX2-NEXT: .Lcfi23:
+; AVX2-NEXT: .cfi_offset %rbx, -56
+; AVX2-NEXT: .Lcfi24:
+; AVX2-NEXT: .cfi_offset %r12, -48
+; AVX2-NEXT: .Lcfi25:
+; AVX2-NEXT: .cfi_offset %r13, -40
+; AVX2-NEXT: .Lcfi26:
+; AVX2-NEXT: .cfi_offset %r14, -32
+; AVX2-NEXT: .Lcfi27:
+; AVX2-NEXT: .cfi_offset %r15, -24
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
+; AVX2-NEXT: movl %edi, %r13d
+; AVX2-NEXT: movl %edi, %r12d
+; AVX2-NEXT: movl %edi, %r15d
+; AVX2-NEXT: movl %edi, %r14d
+; AVX2-NEXT: movl %edi, %ebx
+; AVX2-NEXT: movl %edi, %r11d
+; AVX2-NEXT: movl %edi, %r10d
+; AVX2-NEXT: movl %edi, %r9d
+; AVX2-NEXT: movl %edi, %r8d
+; AVX2-NEXT: movl %edi, %esi
+; AVX2-NEXT: movl %edi, %edx
+; AVX2-NEXT: movl %edi, %ecx
+; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: andl $1, %edi
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: shrl %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $3, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $4, %esi
+; AVX2-NEXT: andl $1, %esi
+; AVX2-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
+; AVX2-NEXT: shrl $5, %r8d
+; AVX2-NEXT: andl $1, %r8d
+; AVX2-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $6, %r9d
+; AVX2-NEXT: andl $1, %r9d
+; AVX2-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $7, %r10d
+; AVX2-NEXT: andl $1, %r10d
+; AVX2-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $8, %r11d
+; AVX2-NEXT: andl $1, %r11d
+; AVX2-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $9, %ebx
+; AVX2-NEXT: andl $1, %ebx
+; AVX2-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
+; AVX2-NEXT: shrl $10, %r14d
+; AVX2-NEXT: andl $1, %r14d
+; AVX2-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $11, %r15d
+; AVX2-NEXT: andl $1, %r15d
+; AVX2-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $12, %r12d
+; AVX2-NEXT: andl $1, %r12d
+; AVX2-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
+; AVX2-NEXT: shrl $13, %r13d
+; AVX2-NEXT: andl $1, %r13d
+; AVX2-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $14, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $15, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $16, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $17, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $18, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $19, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $20, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $21, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $22, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $23, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $24, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $25, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $26, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $27, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $28, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $29, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $30, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
+; AVX2-NEXT: shrl $31, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vpsllw $15, %ymm0, %ymm0
+; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vpsllw $15, %ymm1, %ymm1
+; AVX2-NEXT: vpsraw $15, %ymm1, %ymm1
+; AVX2-NEXT: leaq -40(%rbp), %rsp
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ext_i32_32i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: kmovd %edi, %k0
+; AVX512-NEXT: vpmovm2w %k0, %zmm0
+; AVX512-NEXT: retq
+  %1 = bitcast i32 %a0 to <32 x i1>
+  %2 = sext <32 x i1> %1 to <32 x i16>
+  ret <32 x i16> %2
+}
+
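+; Bitcast i64 to <64 x i1>, then sign-extend to <64 x i8>. The scalar is
+; split into 16-bit chunks on the stack, and each bit is moved into the
+; sign position with shlq + sarq to yield the all-ones/all-zeros bytes.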
+define <64 x i8> @ext_i64_64i8(i64 %a0) {
+; SSE2-SSSE3-LABEL: ext_i64_64i8:
+; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3-NEXT: pushq %rbp
+; SSE2-SSSE3-NEXT: .Lcfi24:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16
+; SSE2-SSSE3-NEXT: pushq %r15
+; SSE2-SSSE3-NEXT: .Lcfi25:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24
+; SSE2-SSSE3-NEXT: pushq %r14
+; SSE2-SSSE3-NEXT: .Lcfi26:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32
+; SSE2-SSSE3-NEXT: pushq %r13
+; SSE2-SSSE3-NEXT: .Lcfi27:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40
+; SSE2-SSSE3-NEXT: pushq %r12
+; SSE2-SSSE3-NEXT: .Lcfi28:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48
+; SSE2-SSSE3-NEXT: pushq %rbx
+; SSE2-SSSE3-NEXT: .Lcfi29:
+; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56
+; SSE2-SSSE3-NEXT: .Lcfi30:
+; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56
+; SSE2-SSSE3-NEXT: .Lcfi31:
+; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48
+; SSE2-SSSE3-NEXT: .Lcfi32:
+; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40
+; SSE2-SSSE3-NEXT: .Lcfi33:
+; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32
+; SSE2-SSSE3-NEXT: .Lcfi34:
+; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24
+; SSE2-SSSE3-NEXT: .Lcfi35:
+; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movq %rdi, %rax
+; SSE2-SSSE3-NEXT: shrq $32, %rax
+; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movq %rdi, %rax
+; SSE2-SSSE3-NEXT: shrq $48, %rax
+; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: shrl $16, %edi
+; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rbx
+; SSE2-SSSE3-NEXT: movq %rbx, %r8
+; SSE2-SSSE3-NEXT: movq %rbx, %r9
+; SSE2-SSSE3-NEXT: movq %rbx, %r10
+; SSE2-SSSE3-NEXT: movq %rbx, %r11
+; SSE2-SSSE3-NEXT: movq %rbx, %r14
+; SSE2-SSSE3-NEXT: movq %rbx, %r15
+; SSE2-SSSE3-NEXT: movq %rbx, %r12
+; SSE2-SSSE3-NEXT: movq %rbx, %r13
+; SSE2-SSSE3-NEXT: movq %rbx, %rdi
+; SSE2-SSSE3-NEXT: movq %rbx, %rcx
+; SSE2-SSSE3-NEXT: movq %rbx, %rdx
+; SSE2-SSSE3-NEXT: movq %rbx, %rsi
+; SSE2-SSSE3-NEXT: movq %rbx, %rbp
+; SSE2-SSSE3-NEXT: movq %rbx, %rax
+; SSE2-SSSE3-NEXT: shrq $15, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm0
+; SSE2-SSSE3-NEXT: movq %rbx, %rax
+; SSE2-SSSE3-NEXT: movsbq %bl, %rbx
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm15
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm8
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm2
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm9
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm6
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm10
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm4
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm11
+; SSE2-SSSE3-NEXT: shlq $61, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm5
+; SSE2-SSSE3-NEXT: shlq $62, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm12
+; SSE2-SSSE3-NEXT: shlq $63, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm0
+; SSE2-SSSE3-NEXT: shlq $58, %rsi
+; SSE2-SSSE3-NEXT: sarq $63, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm13
+; SSE2-SSSE3-NEXT: shlq $59, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm7
+; SSE2-SSSE3-NEXT: shlq $57, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm3
+; SSE2-SSSE3-NEXT: shrq $7, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm14
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
+; SSE2-SSSE3-NEXT: movq %rsi, %r8
+; SSE2-SSSE3-NEXT: movq %rsi, %r9
+; SSE2-SSSE3-NEXT: movq %rsi, %r10
+; SSE2-SSSE3-NEXT: movq %rsi, %r11
+; SSE2-SSSE3-NEXT: movq %rsi, %r14
+; SSE2-SSSE3-NEXT: movq %rsi, %r15
+; SSE2-SSSE3-NEXT: movq %rsi, %r12
+; SSE2-SSSE3-NEXT: movq %rsi, %r13
+; SSE2-SSSE3-NEXT: movq %rsi, %rbx
+; SSE2-SSSE3-NEXT: movq %rsi, %rax
+; SSE2-SSSE3-NEXT: movq %rsi, %rcx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdi
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: shrq $15, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm1
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm13
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1],xmm13[2],xmm1[2],xmm13[3],xmm1[3],xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm1
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm8
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm15
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm9
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm4
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm10
+; SSE2-SSSE3-NEXT: shlq $61, %rbx
+; SSE2-SSSE3-NEXT: sarq $63, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm7
+; SSE2-SSSE3-NEXT: shlq $62, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm11
+; SSE2-SSSE3-NEXT: shlq $63, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
+; SSE2-SSSE3-NEXT: shlq $58, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm12
+; SSE2-SSSE3-NEXT: shlq $59, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm5
+; SSE2-SSSE3-NEXT: shlq $57, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm1
+; SSE2-SSSE3-NEXT: shrq $7, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm14
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
+; SSE2-SSSE3-NEXT: movq %rsi, %r8
+; SSE2-SSSE3-NEXT: movq %rsi, %r9
+; SSE2-SSSE3-NEXT: movq %rsi, %r10
+; SSE2-SSSE3-NEXT: movq %rsi, %r11
+; SSE2-SSSE3-NEXT: movq %rsi, %r14
+; SSE2-SSSE3-NEXT: movq %rsi, %r15
+; SSE2-SSSE3-NEXT: movq %rsi, %r12
+; SSE2-SSSE3-NEXT: movq %rsi, %r13
+; SSE2-SSSE3-NEXT: movq %rsi, %rbx
+; SSE2-SSSE3-NEXT: movq %rsi, %rax
+; SSE2-SSSE3-NEXT: movq %rsi, %rcx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdi
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: shrq $15, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm6
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm3
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm8
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm13
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm9
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm1
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm10
+; SSE2-SSSE3-NEXT: shlq $61, %rbx
+; SSE2-SSSE3-NEXT: sarq $63, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm15
+; SSE2-SSSE3-NEXT: shlq $62, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm11
+; SSE2-SSSE3-NEXT: shlq $63, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
+; SSE2-SSSE3-NEXT: shlq $58, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm12
+; SSE2-SSSE3-NEXT: shlq $59, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm5
+; SSE2-SSSE3-NEXT: shlq $57, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm6
+; SSE2-SSSE3-NEXT: shrq $7, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm14
+; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
+; SSE2-SSSE3-NEXT: movq %rsi, %r8
+; SSE2-SSSE3-NEXT: movq %rsi, %r9
+; SSE2-SSSE3-NEXT: movq %rsi, %r10
+; SSE2-SSSE3-NEXT: movq %rsi, %r11
+; SSE2-SSSE3-NEXT: movq %rsi, %r14
+; SSE2-SSSE3-NEXT: movq %rsi, %r15
+; SSE2-SSSE3-NEXT: movq %rsi, %r12
+; SSE2-SSSE3-NEXT: movq %rsi, %r13
+; SSE2-SSSE3-NEXT: movq %rsi, %rbx
+; SSE2-SSSE3-NEXT: movq %rsi, %rax
+; SSE2-SSSE3-NEXT: movq %rsi, %rcx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdx
+; SSE2-SSSE3-NEXT: movq %rsi, %rdi
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: shrq $15, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm7
+; SSE2-SSSE3-NEXT: movq %rsi, %rbp
+; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1],xmm3[2],xmm11[2],xmm3[3],xmm11[3],xmm3[4],xmm11[4],xmm3[5],xmm11[5],xmm3[6],xmm11[6],xmm3[7],xmm11[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3],xmm6[4],xmm14[4],xmm6[5],xmm14[5],xmm6[6],xmm14[6],xmm6[7],xmm14[7]
+; SSE2-SSSE3-NEXT: shlq $49, %r8
+; SSE2-SSSE3-NEXT: sarq $63, %r8
+; SSE2-SSSE3-NEXT: movd %r8d, %xmm4
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE2-SSSE3-NEXT: shlq $50, %r9
+; SSE2-SSSE3-NEXT: sarq $63, %r9
+; SSE2-SSSE3-NEXT: movd %r9d, %xmm6
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE2-SSSE3-NEXT: shlq $51, %r10
+; SSE2-SSSE3-NEXT: sarq $63, %r10
+; SSE2-SSSE3-NEXT: movd %r10d, %xmm5
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
+; SSE2-SSSE3-NEXT: shlq $52, %r11
+; SSE2-SSSE3-NEXT: sarq $63, %r11
+; SSE2-SSSE3-NEXT: movd %r11d, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3],xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
+; SSE2-SSSE3-NEXT: shlq $53, %r14
+; SSE2-SSSE3-NEXT: sarq $63, %r14
+; SSE2-SSSE3-NEXT: movd %r14d, %xmm7
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE2-SSSE3-NEXT: shlq $54, %r15
+; SSE2-SSSE3-NEXT: sarq $63, %r15
+; SSE2-SSSE3-NEXT: movd %r15d, %xmm6
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-SSSE3-NEXT: shlq $55, %r12
+; SSE2-SSSE3-NEXT: sarq $63, %r12
+; SSE2-SSSE3-NEXT: movd %r12d, %xmm4
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3],xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
+; SSE2-SSSE3-NEXT: shlq $60, %r13
+; SSE2-SSSE3-NEXT: sarq $63, %r13
+; SSE2-SSSE3-NEXT: movd %r13d, %xmm8
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; SSE2-SSSE3-NEXT: shlq $61, %rbx
+; SSE2-SSSE3-NEXT: sarq $63, %rbx
+; SSE2-SSSE3-NEXT: movd %ebx, %xmm6
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
+; SSE2-SSSE3-NEXT: shlq $62, %rax
+; SSE2-SSSE3-NEXT: sarq $63, %rax
+; SSE2-SSSE3-NEXT: movd %eax, %xmm7
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-SSSE3-NEXT: shlq $63, %rcx
+; SSE2-SSSE3-NEXT: sarq $63, %rcx
+; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; SSE2-SSSE3-NEXT: shlq $58, %rdx
+; SSE2-SSSE3-NEXT: sarq $63, %rdx
+; SSE2-SSSE3-NEXT: movd %edx, %xmm5
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; SSE2-SSSE3-NEXT: shlq $59, %rdi
+; SSE2-SSSE3-NEXT: sarq $63, %rdi
+; SSE2-SSSE3-NEXT: movd %edi, %xmm7
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; SSE2-SSSE3-NEXT: shlq $57, %rbp
+; SSE2-SSSE3-NEXT: sarq $63, %rbp
+; SSE2-SSSE3-NEXT: movd %ebp, %xmm5
+; SSE2-SSSE3-NEXT: shrq $7, %rsi
+; SSE2-SSSE3-NEXT: movd %esi, %xmm6
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; SSE2-SSSE3-NEXT: popq %rbx
+; SSE2-SSSE3-NEXT: popq %r12
+; SSE2-SSSE3-NEXT: popq %r13
+; SSE2-SSSE3-NEXT: popq %r14
+; SSE2-SSSE3-NEXT: popq %r15
+; SSE2-SSSE3-NEXT: popq %rbp
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: ext_i64_64i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: .Lcfi28:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: .Lcfi29:
+; AVX1-NEXT: .cfi_offset %rbp, -16
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: .Lcfi30:
+; AVX1-NEXT: .cfi_def_cfa_register %rbp
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $128, %rsp
+; AVX1-NEXT: .Lcfi31:
+; AVX1-NEXT: .cfi_offset %rbx, -56
+; AVX1-NEXT: .Lcfi32:
+; AVX1-NEXT: .cfi_offset %r12, -48
+; AVX1-NEXT: .Lcfi33:
+; AVX1-NEXT: .cfi_offset %r13, -40
+; AVX1-NEXT: .Lcfi34:
+; AVX1-NEXT: .cfi_offset %r14, -32
+; AVX1-NEXT: .Lcfi35:
+; AVX1-NEXT: .cfi_offset %r15, -24
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $32, %rdi
+; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: shlq $47, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: movq %rdx, %r10
+; AVX1-NEXT: movq %rdx, %r11
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: movq %rdx, %r15
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: shlq $46, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX1-NEXT: shlq $45, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: shlq $44, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: shlq $43, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: shlq $42, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: shlq $41, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: shlq $40, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: shlq $39, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r10
+; AVX1-NEXT: shlq $38, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: movsbq %dl, %rax
+; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: shlq $37, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: shlq $36, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: shlq $35, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: shlq $34, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r15
+; AVX1-NEXT: shlq $33, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; AVX1-NEXT: shrq $31, %rax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: shlq $63, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vmovd %r8d, %xmm1
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movswq %dx, %rdx
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; AVX1-NEXT: shlq $62, %r11
+; AVX1-NEXT: sarq $63, %r11
+; AVX1-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $61, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $60, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
+; AVX1-NEXT: shlq $59, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $58, %rsi
+; AVX1-NEXT: sarq $63, %rsi
+; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
+; AVX1-NEXT: shlq $57, %r10
+; AVX1-NEXT: sarq $63, %r10
+; AVX1-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
+; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; AVX1-NEXT: shrq $7, %rcx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $55, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $54, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
+; AVX1-NEXT: shlq $53, %r14
+; AVX1-NEXT: sarq $63, %r14
+; AVX1-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $52, %r15
+; AVX1-NEXT: sarq $63, %r15
+; AVX1-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $51, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
+; AVX1-NEXT: shlq $50, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX1-NEXT: shlq $49, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
+; AVX1-NEXT: shrq $15, %rdx
+; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: shlq $47, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movq %rdx, %r10
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: movq %rdx, %r11
+; AVX1-NEXT: movq %rdx, %r14
+; AVX1-NEXT: movq %rdx, %r15
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: shlq $46, %rax
+; AVX1-NEXT: sarq $63, %rax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX1-NEXT: shlq $45, %r13
+; AVX1-NEXT: sarq $63, %r13
+; AVX1-NEXT: vpinsrb $2, %r13d, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: shlq $44, %rcx
+; AVX1-NEXT: sarq $63, %rcx
+; AVX1-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: shlq $43, %r9
+; AVX1-NEXT: sarq $63, %r9
+; AVX1-NEXT: vpinsrb $4, %r9d, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: shlq $42, %r12
+; AVX1-NEXT: sarq $63, %r12
+; AVX1-NEXT: vpinsrb $5, %r12d, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: shlq $41, %rdi
+; AVX1-NEXT: sarq $63, %rdi
+; AVX1-NEXT: vpinsrb $6, %edi, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: shlq $40, %rbx
+; AVX1-NEXT: sarq $63, %rbx
+; AVX1-NEXT: vpinsrb $7, %ebx, %xmm2, %xmm2
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: shlq $39, %r8
+; AVX1-NEXT: sarq $63, %r8
+; AVX1-NEXT: vpinsrb $8, %r8d, %xmm2, %xmm2
+;
AVX1-NEXT: movq %rdx, %r8 +; AVX1-NEXT: shlq $38, %r10 +; AVX1-NEXT: sarq $63, %r10 +; AVX1-NEXT: vpinsrb $9, %r10d, %xmm2, %xmm2 +; AVX1-NEXT: movsbq %dl, %rax +; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shlq $37, %rax +; AVX1-NEXT: sarq $63, %rax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdx, %r10 +; AVX1-NEXT: shlq $36, %rsi +; AVX1-NEXT: sarq $63, %rsi +; AVX1-NEXT: vpinsrb $11, %esi, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdx, %rsi +; AVX1-NEXT: shlq $35, %r11 +; AVX1-NEXT: sarq $63, %r11 +; AVX1-NEXT: vpinsrb $12, %r11d, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdx, %r11 +; AVX1-NEXT: shlq $34, %r14 +; AVX1-NEXT: sarq $63, %r14 +; AVX1-NEXT: vpinsrb $13, %r14d, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdx, %r14 +; AVX1-NEXT: shlq $33, %r15 +; AVX1-NEXT: sarq $63, %r15 +; AVX1-NEXT: vpinsrb $14, %r15d, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdx, %r15 +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shrq $31, %rax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdx, %rax +; AVX1-NEXT: shlq $63, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vmovd %ecx, %xmm3 +; AVX1-NEXT: movq %rdx, %rcx +; AVX1-NEXT: movswq %dx, %rdx +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: shlq $62, %r13 +; AVX1-NEXT: sarq $63, %r13 +; AVX1-NEXT: vpinsrb $1, %r13d, %xmm3, %xmm1 +; AVX1-NEXT: shlq $61, %r9 +; AVX1-NEXT: sarq $63, %r9 +; AVX1-NEXT: vpinsrb $2, %r9d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $60, %r12 +; AVX1-NEXT: sarq $63, %r12 +; AVX1-NEXT: vpinsrb $3, %r12d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $59, %rdi +; AVX1-NEXT: sarq $63, %rdi +; AVX1-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1 +; AVX1-NEXT: shlq $58, %rbx +; AVX1-NEXT: sarq $63, %rbx +; AVX1-NEXT: vpinsrb $5, %ebx, %xmm1, %xmm1 +; AVX1-NEXT: shlq $57, %r8 +; AVX1-NEXT: sarq $63, %r8 +; AVX1-NEXT: vpinsrb $6, %r8d, %xmm1, %xmm1 +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; AVX1-NEXT: shrq $7, %rdi +; AVX1-NEXT: vpinsrb $7, %edi, %xmm1, %xmm1 +; AVX1-NEXT: shlq $55, %r10 +; AVX1-NEXT: sarq $63, %r10 +; AVX1-NEXT: vpinsrb $8, %r10d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $54, %rsi +; AVX1-NEXT: sarq $63, %rsi +; AVX1-NEXT: vpinsrb $9, %esi, %xmm1, %xmm1 +; AVX1-NEXT: shlq $53, %r11 +; AVX1-NEXT: sarq $63, %r11 +; AVX1-NEXT: vpinsrb $10, %r11d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $52, %r14 +; AVX1-NEXT: sarq $63, %r14 +; AVX1-NEXT: vpinsrb $11, %r14d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $51, %r15 +; AVX1-NEXT: sarq $63, %r15 +; AVX1-NEXT: vpinsrb $12, %r15d, %xmm1, %xmm1 +; AVX1-NEXT: shlq $50, %rax +; AVX1-NEXT: sarq $63, %rax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX1-NEXT: shlq $49, %rcx +; AVX1-NEXT: sarq $63, %rcx +; AVX1-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: shrq $15, %rdx +; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: leaq -40(%rbp), %rsp +; AVX1-NEXT: popq %rbx +; AVX1-NEXT: popq %r12 +; AVX1-NEXT: popq %r13 +; AVX1-NEXT: popq %r14 +; AVX1-NEXT: popq %r15 +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i64_64i8: +; AVX2: # BB#0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .Lcfi28: +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: .Lcfi29: +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: .Lcfi30: +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: andq $-32, %rsp 
+; AVX2-NEXT: subq $128, %rsp +; AVX2-NEXT: .Lcfi31: +; AVX2-NEXT: .cfi_offset %rbx, -56 +; AVX2-NEXT: .Lcfi32: +; AVX2-NEXT: .cfi_offset %r12, -48 +; AVX2-NEXT: .Lcfi33: +; AVX2-NEXT: .cfi_offset %r13, -40 +; AVX2-NEXT: .Lcfi34: +; AVX2-NEXT: .cfi_offset %r14, -32 +; AVX2-NEXT: .Lcfi35: +; AVX2-NEXT: .cfi_offset %r15, -24 +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) +; AVX2-NEXT: shrq $32, %rdi +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) +; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rdx +; AVX2-NEXT: movq %rdx, %rcx +; AVX2-NEXT: shlq $47, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vmovd %ecx, %xmm0 +; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq %rdx, %r8 +; AVX2-NEXT: movq %rdx, %rcx +; AVX2-NEXT: movq %rdx, %rdi +; AVX2-NEXT: movq %rdx, %r13 +; AVX2-NEXT: movq %rdx, %rsi +; AVX2-NEXT: movq %rdx, %r10 +; AVX2-NEXT: movq %rdx, %r11 +; AVX2-NEXT: movq %rdx, %r9 +; AVX2-NEXT: movq %rdx, %rbx +; AVX2-NEXT: movq %rdx, %r14 +; AVX2-NEXT: movq %rdx, %r15 +; AVX2-NEXT: movq %rdx, %r12 +; AVX2-NEXT: movq %rdx, %rax +; AVX2-NEXT: shlq $46, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: shlq $45, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: shlq $44, %r8 +; AVX2-NEXT: sarq $63, %r8 +; AVX2-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r8 +; AVX2-NEXT: shlq $43, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %rcx +; AVX2-NEXT: shlq $42, %rdi +; AVX2-NEXT: sarq $63, %rdi +; AVX2-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %rdi +; AVX2-NEXT: shlq $41, %r13 +; AVX2-NEXT: sarq $63, %r13 +; AVX2-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r13 +; AVX2-NEXT: shlq $40, %rsi +; AVX2-NEXT: sarq $63, %rsi +; AVX2-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %rsi +; AVX2-NEXT: shlq $39, %r10 +; AVX2-NEXT: sarq $63, %r10 +; AVX2-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r10 +; AVX2-NEXT: shlq $38, %r11 +; AVX2-NEXT: sarq $63, %r11 +; AVX2-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0 +; AVX2-NEXT: movsbq %dl, %rax +; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: shlq $37, %r9 +; AVX2-NEXT: sarq $63, %r9 +; AVX2-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r9 +; AVX2-NEXT: shlq $36, %rbx +; AVX2-NEXT: sarq $63, %rbx +; AVX2-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %rbx +; AVX2-NEXT: shlq $35, %r14 +; AVX2-NEXT: sarq $63, %r14 +; AVX2-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r14 +; AVX2-NEXT: shlq $34, %r15 +; AVX2-NEXT: sarq $63, %r15 +; AVX2-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r15 +; AVX2-NEXT: shlq $33, %r12 +; AVX2-NEXT: sarq $63, %r12 +; AVX2-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %r12 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: shrq $31, %rax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, %rax +; AVX2-NEXT: shlq $63, %r8 +; AVX2-NEXT: sarq $63, %r8 +; AVX2-NEXT: vmovd %r8d, %xmm1 +; AVX2-NEXT: movq %rdx, %r8 +; AVX2-NEXT: movswq %dx, %rdx +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; AVX2-NEXT: shlq $62, %r11 +; AVX2-NEXT: sarq $63, %r11 +; AVX2-NEXT: vpinsrb $1, %r11d, %xmm1, 
%xmm1 +; AVX2-NEXT: shlq $61, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $60, %rdi +; AVX2-NEXT: sarq $63, %rdi +; AVX2-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1 +; AVX2-NEXT: shlq $59, %r13 +; AVX2-NEXT: sarq $63, %r13 +; AVX2-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $58, %rsi +; AVX2-NEXT: sarq $63, %rsi +; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1 +; AVX2-NEXT: shlq $57, %r10 +; AVX2-NEXT: sarq $63, %r10 +; AVX2-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: shrq $7, %rcx +; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $55, %r9 +; AVX2-NEXT: sarq $63, %r9 +; AVX2-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $54, %rbx +; AVX2-NEXT: sarq $63, %rbx +; AVX2-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $53, %r14 +; AVX2-NEXT: sarq $63, %r14 +; AVX2-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $52, %r15 +; AVX2-NEXT: sarq $63, %r15 +; AVX2-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $51, %r12 +; AVX2-NEXT: sarq $63, %r12 +; AVX2-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $50, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX2-NEXT: shlq $49, %r8 +; AVX2-NEXT: sarq $63, %r8 +; AVX2-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1 +; AVX2-NEXT: shrq $15, %rdx +; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1 +; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rdx +; AVX2-NEXT: movq %rdx, %rcx +; AVX2-NEXT: shlq $47, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vmovd %ecx, %xmm2 +; AVX2-NEXT: movq %rdx, %r13 +; AVX2-NEXT: movq %rdx, %rcx +; AVX2-NEXT: movq %rdx, %r9 +; AVX2-NEXT: movq %rdx, %r12 +; AVX2-NEXT: movq %rdx, %rdi +; AVX2-NEXT: movq %rdx, %rbx +; AVX2-NEXT: movq %rdx, %r8 +; AVX2-NEXT: movq %rdx, %r10 +; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq %rdx, %rsi +; AVX2-NEXT: movq %rdx, %r11 +; AVX2-NEXT: movq %rdx, %r14 +; AVX2-NEXT: movq %rdx, %r15 +; AVX2-NEXT: movq %rdx, %rax +; AVX2-NEXT: shlq $46, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: shlq $45, %r13 +; AVX2-NEXT: sarq $63, %r13 +; AVX2-NEXT: vpinsrb $2, %r13d, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %r13 +; AVX2-NEXT: shlq $44, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %rcx +; AVX2-NEXT: shlq $43, %r9 +; AVX2-NEXT: sarq $63, %r9 +; AVX2-NEXT: vpinsrb $4, %r9d, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %r9 +; AVX2-NEXT: shlq $42, %r12 +; AVX2-NEXT: sarq $63, %r12 +; AVX2-NEXT: vpinsrb $5, %r12d, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %r12 +; AVX2-NEXT: shlq $41, %rdi +; AVX2-NEXT: sarq $63, %rdi +; AVX2-NEXT: vpinsrb $6, %edi, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %rdi +; AVX2-NEXT: shlq $40, %rbx +; AVX2-NEXT: sarq $63, %rbx +; AVX2-NEXT: vpinsrb $7, %ebx, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %rbx +; AVX2-NEXT: shlq $39, %r8 +; AVX2-NEXT: sarq $63, %r8 +; AVX2-NEXT: vpinsrb $8, %r8d, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %r8 +; AVX2-NEXT: shlq $38, %r10 +; AVX2-NEXT: sarq $63, %r10 +; AVX2-NEXT: vpinsrb $9, %r10d, %xmm2, %xmm2 +; AVX2-NEXT: movsbq %dl, %rax +; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: shlq $37, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %r10 +; AVX2-NEXT: shlq 
$36, %rsi +; AVX2-NEXT: sarq $63, %rsi +; AVX2-NEXT: vpinsrb $11, %esi, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %rsi +; AVX2-NEXT: shlq $35, %r11 +; AVX2-NEXT: sarq $63, %r11 +; AVX2-NEXT: vpinsrb $12, %r11d, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %r11 +; AVX2-NEXT: shlq $34, %r14 +; AVX2-NEXT: sarq $63, %r14 +; AVX2-NEXT: vpinsrb $13, %r14d, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %r14 +; AVX2-NEXT: shlq $33, %r15 +; AVX2-NEXT: sarq $63, %r15 +; AVX2-NEXT: vpinsrb $14, %r15d, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %r15 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: shrq $31, %rax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdx, %rax +; AVX2-NEXT: shlq $63, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vmovd %ecx, %xmm3 +; AVX2-NEXT: movq %rdx, %rcx +; AVX2-NEXT: movswq %dx, %rdx +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: shlq $62, %r13 +; AVX2-NEXT: sarq $63, %r13 +; AVX2-NEXT: vpinsrb $1, %r13d, %xmm3, %xmm1 +; AVX2-NEXT: shlq $61, %r9 +; AVX2-NEXT: sarq $63, %r9 +; AVX2-NEXT: vpinsrb $2, %r9d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $60, %r12 +; AVX2-NEXT: sarq $63, %r12 +; AVX2-NEXT: vpinsrb $3, %r12d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $59, %rdi +; AVX2-NEXT: sarq $63, %rdi +; AVX2-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1 +; AVX2-NEXT: shlq $58, %rbx +; AVX2-NEXT: sarq $63, %rbx +; AVX2-NEXT: vpinsrb $5, %ebx, %xmm1, %xmm1 +; AVX2-NEXT: shlq $57, %r8 +; AVX2-NEXT: sarq $63, %r8 +; AVX2-NEXT: vpinsrb $6, %r8d, %xmm1, %xmm1 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; AVX2-NEXT: shrq $7, %rdi +; AVX2-NEXT: vpinsrb $7, %edi, %xmm1, %xmm1 +; AVX2-NEXT: shlq $55, %r10 +; AVX2-NEXT: sarq $63, %r10 +; AVX2-NEXT: vpinsrb $8, %r10d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $54, %rsi +; AVX2-NEXT: sarq $63, %rsi +; AVX2-NEXT: vpinsrb $9, %esi, %xmm1, %xmm1 +; AVX2-NEXT: shlq $53, %r11 +; AVX2-NEXT: sarq $63, %r11 +; AVX2-NEXT: vpinsrb $10, %r11d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $52, %r14 +; AVX2-NEXT: sarq $63, %r14 +; AVX2-NEXT: vpinsrb $11, %r14d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $51, %r15 +; AVX2-NEXT: sarq $63, %r15 +; AVX2-NEXT: vpinsrb $12, %r15d, %xmm1, %xmm1 +; AVX2-NEXT: shlq $50, %rax +; AVX2-NEXT: sarq $63, %rax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX2-NEXT: shlq $49, %rcx +; AVX2-NEXT: sarq $63, %rcx +; AVX2-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: shrq $15, %rdx +; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2-NEXT: leaq -40(%rbp), %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i64_64i8: +; AVX512: # BB#0: +; AVX512-NEXT: kmovq %rdi, %k0 +; AVX512-NEXT: vpmovm2b %k0, %zmm0 +; AVX512-NEXT: retq + %1 = bitcast i64 %a0 to <64 x i1> + %2 = sext <64 x i1> %1 to <64 x i8> + ret <64 x i8> %2 +} diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll new file mode 100644 index 00000000000..aa9e60df140 --- /dev/null +++ b/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll @@ -0,0 +1,3279 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | 
FileCheck %s --check-prefixes=AVX12,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512 + +; +; 128-bit vectors +; + +define <2 x i64> @ext_i2_2i64(i2 %a0) { +; SSE2-SSSE3-LABEL: ext_i2_2i64: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: andb $3, %dil +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 +; SSE2-SSSE3-NEXT: shrl %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movq %rax, %xmm1 +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: ext_i2_2i64: +; AVX12: # BB#0: +; AVX12-NEXT: andb $3, %dil +; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vmovq %rcx, %xmm0 +; AVX12-NEXT: shrl %eax +; AVX12-NEXT: andl $1, %eax +; AVX12-NEXT: vmovq %rax, %xmm1 +; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX12-NEXT: retq +; +; AVX512-LABEL: ext_i2_2i64: +; AVX512: # BB#0: +; AVX512-NEXT: andb $3, %dil +; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z} +; AVX512-NEXT: # kill: %XMM0 %XMM0 %ZMM0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %1 = bitcast i2 %a0 to <2 x i1> + %2 = zext <2 x i1> %1 to <2 x i64> + ret <2 x i64> %2 +} + +define <4 x i32> @ext_i4_4i32(i4 %a0) { +; SSE2-SSSE3-LABEL: ext_i4_4i32: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: andb $15, %dil +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: shrl %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm2 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i4_4i32: +; AVX1: # BB#0: +; AVX1-NEXT: andb $15, %dil +; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl %ecx +; AVX1-NEXT: vmovd %eax, %xmm0 +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $2, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: shrl $3, %eax +; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i4_4i32: +; AVX2: # BB#0: +; AVX2-NEXT: andb $15, %dil +; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl %ecx +; AVX2-NEXT: vmovd %eax, %xmm0 +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $2, %ecx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: shrl $3, %eax +; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 +; 
AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1] +; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i4_4i32: +; AVX512: # BB#0: +; AVX512-NEXT: andb $15, %dil +; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z} +; AVX512-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %1 = bitcast i4 %a0 to <4 x i1> + %2 = zext <4 x i1> %1 to <4 x i32> + ret <4 x i32> %2 +} + +define <8 x i16> @ext_i8_8i16(i8 %a0) { +; SSE2-SSSE3-LABEL: ext_i8_8i16: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: shrl $7, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm3 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: ext_i8_8i16: +; AVX12: # BB#0: +; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: movl %eax, %edx +; AVX12-NEXT: andl $1, %edx +; AVX12-NEXT: vmovd %edx, %xmm0 +; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $2, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $3, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $4, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $5, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $6, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: 
shrl $7, %eax +; AVX12-NEXT: movzwl %ax, %eax +; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 +; AVX12-NEXT: retq +; +; AVX512-LABEL: ext_i8_8i16: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k5 +; AVX512-NEXT: kshiftlw $8, %k5, %k0 +; AVX512-NEXT: kshiftrw $15, %k0, %k0 +; AVX512-NEXT: kshiftlw $9, %k5, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kshiftlw $10, %k5, %k2 +; AVX512-NEXT: kshiftrw $15, %k2, %k2 +; AVX512-NEXT: kshiftlw $11, %k5, %k3 +; AVX512-NEXT: kshiftrw $15, %k3, %k3 +; AVX512-NEXT: kshiftlw $12, %k5, %k4 +; AVX512-NEXT: kshiftrw $15, %k4, %k4 +; AVX512-NEXT: kshiftlw $13, %k5, %k6 +; AVX512-NEXT: kshiftrw $15, %k6, %k6 +; AVX512-NEXT: kshiftlw $15, %k5, %k7 +; AVX512-NEXT: kshiftrw $15, %k7, %k7 +; AVX512-NEXT: kshiftlw $14, %k5, %k5 +; AVX512-NEXT: kshiftrw $15, %k5, %k5 +; AVX512-NEXT: kmovd %k5, %eax +; AVX512-NEXT: andl $1, %eax +; AVX512-NEXT: kmovd %k7, %ecx +; AVX512-NEXT: andl $1, %ecx +; AVX512-NEXT: vmovd %ecx, %xmm0 +; AVX512-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 +; AVX512-NEXT: kmovd %k6, %eax +; AVX512-NEXT: andl $1, %eax +; AVX512-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 +; AVX512-NEXT: kmovd %k4, %eax +; AVX512-NEXT: andl $1, %eax +; AVX512-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 +; AVX512-NEXT: kmovd %k3, %eax +; AVX512-NEXT: andl $1, %eax +; AVX512-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 +; AVX512-NEXT: kmovd %k2, %eax +; AVX512-NEXT: andl $1, %eax +; AVX512-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 +; AVX512-NEXT: kmovd %k1, %eax +; AVX512-NEXT: andl $1, %eax +; AVX512-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: andl $1, %eax +; AVX512-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 +; AVX512-NEXT: retq + %1 = bitcast i8 %a0 to <8 x i1> + %2 = zext <8 x i1> %1 to <8 x i16> + ret <8 x i16> %2 +} + +define <16 x i8> @ext_i16_16i8(i16 %a0) { +; SSE2-SSSE3-LABEL: ext_i16_16i8: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; 
SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: ext_i16_16i8: +; AVX12: # BB#0: +; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; AVX12-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: movl %eax, %edx +; AVX12-NEXT: andl $1, %edx +; AVX12-NEXT: vmovd %edx, %xmm0 +; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $2, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $3, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $4, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $5, %ecx +; 
AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $6, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $7, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $8, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $9, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $10, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $11, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $12, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $13, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $14, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: shrl $15, %eax +; AVX12-NEXT: movzwl %ax, %eax +; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX12-NEXT: retq +; +; AVX512-LABEL: ext_i16_16i8: +; AVX512: # BB#0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: .Lcfi0: +; AVX512-NEXT: .cfi_def_cfa_offset 16 +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: .Lcfi1: +; AVX512-NEXT: .cfi_def_cfa_offset 24 +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: .Lcfi2: +; AVX512-NEXT: .cfi_def_cfa_offset 32 +; AVX512-NEXT: pushq %r13 +; AVX512-NEXT: .Lcfi3: +; AVX512-NEXT: .cfi_def_cfa_offset 40 +; AVX512-NEXT: pushq %r12 +; AVX512-NEXT: .Lcfi4: +; AVX512-NEXT: .cfi_def_cfa_offset 48 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: .Lcfi5: +; AVX512-NEXT: .cfi_def_cfa_offset 56 +; AVX512-NEXT: .Lcfi6: +; AVX512-NEXT: .cfi_offset %rbx, -56 +; AVX512-NEXT: .Lcfi7: +; AVX512-NEXT: .cfi_offset %r12, -48 +; AVX512-NEXT: .Lcfi8: +; AVX512-NEXT: .cfi_offset %r13, -40 +; AVX512-NEXT: .Lcfi9: +; AVX512-NEXT: .cfi_offset %r14, -32 +; AVX512-NEXT: .Lcfi10: +; AVX512-NEXT: .cfi_offset %r15, -24 +; AVX512-NEXT: .Lcfi11: +; AVX512-NEXT: .cfi_offset %rbp, -16 +; AVX512-NEXT: kmovd %edi, %k0 +; AVX512-NEXT: kshiftlw $14, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %r8d +; AVX512-NEXT: kshiftlw $15, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %r9d +; AVX512-NEXT: kshiftlw $13, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %r10d +; AVX512-NEXT: kshiftlw $12, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %r11d +; AVX512-NEXT: kshiftlw $11, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %r14d +; AVX512-NEXT: kshiftlw $10, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %r15d +; AVX512-NEXT: kshiftlw $9, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %r12d +; AVX512-NEXT: kshiftlw $8, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %r13d +; AVX512-NEXT: kshiftlw $7, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %esi +; AVX512-NEXT: kshiftlw $6, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %ebx +; AVX512-NEXT: kshiftlw 
$5, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %ebp +; AVX512-NEXT: kshiftlw $4, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %edi +; AVX512-NEXT: kshiftlw $3, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %eax +; AVX512-NEXT: kshiftlw $2, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %ecx +; AVX512-NEXT: kshiftlw $1, %k0, %k1 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k1, %edx +; AVX512-NEXT: kshiftrw $15, %k0, %k0 +; AVX512-NEXT: vmovd %r9d, %xmm0 +; AVX512-NEXT: kmovd %k0, %r9d +; AVX512-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $2, %r10d, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $8, %esi, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $10, %ebp, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $11, %edi, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $14, %edx, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrb $15, %r9d, %xmm0, %xmm0 +; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r12 +; AVX512-NEXT: popq %r13 +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: popq %r15 +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %1 = bitcast i16 %a0 to <16 x i1> + %2 = zext <16 x i1> %1 to <16 x i8> + ret <16 x i8> %2 +} + +; +; 256-bit vectors +; + +define <4 x i64> @ext_i4_4i64(i4 %a0) { +; SSE2-SSSE3-LABEL: ext_i4_4i64: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: andb $15, %dil +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-SSSE3-NEXT: movd %eax, %xmm2 +; SSE2-SSSE3-NEXT: shrl %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [1,1] +; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3] +; SSE2-SSSE3-NEXT: pand %xmm3, %xmm1 +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i4_4i64: +; AVX1: # BB#0: +; AVX1-NEXT: andb $15, %dil +; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovq %rcx, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $2, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovq %rcx, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovq %rcx, %xmm1 +; AVX1-NEXT: shrl %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vmovq %rax, %xmm2 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: 
ext_i4_4i64: +; AVX2: # BB#0: +; AVX2-NEXT: andb $15, %dil +; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovq %rcx, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovq %rcx, %xmm1 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovq %rcx, %xmm1 +; AVX2-NEXT: shrl %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vmovq %rax, %xmm2 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i4_4i64: +; AVX512: # BB#0: +; AVX512-NEXT: andb $15, %dil +; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z} +; AVX512-NEXT: # kill: %YMM0 %YMM0 %ZMM0 +; AVX512-NEXT: retq + %1 = bitcast i4 %a0 to <4 x i1> + %2 = zext <4 x i1> %1 to <4 x i64> + ret <4 x i64> %2 +} + +define <8 x i32> @ext_i8_8i32(i8 %a0) { +; SSE2-SSSE3-LABEL: ext_i8_8i32: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: shrl $7, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm3 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1] +; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i8_8i32: +; AVX1: # BB#0: +; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; 
AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $5, %ecx +; AVX1-NEXT: movl %eax, %edx +; AVX1-NEXT: shrl $4, %edx +; AVX1-NEXT: vmovd %edx, %xmm0 +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $6, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $7, %ecx +; AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl %ecx +; AVX1-NEXT: vmovd %eax, %xmm1 +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $2, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: shrl $3, %eax +; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i8_8i32: +; AVX2: # BB#0: +; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $5, %ecx +; AVX2-NEXT: movl %eax, %edx +; AVX2-NEXT: shrl $4, %edx +; AVX2-NEXT: vmovd %edx, %xmm0 +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $6, %ecx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $7, %ecx +; AVX2-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl %ecx +; AVX2-NEXT: vmovd %eax, %xmm1 +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $2, %ecx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: shrl $3, %eax +; AVX2-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] +; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i8_8i32: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k1 +; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z} +; AVX512-NEXT: retq + %1 = bitcast i8 %a0 to <8 x i1> + %2 = zext <8 x i1> %1 to <8 x i32> + ret <8 x i32> %2 +} + +define <16 x i16> @ext_i16_16i16(i16 %a0) { +; SSE2-SSSE3-LABEL: ext_i16_16i16: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: 
shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1] +; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i16_16i16: +; AVX1: # BB#0: +; 
AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $9, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: movl %eax, %edx +; AVX1-NEXT: shrl $8, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: vmovd %edx, %xmm0 +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $10, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $11, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $12, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $13, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $14, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $15, %ecx +; AVX1-NEXT: movzwl %cx, %ecx +; AVX1-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: movl %eax, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: vmovd %edx, %xmm1 +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $2, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $4, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $5, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $6, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 +; AVX1-NEXT: shrl $7, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i16_16i16: +; AVX2: # BB#0: +; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $9, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: movl %eax, %edx +; AVX2-NEXT: shrl $8, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: vmovd %edx, %xmm0 +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $10, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $11, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $12, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $13, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $14, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $15, %ecx +; AVX2-NEXT: movzwl %cx, %ecx +; AVX2-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: movl %eax, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: vmovd %edx, %xmm1 +; AVX2-NEXT: vpinsrw $1, %ecx, 
%xmm1, %xmm1 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $4, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $5, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $6, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1 +; AVX2-NEXT: shrl $7, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i16_16i16: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k1 +; AVX512-NEXT: vmovdqu16 {{.*}}(%rip), %ymm0 {%k1} {z} +; AVX512-NEXT: retq + %1 = bitcast i16 %a0 to <16 x i1> + %2 = zext <16 x i1> %1 to <16 x i16> + ret <16 x i16> %2 +} + +define <32 x i8> @ext_i32_32i8(i32 %a0) { +; SSE2-SSSE3-LABEL: ext_i32_32i8: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: shrl $16, %edi +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; 
SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = 
xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm5 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i32_32i8: +; AVX1: # BB#0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: .Lcfi0: +; AVX1-NEXT: .cfi_def_cfa_offset 16 +; AVX1-NEXT: .Lcfi1: +; AVX1-NEXT: .cfi_offset %rbp, -16 +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: .Lcfi2: +; AVX1-NEXT: .cfi_def_cfa_register %rbp +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $32, %rsp +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $17, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movl %edi, %ecx +; AVX1-NEXT: shrl $16, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm0 +; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: 
shrl $18, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $19, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $20, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $21, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $22, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $23, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $24, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $25, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $26, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $27, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $28, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $29, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $30, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $31, %eax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movl %edi, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm1 +; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $2, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $3, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $4, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $5, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $6, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $7, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $8, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $9, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $10, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $11, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $12, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $13, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb 
$13, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $14, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX1-NEXT: shrl $15, %edi +; AVX1-NEXT: andl $1, %edi +; AVX1-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: movq %rbp, %rsp +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i32_32i8: +; AVX2: # BB#0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .Lcfi0: +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: .Lcfi1: +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: .Lcfi2: +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $32, %rsp +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $17, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movl %edi, %ecx +; AVX2-NEXT: shrl $16, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm0 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $18, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $19, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $20, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $21, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $22, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $23, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $24, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $25, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $26, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $27, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $28, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $29, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $30, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $31, %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movl %edi, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm1 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $3, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $4, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $5, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; 
AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $6, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $7, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $8, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $9, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $10, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $11, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $12, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $13, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $14, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX2-NEXT: shrl $15, %edi +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i32_32i8: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k1 +; AVX512-NEXT: vmovdqu8 {{.*}}(%rip), %ymm0 {%k1} {z} +; AVX512-NEXT: retq + %1 = bitcast i32 %a0 to <32 x i1> + %2 = zext <32 x i1> %1 to <32 x i8> + ret <32 x i8> %2 +} + +; +; 512-bit vectors +; + +define <8 x i64> @ext_i8_8i64(i8 %a0) { +; SSE2-SSSE3-LABEL: ext_i8_8i64: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: shrl $7, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; 
SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0] +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3] +; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3] +; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3] +; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3] +; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i8_8i64: +; AVX1: # BB#0: +; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: movl %eax, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: vmovd %edx, %xmm0 +; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $2, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $4, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $5, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $6, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: shrl $7, %eax +; AVX1-NEXT: movzwl %ax, %eax +; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1] +; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 +; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i8_8i64: +; AVX2: # BB#0: +; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: movl %eax, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: vmovd %edx, %xmm0 +; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $4, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $5, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $6, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: 
vpinsrw $6, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: shrl $7, %eax +; AVX2-NEXT: movzwl %ax, %eax +; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1 +; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1] +; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i8_8i64: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k1 +; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z} +; AVX512-NEXT: retq + %1 = bitcast i8 %a0 to <8 x i1> + %2 = zext <8 x i1> %1 to <8 x i64> + ret <8 x i64> %2 +} + +define <16 x i32> @ext_i16_16i32(i16 %a0) { +; SSE2-SSSE3-LABEL: ext_i16_16i32: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = 
xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] +; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1 +; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2 +; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i16_16i32: +; AVX1: # BB#0: +; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: movl %eax, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: vmovd %edx, %xmm0 +; AVX1-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $2, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $3, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; 
AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $4, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $5, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $6, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $7, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $8, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $9, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $10, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $11, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $12, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $13, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $14, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: shrl $15, %eax +; AVX1-NEXT: movzwl %ax, %eax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4,4,5,5,6,6,7,7] +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] +; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 +; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i16_16i32: +; AVX2: # BB#0: +; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: movl %eax, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: vmovd %edx, %xmm0 +; AVX2-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $4, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $5, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $6, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $7, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: 
movl %eax, %ecx +; AVX2-NEXT: shrl $8, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $9, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $10, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $11, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $12, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $13, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $14, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: shrl $15, %eax +; AVX2-NEXT: movzwl %ax, %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1 +; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] +; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i16_16i32: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k1 +; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} +; AVX512-NEXT: retq + %1 = bitcast i16 %a0 to <16 x i1> + %2 = zext <16 x i1> %1 to <16 x i32> + ret <16 x i32> %2 +} + +define <32 x i16> @ext_i32_32i16(i32 %a0) { +; SSE2-SSSE3-LABEL: ext_i32_32i16: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movl %edi, %eax +; SSE2-SSSE3-NEXT: shrl $16, %eax +; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, 
%ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, 
%ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm5 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = 
xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0 +; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1 +; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2 +; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3 +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i32_32i16: +; AVX1: # BB#0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: .Lcfi3: +; AVX1-NEXT: .cfi_def_cfa_offset 16 +; AVX1-NEXT: .Lcfi4: +; AVX1-NEXT: .cfi_offset %rbp, -16 +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: .Lcfi5: +; AVX1-NEXT: .cfi_def_cfa_register %rbp +; AVX1-NEXT: pushq %r15 +; AVX1-NEXT: pushq %r14 +; AVX1-NEXT: pushq %r13 +; AVX1-NEXT: pushq %r12 +; AVX1-NEXT: pushq %rbx +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $128, %rsp +; AVX1-NEXT: .Lcfi6: +; AVX1-NEXT: .cfi_offset %rbx, -56 +; AVX1-NEXT: .Lcfi7: +; AVX1-NEXT: .cfi_offset %r12, -48 +; AVX1-NEXT: .Lcfi8: +; AVX1-NEXT: .cfi_offset %r13, -40 +; AVX1-NEXT: .Lcfi9: +; AVX1-NEXT: .cfi_offset %r14, -32 +; AVX1-NEXT: .Lcfi10: +; AVX1-NEXT: .cfi_offset %r15, -24 +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX1-NEXT: movl %edi, %r13d +; AVX1-NEXT: movl %edi, %r12d +; AVX1-NEXT: movl %edi, %r15d +; AVX1-NEXT: movl %edi, %r14d +; AVX1-NEXT: movl %edi, %ebx +; AVX1-NEXT: movl %edi, %r11d +; AVX1-NEXT: movl %edi, %r10d +; AVX1-NEXT: movl %edi, %r9d +; AVX1-NEXT: movl %edi, %r8d +; AVX1-NEXT: movl %edi, %esi +; AVX1-NEXT: movl %edi, %edx +; AVX1-NEXT: movl %edi, %ecx +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: andl $1, %edi +; AVX1-NEXT: vmovd %edi, %xmm0 +; AVX1-NEXT: shrl %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX1-NEXT: shrl $2, %ecx +; AVX1-NEXT: andl $1, %ecx +; 
AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: shrl $3, %edx +; AVX1-NEXT: andl $1, %edx +; AVX1-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0 +; AVX1-NEXT: shrl $4, %esi +; AVX1-NEXT: andl $1, %esi +; AVX1-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0 +; AVX1-NEXT: shrl $5, %r8d +; AVX1-NEXT: andl $1, %r8d +; AVX1-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0 +; AVX1-NEXT: shrl $6, %r9d +; AVX1-NEXT: andl $1, %r9d +; AVX1-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0 +; AVX1-NEXT: shrl $7, %r10d +; AVX1-NEXT: andl $1, %r10d +; AVX1-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0 +; AVX1-NEXT: shrl $8, %r11d +; AVX1-NEXT: andl $1, %r11d +; AVX1-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0 +; AVX1-NEXT: shrl $9, %ebx +; AVX1-NEXT: andl $1, %ebx +; AVX1-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0 +; AVX1-NEXT: shrl $10, %r14d +; AVX1-NEXT: andl $1, %r14d +; AVX1-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0 +; AVX1-NEXT: shrl $11, %r15d +; AVX1-NEXT: andl $1, %r15d +; AVX1-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0 +; AVX1-NEXT: shrl $12, %r12d +; AVX1-NEXT: andl $1, %r12d +; AVX1-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0 +; AVX1-NEXT: shrl $13, %r13d +; AVX1-NEXT: andl $1, %r13d +; AVX1-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $14, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $15, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $16, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vmovd %eax, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $17, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $18, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $19, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $20, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $21, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $22, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $23, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $24, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $25, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $26, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $27, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $28, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $12, 
%eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $29, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $30, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX1-NEXT: shrl $31, %eax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 +; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 +; AVX1-NEXT: leaq -40(%rbp), %rsp +; AVX1-NEXT: popq %rbx +; AVX1-NEXT: popq %r12 +; AVX1-NEXT: popq %r13 +; AVX1-NEXT: popq %r14 +; AVX1-NEXT: popq %r15 +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: ext_i32_32i16: +; AVX2: # BB#0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .Lcfi3: +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: .Lcfi4: +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: .Lcfi5: +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $128, %rsp +; AVX2-NEXT: .Lcfi6: +; AVX2-NEXT: .cfi_offset %rbx, -56 +; AVX2-NEXT: .Lcfi7: +; AVX2-NEXT: .cfi_offset %r12, -48 +; AVX2-NEXT: .Lcfi8: +; AVX2-NEXT: .cfi_offset %r13, -40 +; AVX2-NEXT: .Lcfi9: +; AVX2-NEXT: .cfi_offset %r14, -32 +; AVX2-NEXT: .Lcfi10: +; AVX2-NEXT: .cfi_offset %r15, -24 +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill +; AVX2-NEXT: movl %edi, %r13d +; AVX2-NEXT: movl %edi, %r12d +; AVX2-NEXT: movl %edi, %r15d +; AVX2-NEXT: movl %edi, %r14d +; AVX2-NEXT: movl %edi, %ebx +; AVX2-NEXT: movl %edi, %r11d +; AVX2-NEXT: movl %edi, %r10d +; AVX2-NEXT: movl %edi, %r9d +; AVX2-NEXT: movl %edi, %r8d +; AVX2-NEXT: movl %edi, %esi +; AVX2-NEXT: movl %edi, %edx +; AVX2-NEXT: movl %edi, %ecx +; 
AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: vmovd %edi, %xmm0 +; AVX2-NEXT: shrl %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX2-NEXT: shrl $2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: shrl $3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0 +; AVX2-NEXT: shrl $4, %esi +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0 +; AVX2-NEXT: shrl $5, %r8d +; AVX2-NEXT: andl $1, %r8d +; AVX2-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0 +; AVX2-NEXT: shrl $6, %r9d +; AVX2-NEXT: andl $1, %r9d +; AVX2-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0 +; AVX2-NEXT: shrl $7, %r10d +; AVX2-NEXT: andl $1, %r10d +; AVX2-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0 +; AVX2-NEXT: shrl $8, %r11d +; AVX2-NEXT: andl $1, %r11d +; AVX2-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0 +; AVX2-NEXT: shrl $9, %ebx +; AVX2-NEXT: andl $1, %ebx +; AVX2-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0 +; AVX2-NEXT: shrl $10, %r14d +; AVX2-NEXT: andl $1, %r14d +; AVX2-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0 +; AVX2-NEXT: shrl $11, %r15d +; AVX2-NEXT: andl $1, %r15d +; AVX2-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0 +; AVX2-NEXT: shrl $12, %r12d +; AVX2-NEXT: andl $1, %r12d +; AVX2-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0 +; AVX2-NEXT: shrl $13, %r13d +; AVX2-NEXT: andl $1, %r13d +; AVX2-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $14, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $15, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $16, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vmovd %eax, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $17, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $18, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $19, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $20, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $21, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $22, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $23, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $24, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $25, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $26, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; 
AVX2-NEXT: shrl $27, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $28, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $29, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $30, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload +; AVX2-NEXT: shrl $31, %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: leaq -40(%rbp), %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i32_32i16: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k1 +; AVX512-NEXT: vmovdqu16 {{.*}}(%rip), %zmm0 {%k1} {z} +; AVX512-NEXT: retq + %1 = bitcast i32 %a0 to <32 x i1> + %2 = zext <32 x i1> %1 to <32 x i16> + ret <32 x i16> %2 +} + +define <64 x i8> @ext_i64_64i8(i64 %a0) { +; SSE2-SSSE3-LABEL: ext_i64_64i8: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movq %rdi, %rax +; SSE2-SSSE3-NEXT: shrq $32, %rax +; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movq %rdi, %rax +; SSE2-SSSE3-NEXT: shrq $48, %rax +; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: shrl $16, %edi +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, 
%ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] 
+; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm5 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] +; SSE2-SSSE3-NEXT: 
punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: 
movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm6 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0] +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm6 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = 
xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm6 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm6 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm5 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm7 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: ext_i64_64i8: +; AVX1: # BB#0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: .Lcfi11: +; AVX1-NEXT: .cfi_def_cfa_offset 16 +; AVX1-NEXT: .Lcfi12: +; AVX1-NEXT: .cfi_offset %rbp, -16 +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: .Lcfi13: +; AVX1-NEXT: .cfi_def_cfa_register %rbp +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $64, %rsp +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $17, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movl %edi, %ecx +; AVX1-NEXT: shrl $16, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm0 +; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $18, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $19, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $20, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $21, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $22, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $23, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $24, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $25, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl 
$26, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $27, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $28, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $29, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $30, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $31, %eax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movl %edi, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm1 +; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $2, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $3, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $4, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $5, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $6, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $7, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $8, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $9, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $10, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $11, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $12, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $13, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $14, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $15, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $49, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movq %rdi, %rcx +; AVX1-NEXT: shrq $48, %rcx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm1 +; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $50, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $51, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $52, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $53, %rax +; AVX1-NEXT: 
andl $1, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $54, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $55, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $56, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $57, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $58, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $59, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $60, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $61, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $62, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $63, %rax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $33, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movq %rdi, %rcx +; AVX1-NEXT: shrq $32, %rcx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm2 +; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $34, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $35, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $36, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $37, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $38, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $39, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $40, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $41, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $42, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $43, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $44, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $45, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 +; AVX1-NEXT: movq %rdi, %rax +; AVX1-NEXT: shrq $46, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 +; AVX1-NEXT: shrq $47, %rdi +; AVX1-NEXT: andl $1, %edi +; AVX1-NEXT: vpinsrb $15, %edi, %xmm2, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX1-NEXT: movq %rbp, %rsp +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq 
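+; Note that AVX1 has no 256-bit integer element inserts, so each 32-byte result above is built as two 16-byte halves via vpinsrb and then recombined with vinsertf128.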
+; +; AVX2-LABEL: ext_i64_64i8: +; AVX2: # BB#0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .Lcfi11: +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: .Lcfi12: +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: .Lcfi13: +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $64, %rsp +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $17, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movl %edi, %ecx +; AVX2-NEXT: shrl $16, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm0 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $18, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $19, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $20, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $21, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $22, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $23, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $24, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $25, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $26, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $27, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $28, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $29, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $30, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $31, %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movl %edi, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm1 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $3, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $4, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $5, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $6, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $7, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $8, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, 
%xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $9, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $10, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $11, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $12, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $13, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $14, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $15, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $49, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movq %rdi, %rcx +; AVX2-NEXT: shrq $48, %rcx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm1 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $50, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $51, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $52, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $53, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $54, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $55, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $56, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $57, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $58, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $59, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $60, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $61, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $62, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $63, %rax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $33, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movq %rdi, %rcx +; AVX2-NEXT: shrq $32, %rcx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm2 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $34, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $35, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb 
$3, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $36, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $37, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $38, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $39, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $40, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $41, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $42, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $43, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $44, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $45, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: shrq $46, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 +; AVX2-NEXT: shrq $47, %rdi +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: vpinsrb $15, %edi, %xmm2, %xmm2 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: ext_i64_64i8: +; AVX512: # BB#0: +; AVX512-NEXT: kmovq %rdi, %k1 +; AVX512-NEXT: vmovdqu8 {{.*}}(%rip), %zmm0 {%k1} {z} +; AVX512-NEXT: retq + %1 = bitcast i64 %a0 to <64 x i1> + %2 = zext <64 x i1> %1 to <64 x i8> + ret <64 x i8> %2 +} diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool.ll new file mode 100644 index 00000000000..a190e057552 --- /dev/null +++ b/test/CodeGen/X86/bitcast-int-to-vector-bool.ll @@ -0,0 +1,685 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512 + +define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) { +; SSE2-SSSE3-LABEL: bitcast_i2_2i1: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movq %rcx, %xmm0 +; SSE2-SSSE3-NEXT: shrl %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movq %rax, %xmm1 +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: bitcast_i2_2i1: +; AVX12: # BB#0: +; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: andl $1, %ecx 
+; AVX12-NEXT: vmovq %rcx, %xmm0 +; AVX12-NEXT: shrl %eax +; AVX12-NEXT: andl $1, %eax +; AVX12-NEXT: vmovq %rax, %xmm1 +; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX12-NEXT: retq +; +; AVX512-LABEL: bitcast_i2_2i1: +; AVX512: # BB#0: +; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: # kill: %XMM0 %XMM0 %ZMM0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %1 = bitcast i2 %a0 to <2 x i1> + ret <2 x i1> %1 +} + +define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) { +; SSE2-SSSE3-LABEL: bitcast_i4_4i1: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-SSSE3-NEXT: movd %eax, %xmm0 +; SSE2-SSSE3-NEXT: shrl %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm2 +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: bitcast_i4_4i1: +; AVX1: # BB#0: +; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl %ecx +; AVX1-NEXT: vmovd %eax, %xmm0 +; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: shrl $2, %ecx +; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: shrl $3, %eax +; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: bitcast_i4_4i1: +; AVX2: # BB#0: +; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl %ecx +; AVX2-NEXT: vmovd %eax, %xmm0 +; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: shrl $2, %ecx +; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: shrl $3, %eax +; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 +; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1] +; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: bitcast_i4_4i1: +; AVX512: # BB#0: +; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; AVX512-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %1 = bitcast i4 %a0 to <4 x i1> + ret <4 x i1> %1 +} + +define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) { +; SSE2-SSSE3-LABEL: bitcast_i8_8i1: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl 
$1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: shrl $7, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm3 +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: bitcast_i8_8i1: +; AVX12: # BB#0: +; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp) +; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: movl %eax, %edx +; AVX12-NEXT: andl $1, %edx +; AVX12-NEXT: vmovd %edx, %xmm0 +; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $2, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $3, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $4, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $5, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $6, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: shrl $7, %eax +; AVX12-NEXT: movzwl %ax, %eax +; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 +; AVX12-NEXT: retq +; +; AVX512-LABEL: bitcast_i8_8i1: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k0 +; AVX512-NEXT: vpmovm2w %k0, %xmm0 +; AVX512-NEXT: retq + %1 = bitcast i8 %a0 to <8 x i1> + ret <8 x i1> %1 +} + +define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) { +; SSE2-SSSE3-LABEL: bitcast_i16_16i1: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $7, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $6, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $5, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $4, %ecx +; 
SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $3, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $2, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $11, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $10, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $9, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $8, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm1 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $13, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $12, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm3 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-SSSE3-NEXT: movl %eax, %ecx +; SSE2-SSSE3-NEXT: shrl $14, %ecx +; SSE2-SSSE3-NEXT: andl $1, %ecx +; SSE2-SSSE3-NEXT: movd %ecx, %xmm2 +; SSE2-SSSE3-NEXT: shrl $15, %eax +; SSE2-SSSE3-NEXT: movzwl %ax, %eax +; SSE2-SSSE3-NEXT: movd %eax, %xmm4 +; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 
= xmm0[0],xmm1[0] +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: bitcast_i16_16i1: +; AVX12: # BB#0: +; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp) +; AVX12-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: movl %eax, %edx +; AVX12-NEXT: andl $1, %edx +; AVX12-NEXT: vmovd %edx, %xmm0 +; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $2, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $3, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $4, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $5, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $6, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $7, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $8, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $9, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $10, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $11, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $12, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $13, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: movl %eax, %ecx +; AVX12-NEXT: shrl $14, %ecx +; AVX12-NEXT: andl $1, %ecx +; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0 +; AVX12-NEXT: shrl $15, %eax +; AVX12-NEXT: movzwl %ax, %eax +; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX12-NEXT: retq +; +; AVX512-LABEL: bitcast_i16_16i1: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k0 +; AVX512-NEXT: vpmovm2b %k0, %xmm0 +; AVX512-NEXT: retq + %1 = bitcast i16 %a0 to <16 x i1> + ret <16 x i1> %1 +} + +define <32 x i1> @bitcast_i32_32i1(i32 %a0) { +; SSE2-SSSE3-LABEL: bitcast_i32_32i1: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movl %esi, (%rdi) +; SSE2-SSSE3-NEXT: movq %rdi, %rax +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: bitcast_i32_32i1: +; AVX1: # BB#0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: .Lcfi0: +; AVX1-NEXT: .cfi_def_cfa_offset 16 +; AVX1-NEXT: .Lcfi1: +; AVX1-NEXT: .cfi_offset %rbp, -16 +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: .Lcfi2: +; AVX1-NEXT: .cfi_def_cfa_register %rbp +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $32, %rsp +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $17, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movl %edi, %ecx +; AVX1-NEXT: shrl $16, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm0 +; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $18, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $19, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: 
vpinsrb $3, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $20, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $21, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $22, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $23, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $24, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $25, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $26, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $27, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $28, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $29, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $30, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $31, %eax +; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movl %edi, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm1 +; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $2, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $3, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $4, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $5, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $6, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $7, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $8, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $9, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $10, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $11, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $12, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $13, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX1-NEXT: movl %edi, %eax +; AVX1-NEXT: shrl $14, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX1-NEXT: shrl $15, %edi +; 
AVX1-NEXT: andl $1, %edi +; AVX1-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: movq %rbp, %rsp +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: bitcast_i32_32i1: +; AVX2: # BB#0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .Lcfi0: +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: .Lcfi1: +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: .Lcfi2: +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $32, %rsp +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $17, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movl %edi, %ecx +; AVX2-NEXT: shrl $16, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm0 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $18, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $19, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $20, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $21, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $22, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $23, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $24, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $25, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $26, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $27, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $28, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $29, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $30, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $31, %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movl %edi, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm1 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $3, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $4, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $5, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $6, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $7, %eax +; 
AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $8, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $9, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $10, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $11, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $12, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $13, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 +; AVX2-NEXT: movl %edi, %eax +; AVX2-NEXT: shrl $14, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 +; AVX2-NEXT: shrl $15, %edi +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1 +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: bitcast_i32_32i1: +; AVX512: # BB#0: +; AVX512-NEXT: kmovd %edi, %k0 +; AVX512-NEXT: vpmovm2b %k0, %ymm0 +; AVX512-NEXT: retq + %1 = bitcast i32 %a0 to <32 x i1> + ret <32 x i1> %1 +} + +define <64 x i1> @bitcast_i64_64i1(i64 %a0) { +; SSE2-SSSE3-LABEL: bitcast_i64_64i1: +; SSE2-SSSE3: # BB#0: +; SSE2-SSSE3-NEXT: movq %rsi, (%rdi) +; SSE2-SSSE3-NEXT: movq %rdi, %rax +; SSE2-SSSE3-NEXT: retq +; +; AVX12-LABEL: bitcast_i64_64i1: +; AVX12: # BB#0: +; AVX12-NEXT: movq %rsi, (%rdi) +; AVX12-NEXT: movq %rdi, %rax +; AVX12-NEXT: retq +; +; AVX512-LABEL: bitcast_i64_64i1: +; AVX512: # BB#0: +; AVX512-NEXT: kmovq %rdi, %k0 +; AVX512-NEXT: vpmovm2b %k0, %zmm0 +; AVX512-NEXT: retq + %1 = bitcast i64 %a0 to <64 x i1> + ret <64 x i1> %1 +}
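
For reference, a minimal standalone sketch of the pattern both files exercise (illustrative file and function names, not part of the patch): bitcast a scalar integer to an <N x i1> mask, optionally widening it. On the AVX512BW run line this lowers to a couple of mask-register instructions (a kmov plus vpmovm2* or a masked move, as in the AVX512 checks above); on the SSE2/SSSE3 and AVX run lines it expands into the per-bit shift/insert sequences checked above.

; sketch.ll -- try: llc sketch.ll -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw
define <8 x i16> @mask8_to_8i16(i8 %a0) {
  %m = bitcast i8 %a0 to <8 x i1>      ; reinterpret bit i of %a0 as lane i of the mask
  %r = zext <8 x i1> %m to <8 x i16>   ; each lane becomes 0 or 1 (sext would give 0 or -1)
  ret <8 x i16> %r
}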