Mirror of https://github.com/RPCSX/llvm.git (synced 2024-12-13 23:18:51 +00:00)
[X86][SSE] Add support for combining AND bitmasks to shuffles.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@288365 91177308-0d34-0410-b5e6-96231b3b80d8
Parent: f8e7cdba56
Commit: 3160d05cd6
@@ -29889,6 +29889,17 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
   SDValue N1 = N->getOperand(1);
   SDLoc DL(N);
 
+  // Attempt to recursively combine a bitmask AND with shuffles.
+  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
+    SDValue Op(N, 0);
+    SmallVector<int, 1> NonceMask; // Just a placeholder.
+    NonceMask.push_back(0);
+    if (combineX86ShufflesRecursively({Op}, 0, Op, NonceMask,
+                                      /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
+                                      DCI, Subtarget))
+      return SDValue(); // This routine will use CombineTo to replace N.
+  }
+
   // Create BEXTR instructions
   // BEXTR is ((X >> imm) & (2**size-1))
   if (VT != MVT::i32 && VT != MVT::i64)
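The block added above hands the AND node to the recursive X86 shuffle combiner, which can treat a constant bitmask that sets or clears whole bytes as a zeroing byte shuffle. As a rough illustration of that idea, here is a standalone C++ sketch (written for this note, not taken from LLVM; the function name and types are invented): it turns a per-byte AND mask into a PSHUFB-style index vector, or gives up if any byte is only partially masked.

    // Standalone sketch: map a per-byte AND mask to a byte-shuffle mask.
    // Byte I of the result is I (keep) when the mask byte is 0xFF, or -1
    // (force to zero) when it is 0x00; anything else is not representable
    // as a pure byte shuffle, so we give up.
    #include <cstdint>
    #include <optional>
    #include <vector>

    std::optional<std::vector<int>>
    byteMaskToShuffleMask(const std::vector<uint8_t> &MaskBytes) {
      std::vector<int> ShuffleMask;
      ShuffleMask.reserve(MaskBytes.size());
      for (size_t I = 0; I != MaskBytes.size(); ++I) {
        if (MaskBytes[I] == 0xFF)
          ShuffleMask.push_back(static_cast<int>(I)); // keep this byte in place
        else if (MaskBytes[I] == 0x00)
          ShuffleMask.push_back(-1);                  // zero this byte
        else
          return std::nullopt; // partially-set byte: leave the AND alone
      }
      return ShuffleMask;
    }

Once the mask is expressed as a shuffle, the existing shuffle combining picks whichever zeroing idiom is cheapest for the subtarget (PBLENDW, PBLENDD, PSHUFB, or just keeping the plain AND), which is what the test updates below exercise.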
@@ -13,10 +13,17 @@ define <2 x i64> @_clearupper2xi64a(<2 x i64>) nounwind {
 ; SSE-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: _clearupper2xi64a:
-; AVX: # BB#0:
-; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: _clearupper2xi64a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper2xi64a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT: retq
 %x0 = extractelement <2 x i64> %0, i32 0
 %x1 = extractelement <2 x i64> %0, i32 1
 %trunc0 = trunc i64 %x0 to i32
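In the test above, the upper 32 bits of each i64 element were previously cleared with a vandps against a constant-pool mask; with the new combine, AVX1 blends in zero 16-bit lanes (vpxor + vpblendw) and AVX2 zero 32-bit lanes (vpxor + vpblendd), avoiding the memory load. A small scalar model in C++ (illustrative only, not from the patch) of why the two forms agree:

    // Scalar model: AND with 0x00000000FFFFFFFF on a 64-bit lane equals
    // blending its upper 16-bit sub-lanes with zero, which is what
    // VPBLENDW/VPBLENDD do per vector element.
    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    uint64_t clearUpperViaAnd(uint64_t X) { return X & 0x00000000FFFFFFFFULL; }

    uint64_t clearUpperViaBlend16(uint64_t X) {
      uint64_t R = 0;
      for (int Lane = 0; Lane < 4; ++Lane) {
        // Keep 16-bit lanes 0 and 1, take lanes 2 and 3 from a zero vector.
        uint64_t Src = (Lane < 2) ? X : 0;
        R |= ((Src >> (16 * Lane)) & 0xFFFFULL) << (16 * Lane);
      }
      return R;
    }

    int main() {
      for (uint64_t X : {0x0123456789ABCDEFULL, ~0ULL, 0ULL})
        assert(clearUpperViaAnd(X) == clearUpperViaBlend16(X));
      return 0;
    }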
@@ -36,7 +43,8 @@ define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
 ;
 ; AVX1-LABEL: _clearupper4xi32a:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: _clearupper4xi32a:
@@ -16,8 +16,7 @@ define void @knownbits_zext_in_reg(i8*) nounwind {
 ; X32-NEXT: shrl $14, %eax
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: vmovd %eax, %xmm0
-; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
-; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; X32-NEXT: vpextrd $1, %xmm0, %ebp
 ; X32-NEXT: xorl %ecx, %ecx
 ; X32-NEXT: vmovd %xmm0, %esi
@@ -55,8 +54,7 @@ define void @knownbits_zext_in_reg(i8*) nounwind {
 ; X64-NEXT: shrl $14, %eax
 ; X64-NEXT: movzbl %al, %eax
 ; X64-NEXT: vmovd %eax, %xmm0
-; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
-; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT: vpextrd $1, %xmm0, %r8d
 ; X64-NEXT: xorl %esi, %esi
 ; X64-NEXT: vmovd %xmm0, %r9d
@@ -573,7 +573,8 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
 ;
 ; AVX1-LABEL: uitofp_4i32_to_2f64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
 ; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
@@ -873,7 +874,8 @@ define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
 ;
 ; AVX1-LABEL: uitofp_4i32_to_4f64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
 ; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
@@ -3256,7 +3258,8 @@ define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) {
 ; AVX1-LABEL: uitofp_load_4i32_to_4f64:
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
 ; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
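The three uitofp hunks above all follow the standard unsigned-to-double split: isolate the low 16 bits of each 32-bit element, shift to get the high 16 bits, convert both halves, and recombine (the multiply/add tail of the sequence sits outside the visible context). The patch only changes how the low half is isolated, using a zero blend instead of a vpand with a constant-pool mask. A scalar C++ model of the split (illustrative only, not the patch's code) shows the recombination is exact:

    // Scalar model of the u32 -> f64 lowering these tests check: split the
    // value into 16-bit halves, convert each half (exact in double), then
    // recombine. Only the way the low half is isolated changed: a zero
    // blend instead of an AND with a constant-pool mask.
    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    double uitofpViaHalves(uint32_t X) {
      double Lo = static_cast<double>(X & 0xFFFFu); // low 16 bits (blend/AND)
      double Hi = static_cast<double>(X >> 16);     // high 16 bits (PSRLD $16)
      return Hi * 65536.0 + Lo;                     // exact: both halves < 2^16
    }

    int main() {
      for (uint32_t X : {0u, 1u, 0xFFFFu, 0x12345678u, 0xFFFFFFFFu})
        assert(uitofpViaHalves(X) == static_cast<double>(X));
      return 0;
    }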
@@ -11,11 +11,11 @@
 ; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512vl \
 ; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
 
-; CST: [[MASKCSTADDR:.LCPI[0-9_]+]]:
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
+; SSE2: [[MASKCSTADDR:.LCPI[0-9_]+]]:
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
 
 ; CST: [[FPMASKCSTADDR:.LCPI[0-9_]+]]:
 ; CST-NEXT: .long 1199570944 # float 65536
@@ -30,20 +30,32 @@
 ; AVX2-NEXT: .long 65535 # 0xffff
 
 define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
-; SSE-LABEL: test_uitofp_v4i32_to_v4f32:
-; SSE: # BB#0:
-; SSE-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
-; SSE-NEXT: andps %xmm0, %xmm1
-; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
-; SSE-NEXT: addps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: test_uitofp_v4i32_to_v4f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
+; SSE2-NEXT: addps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_uitofp_v4i32_to_v4f32:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE41-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
+; SSE41-NEXT: addps %xmm1, %xmm0
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: test_uitofp_v4i32_to_v4f32:
 ; AVX: # BB#0:
-; AVX-NEXT: vandps [[MASKCSTADDR]](%rip), %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX-NEXT: vcvtdq2ps %xmm1, %xmm1
 ; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -97,25 +109,45 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
 ; AVX2-NEXT: .long 65535 # 0xffff
 
 define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
-; SSE-LABEL: test_uitofp_v8i32_to_v8f32:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrld $16, %xmm2
-; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
-; SSE-NEXT: mulps %xmm3, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE-NEXT: addps %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrld $16, %xmm2
-; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE-NEXT: mulps %xmm3, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE-NEXT: addps %xmm2, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: test_uitofp_v8i32_to_v8f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE2-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
+; SSE2-NEXT: mulps %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT: addps %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE2-NEXT: mulps %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT: addps %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_uitofp_v8i32_to_v8f32:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrld $16, %xmm2
+; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
+; SSE41-NEXT: mulps %xmm3, %xmm2
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE41-NEXT: addps %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrld $16, %xmm2
+; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE41-NEXT: mulps %xmm3, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE41-NEXT: addps %xmm2, %xmm1
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: test_uitofp_v8i32_to_v8f32:
 ; AVX: # BB#0:
@@ -87,14 +87,12 @@ define <32 x i8> @combine_and_pshufb(<32 x i8> %a0) {
 define <32 x i8> @combine_pshufb_and(<32 x i8> %a0) {
 ; X32-LABEL: combine_pshufb_and:
 ; X32: # BB#0:
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[8,9],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero,ymm0[24,25],zero,zero,zero,zero,zero,zero
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_pshufb_and:
 ; X64: # BB#0:
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[8,9],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero,ymm0[24,25],zero,zero,zero,zero,zero,zero
-; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT: retq
 %1 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
 %2 = shufflevector <32 x i8> %1, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 32, i32 32, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -299,8 +299,7 @@ define <16 x i8> @combine_and_pshufb(<16 x i8> %a0) {
 define <16 x i8> @combine_pshufb_and(<16 x i8> %a0) {
 ; SSSE3-LABEL: combine_pshufb_and:
 ; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[8,9],zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: combine_pshufb_and:
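The two combine_pshufb_and hunks run in the other direction: a PSHUFB that zeroes most bytes followed by a byte-granular AND now collapses into a single andps/vandps, since both operations can be expressed as byte-shuffle masks and composed. A minimal C++ sketch of that composition (names invented for this note, not LLVM code):

    // Sketch: compose a byte shuffle with a following byte-granular AND.
    // Both are byte-shuffle masks (-1 = force byte to zero), so the pair
    // collapses to a single mask, which may then lower back to one AND,
    // as in the combine_pshufb_and checks above.
    #include <vector>

    std::vector<int> composeWithByteMask(const std::vector<int> &ShuffleMask,
                                         const std::vector<bool> &KeepByte) {
      std::vector<int> Composed(ShuffleMask.size());
      for (size_t I = 0; I != ShuffleMask.size(); ++I)
        // A byte survives only if the AND keeps it and the shuffle did not
        // already zero it.
        Composed[I] = KeepByte[I] ? ShuffleMask[I] : -1;
      return Composed;
    }

If the composed mask keeps every surviving byte in place, as in these tests, it is simply a whole-element bitmask again, so a single AND is the cheapest lowering.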