[DAG] visitSCALAR_TO_VECTOR - don't fold scalar_to_vector(bin(extract(x),extract(y))) -> bin(x,y) if extracts have other uses

Fixes #78897 - although the test case still has a number of poor codegen issues (in particular for i686 triples) that will need addressing (combining the nodes in topological order should help).
Simon Pilgrim 2024-01-23 16:22:51 +00:00
parent 4782ac8dd3
commit e1aa5b1fd1
2 changed files with 34 additions and 51 deletions
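The problematic pattern, shown as a hypothetical reduced IR sketch (the function name and exact shape are illustrative, not the committed test; the constants are the ones visible in the checks below): %ext feeds both the mul that becomes a scalar_to_vector operand and a separate scalar xor, so rewriting the mul as a vector multiply cannot remove the extract and the code pays for both.

define <2 x i64> @sketch(<2 x i64> %v) {
  ; Illustrative only: %ext has a second user (the xor), so the binop is no
  ; longer the sole user of the extract and the fold should be skipped.
  %ext = extractelement <2 x i64> %v, i64 0
  %mul = mul i64 %ext, 1229782938247303440
  %other = xor i64 %ext, 76861433640456465
  %lo = insertelement <2 x i64> poison, i64 %mul, i64 0
  %hi = insertelement <2 x i64> %lo, i64 %other, i64 1
  ret <2 x i64> %hi
}

With the added isOnlyUserOf checks the combine now skips this kind of pattern, which is why the x86 checks below keep the multiply scalar (imulq) instead of emitting a pmuludq/vpmullq sequence alongside the still-needed extract.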

llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

@@ -26021,6 +26021,7 @@ SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
   // Try to convert a scalar binop with an extracted vector element to a vector
   // binop. This is intended to reduce potentially expensive register moves.
   // TODO: Check if both operands are extracted.
+  // TODO: How to prefer scalar/vector ops with multiple uses of the extact?
   // TODO: Generalize this, so it can be called from visitINSERT_VECTOR_ELT().
   SDValue Scalar = N->getOperand(0);
   unsigned Opcode = Scalar.getOpcode();
@@ -26029,6 +26030,8 @@ SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
       TLI.isBinOp(Opcode) && Scalar.getValueType() == VecEltVT &&
       Scalar.getOperand(0).getValueType() == VecEltVT &&
       Scalar.getOperand(1).getValueType() == VecEltVT &&
+      Scalar->isOnlyUserOf(Scalar.getOperand(0).getNode()) &&
+      Scalar->isOnlyUserOf(Scalar.getOperand(1).getNode()) &&
       DAG.isSafeToSpeculativelyExecute(Opcode) && hasOperation(Opcode, VT)) {
     // Match an extract element and get a shuffle mask equivalent.
     SmallVector<int, 8> ShufMask(VT.getVectorNumElements(), -1);

llvm/test/CodeGen/X86/pr78897.ll

@@ -8,7 +8,8 @@
 ; RUN: llc < %s -mtriple=i686-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=X86-AVX512
 ; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=X64-AVX512

-; FIXME: PR78897 - Don't vectorize a mul if we still need the extract
+; PR78897 - Don't vectorize a mul of extracted values if we'd still need the extract.
+; TODO: We should vectorize on 32-bit targets.
 define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
 ; X86-SSE2-LABEL: produceShuffleVectorForByte:
 ; X86-SSE2: # %bb.0: # %entry
@@ -70,21 +71,13 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
 ; X64-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
 ; X64-SSE2-NEXT: pand %xmm0, %xmm1
 ; X64-SSE2-NEXT: movq %xmm1, %rax
-; X64-SSE2-NEXT: movdqa %xmm1, %xmm2
-; X64-SSE2-NEXT: psrlq $32, %xmm2
-; X64-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1229782938247303440,1229782938247303440]
-; X64-SSE2-NEXT: pmuludq %xmm3, %xmm2
-; X64-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [286331153,286331153]
-; X64-SSE2-NEXT: pmuludq %xmm1, %xmm4
-; X64-SSE2-NEXT: paddq %xmm2, %xmm4
-; X64-SSE2-NEXT: psllq $32, %xmm4
-; X64-SSE2-NEXT: pmuludq %xmm3, %xmm1
-; X64-SSE2-NEXT: paddq %xmm4, %xmm1
-; X64-SSE2-NEXT: movabsq $76861433640456465, %rcx # imm = 0x111111111111111
-; X64-SSE2-NEXT: xorq %rax, %rcx
-; X64-SSE2-NEXT: movabsq $1229782938247303440, %rax # imm = 0x1111111111111110
+; X64-SSE2-NEXT: movabsq $1229782938247303440, %rcx # imm = 0x1111111111111110
+; X64-SSE2-NEXT: movabsq $76861433640456465, %rdx # imm = 0x111111111111111
+; X64-SSE2-NEXT: xorq %rax, %rdx
 ; X64-SSE2-NEXT: imulq %rcx, %rax
-; X64-SSE2-NEXT: movq %rax, %xmm2
+; X64-SSE2-NEXT: movq %rax, %xmm1
+; X64-SSE2-NEXT: imulq %rcx, %rdx
+; X64-SSE2-NEXT: movq %rdx, %xmm2
 ; X64-SSE2-NEXT: pand %xmm0, %xmm1
 ; X64-SSE2-NEXT: pandn %xmm2, %xmm0
 ; X64-SSE2-NEXT: por %xmm1, %xmm0
@@ -147,24 +140,16 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
 ; X64-SSE42-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; X64-SSE42-NEXT: pxor %xmm0, %xmm0
 ; X64-SSE42-NEXT: pcmpeqb %xmm1, %xmm0
-; X64-SSE42-NEXT: movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
-; X64-SSE42-NEXT: pand %xmm0, %xmm2
-; X64-SSE42-NEXT: movq %xmm2, %rax
-; X64-SSE42-NEXT: movdqa %xmm2, %xmm1
-; X64-SSE42-NEXT: psrlq $32, %xmm1
-; X64-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [1229782938247303440,1229782938247303440]
-; X64-SSE42-NEXT: pmuludq %xmm3, %xmm1
-; X64-SSE42-NEXT: movdqa {{.*#+}} xmm4 = [286331153,286331153]
-; X64-SSE42-NEXT: pmuludq %xmm2, %xmm4
-; X64-SSE42-NEXT: paddq %xmm1, %xmm4
-; X64-SSE42-NEXT: psllq $32, %xmm4
-; X64-SSE42-NEXT: pmuludq %xmm3, %xmm2
-; X64-SSE42-NEXT: paddq %xmm4, %xmm2
-; X64-SSE42-NEXT: movabsq $76861433640456465, %rcx # imm = 0x111111111111111
-; X64-SSE42-NEXT: xorq %rax, %rcx
-; X64-SSE42-NEXT: movabsq $1229782938247303440, %rax # imm = 0x1111111111111110
+; X64-SSE42-NEXT: movdqa {{.*#+}} xmm1 = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
+; X64-SSE42-NEXT: pand %xmm0, %xmm1
+; X64-SSE42-NEXT: movq %xmm1, %rax
+; X64-SSE42-NEXT: movabsq $1229782938247303440, %rcx # imm = 0x1111111111111110
+; X64-SSE42-NEXT: movabsq $76861433640456465, %rdx # imm = 0x111111111111111
+; X64-SSE42-NEXT: xorq %rax, %rdx
 ; X64-SSE42-NEXT: imulq %rcx, %rax
-; X64-SSE42-NEXT: movq %rax, %xmm1
+; X64-SSE42-NEXT: movq %rax, %xmm2
+; X64-SSE42-NEXT: imulq %rcx, %rdx
+; X64-SSE42-NEXT: movq %rdx, %xmm1
 ; X64-SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm1
 ; X64-SSE42-NEXT: movdqa %xmm1, %xmm0
 ; X64-SSE42-NEXT: psrlw $4, %xmm0
@@ -220,19 +205,13 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
 ; X64-AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; X64-AVX2-NEXT: vmovq %xmm1, %rax
-; X64-AVX2-NEXT: vpsrlq $32, %xmm1, %xmm2
-; X64-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1229782938247303440,1229782938247303440]
-; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm4
-; X64-AVX2-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
-; X64-AVX2-NEXT: vpaddq %xmm2, %xmm4, %xmm2
-; X64-AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
-; X64-AVX2-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
-; X64-AVX2-NEXT: vpaddq %xmm2, %xmm1, %xmm1
-; X64-AVX2-NEXT: movabsq $76861433640456465, %rcx # imm = 0x111111111111111
-; X64-AVX2-NEXT: xorq %rax, %rcx
-; X64-AVX2-NEXT: movabsq $1229782938247303440, %rax # imm = 0x1111111111111110
+; X64-AVX2-NEXT: movabsq $1229782938247303440, %rcx # imm = 0x1111111111111110
+; X64-AVX2-NEXT: movabsq $76861433640456465, %rdx # imm = 0x111111111111111
+; X64-AVX2-NEXT: xorq %rax, %rdx
 ; X64-AVX2-NEXT: imulq %rcx, %rax
-; X64-AVX2-NEXT: vmovq %rax, %xmm2
+; X64-AVX2-NEXT: vmovq %rax, %xmm1
+; X64-AVX2-NEXT: imulq %rcx, %rdx
+; X64-AVX2-NEXT: vmovq %rdx, %xmm2
 ; X64-AVX2-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0
 ; X64-AVX2-NEXT: vpsrlw $4, %xmm0, %xmm1
 ; X64-AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -280,16 +259,17 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
 ; X64-AVX512-NEXT: vpbroadcastb %edi, %xmm0
 ; X64-AVX512-NEXT: vptestnmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
 ; X64-AVX512-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
-; X64-AVX512-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm1
 ; X64-AVX512-NEXT: vmovq %xmm0, %rax
-; X64-AVX512-NEXT: movabsq $76861433640456465, %rcx # imm = 0x111111111111111
-; X64-AVX512-NEXT: xorq %rax, %rcx
-; X64-AVX512-NEXT: movabsq $1229782938247303440, %rax # imm = 0x1111111111111110
+; X64-AVX512-NEXT: movabsq $1229782938247303440, %rcx # imm = 0x1111111111111110
+; X64-AVX512-NEXT: movabsq $76861433640456465, %rdx # imm = 0x111111111111111
+; X64-AVX512-NEXT: xorq %rax, %rdx
 ; X64-AVX512-NEXT: imulq %rcx, %rax
 ; X64-AVX512-NEXT: vmovq %rax, %xmm0
-; X64-AVX512-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
-; X64-AVX512-NEXT: vpsrlw $4, %xmm0, %xmm1
-; X64-AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-AVX512-NEXT: imulq %rcx, %rdx
+; X64-AVX512-NEXT: vmovq %rdx, %xmm1
+; X64-AVX512-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
+; X64-AVX512-NEXT: vpsrlw $4, %xmm1, %xmm0
+; X64-AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; X64-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; X64-AVX512-NEXT: retq
 entry: