[X86][SSE] Generalised combining to VZEXT_MOVL to any vector size
This doesn't change the tests' codegen, as we already combined to blend+zero, which is what we lower VZEXT_MOVL to on SSE41+ targets, but it does put us in a better position for when we improve shuffling for optsize.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@279273 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 1ea0947ff4
commit c05f998efe
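For illustration only (not part of the commit): a minimal IR sketch of the kind of shuffle this combine now recognises at 256-bit width. The mask keeps element 0 of the source and zeroes the remaining lanes, which is the VZEXT_MOVL pattern; on SSE41+/AVX targets it lowers to a blend against a zero vector, as the new tests below check. The function name is hypothetical.

; Hypothetical example, not taken from the tests in this commit:
; keep lane 0 of %a, zero lanes 1-3 -> matches X86ISD::VZEXT_MOVL.
define <4 x double> @vzmovl_style_shuffle(<4 x double> %a) {
  %r = shufflevector <4 x double> %a, <4 x double> zeroinitializer,
                     <4 x i32> <i32 0, i32 4, i32 5, i32 6>
  ret <4 x double> %r
}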
@@ -24906,14 +24906,17 @@ static SDValue combineShuffle256(SDNode *N, SelectionDAG &DAG,
 static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
                                     const X86Subtarget &Subtarget,
                                     unsigned &Shuffle, MVT &ShuffleVT) {
+  unsigned NumMaskElts = Mask.size();
   bool FloatDomain = MaskVT.isFloatingPoint() ||
                      (!Subtarget.hasAVX2() && MaskVT.is256BitVector());
 
-  // Match a 128-bit vector against a VZEXT_MOVL instruction.
-  if (MaskVT.is128BitVector() && Subtarget.hasSSE2() &&
-      isTargetShuffleEquivalent(Mask, {0, SM_SentinelZero})) {
+  // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
+  if (((MaskVT.getScalarSizeInBits() == 32) ||
+       (MaskVT.getScalarSizeInBits() == 64 && Subtarget.hasSSE2())) &&
+      isUndefOrEqual(Mask[0], 0) &&
+      isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
     Shuffle = X86ISD::VZEXT_MOVL;
-    ShuffleVT = MaskVT;
+    ShuffleVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
     return true;
   }
 
@@ -24981,8 +24984,7 @@ static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
 
   // Attempt to match against broadcast-from-vector.
   if (Subtarget.hasAVX2()) {
-    unsigned NumElts = Mask.size();
-    SmallVector<int, 64> BroadcastMask(NumElts, 0);
+    SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
     if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
       ShuffleVT = MaskVT;
       Shuffle = X86ISD::VBROADCAST;
@@ -277,6 +277,30 @@ define <8 x float> @combine_permps_as_permpd(<8 x float> %a) {
   ret <8 x float> %1
 }
 
+define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
+; CHECK-LABEL: combine_pshufb_as_vzmovl_64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; CHECK-NEXT:    retq
+  %1 = bitcast <4 x double> %a0 to <32 x i8>
+  %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  %3 = bitcast <32 x i8> %2 to <4 x double>
+  ret <4 x double> %3
+}
+
+define <8 x float> @combine_pshufb_as_vzmovl_32(<8 x float> %a0) {
+; CHECK-LABEL: combine_pshufb_as_vzmovl_32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; CHECK-NEXT:    retq
+  %1 = bitcast <8 x float> %a0 to <32 x i8>
+  %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  %3 = bitcast <32 x i8> %2 to <8 x float>
+  ret <8 x float> %3
+}
+
 define <32 x i8> @combine_pshufb_as_pslldq(<32 x i8> %a0) {
 ; CHECK-LABEL: combine_pshufb_as_pslldq:
 ; CHECK:       # BB#0:
@@ -88,6 +88,59 @@ define <4 x float> @combine_pshufb_as_movss(<4 x float> %a0, <4 x float> %a1) {
   ret <4 x float> %4
 }
 
+define <2 x double> @combine_pshufb_as_vzmovl_64(<2 x double> %a0) {
+; SSE-LABEL: combine_pshufb_as_vzmovl_64:
+; SSE:       # BB#0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_pshufb_as_vzmovl_64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX-NEXT:    retq
+  %1 = bitcast <2 x double> %a0 to <16 x i8>
+  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  %3 = bitcast <16 x i8> %2 to <2 x double>
+  ret <2 x double> %3
+}
+
+define <4 x float> @combine_pshufb_as_vzmovl_32(<4 x float> %a0) {
+; SSSE3-LABEL: combine_pshufb_as_vzmovl_32:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    xorps %xmm1, %xmm1
+; SSSE3-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSSE3-NEXT:    movaps %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: combine_pshufb_as_vzmovl_32:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    xorps %xmm1, %xmm1
+; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: combine_pshufb_as_vzmovl_32:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: combine_pshufb_as_vzmovl_32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: combine_pshufb_as_vzmovl_32:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX512F-NEXT:    retq
+  %1 = bitcast <4 x float> %a0 to <16 x i8>
+  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+  %3 = bitcast <16 x i8> %2 to <4 x float>
+  ret <4 x float> %3
+}
+
 define <4 x float> @combine_pshufb_movddup(<4 x float> %a0) {
 ; SSE-LABEL: combine_pshufb_movddup:
 ; SSE:       # BB#0: