[X86][SSE] Ensure BLENDPD/BLENDPS/PBLEND inputs are both of the correct input type
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@256782 91177308-0d34-0410-b5e6-96231b3b80d8
parent 8abd63fcbb
commit 472704020c
@@ -4557,6 +4557,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
    MVT CastVT = Subtarget.hasAVX2() ? MVT::v8i32 : MVT::v8f32;
    SDValue Mask = DAG.getConstant(0x0f, dl, MVT::i8);
    Result = DAG.getBitcast(CastVT, Result);
    Vec256 = DAG.getBitcast(CastVT, Vec256);
    Vec256 = DAG.getNode(X86ISD::BLENDI, dl, CastVT, Result, Vec256, Mask);
    return DAG.getBitcast(ResultVT, Vec256);
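Per the hunk header only one line is new here; judging from the commit title, it is most likely the bitcast of Result to CastVT, so that both operands handed to the X86ISD::BLENDI node already carry the blend's vector type (previously only Vec256 was bitcast). A minimal sketch of the resulting pattern, reusing the names and SelectionDAG calls visible in the hunk; the wrapper function and its signature are assumptions for illustration, not the in-tree code:

// Sketch: blend a widened 128-bit value against a 256-bit vector while
// keeping both BLENDI operands in the common cast type. CastVT selects an
// integer blend (VPBLENDD) on AVX2 and a float blend (VBLENDPS) otherwise.
static SDValue blendLowHalf(SDValue Result, SDValue Vec256, MVT ResultVT,
                            SelectionDAG &DAG, const X86Subtarget &Subtarget,
                            const SDLoc &dl) {
  MVT CastVT = Subtarget.hasAVX2() ? MVT::v8i32 : MVT::v8f32;
  SDValue Mask = DAG.getConstant(0x0f, dl, MVT::i8); // 8-lane blend immediate
  Result = DAG.getBitcast(CastVT, Result);           // the fix: cast this input too
  Vec256 = DAG.getBitcast(CastVT, Vec256);
  Vec256 = DAG.getNode(X86ISD::BLENDI, dl, CastVT, Result, Vec256, Mask);
  return DAG.getBitcast(ResultVT, Vec256);           // back to the requested type
}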
@@ -27467,6 +27468,8 @@ static SDValue PerformBLENDICombine(SDNode *N, SelectionDAG &DAG) {
  SDValue V1 = N->getOperand(1);
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  assert(VT == V0.getValueType() && VT == V1.getValueType() &&
         "Unexpected input vector types");

  // Canonicalize a v2f64 blend with a mask of 2 by swapping the vector
  // operands and changing the mask to 1. This saves us a bunch of
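The two lines added in this hunk appear to be the assert and its message string: after the fix above, a BLENDI node's operands are expected to already match its result type, and the combine now checks that invariant. The trailing comment (cut off by the hunk boundary) describes a canonicalization of a v2f64 blend with mask 2 into a blend of the swapped operands with mask 1. A minimal sketch of that equivalence follows; the helper name is made up for illustration, and the blend immediate is handled as a plain unsigned value rather than read from the node as the in-tree combine does:

// Sketch (not the in-tree combine): a two-element BLENDI with mask 2 takes
// element 0 from V0 and element 1 from V1; swapping the operands and using
// mask 1 selects exactly the same elements, so the node can be rebuilt in
// the canonical form.
static SDValue canonicalizeV2F64Blend(SDValue V0, SDValue V1, unsigned Mask,
                                      EVT VT, const SDLoc &DL,
                                      SelectionDAG &DAG) {
  assert(VT == V0.getValueType() && VT == V1.getValueType() &&
         "Unexpected input vector types");
  if (VT == MVT::v2f64 && Mask == 2) {
    std::swap(V0, V1); // BLENDI(V0, V1, 0b10) == BLENDI(V1, V0, 0b01)
    Mask = 1;
  }
  return DAG.getNode(X86ISD::BLENDI, DL, VT, V0, V1,
                     DAG.getConstant(Mask, DL, MVT::i8));
}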
@@ -1,38 +1,27 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

; Prefer a blend instruction to a vinsert128 instruction because blends
; are simpler (no lane changes) and therefore will have equal or better
; performance.

define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp {
; AVX1-LABEL: castA:
; AVX1: vxorps %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: castA:
; AVX2: vxorps %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: retq

entry:
; AVX-LABEL: castA:
; AVX: ## BB#0:
; AVX-NEXT: vxorps %ymm1, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: retq
  %shuffle.i = shufflevector <4 x float> %m, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
  ret <8 x float> %shuffle.i
}

define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
; AVX1-LABEL: castB:
; AVX1: vxorpd %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: castB:
; AVX2: vxorpd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX2-NEXT: retq

entry:
; AVX-LABEL: castB:
; AVX: ## BB#0:
; AVX-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX-NEXT: retq
  %shuffle.i = shufflevector <2 x double> %m, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
  ret <4 x double> %shuffle.i
}
@@ -41,16 +30,16 @@ entry:

define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
; AVX1-LABEL: castC:
; AVX1: vxorps %xmm1, %xmm1, %xmm1
; AVX1: ## BB#0:
; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: castC:
; AVX2: vpxor %ymm1, %ymm1, %ymm1
; AVX2: ## BB#0:
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: retq

entry:
  %shuffle.i = shufflevector <2 x i64> %m, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
  ret <4 x i64> %shuffle.i
}
@@ -59,43 +48,28 @@ entry:
; vzeroupper before the return, so just check for the absence of shuffles.

define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp {
; AVX1-LABEL: castD:
; AVX1-NOT: extract
; AVX1-NOT: blend
;
; AVX2-LABEL: castD:
; AVX2-NOT: extract
; AVX2-NOT: blend

entry:
; AVX-LABEL: castD:
; AVX: ## BB#0:
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %shuffle.i = shufflevector <8 x float> %m, <8 x float> %m, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x float> %shuffle.i
}

define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp {
; AVX1-LABEL: castE:
; AVX1-NOT: extract
; AVX1-NOT: blend
;
; AVX2-LABEL: castE:
; AVX2-NOT: extract
; AVX2-NOT: blend

entry:
; AVX-LABEL: castE:
; AVX: ## BB#0:
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %shuffle.i = shufflevector <4 x i64> %m, <4 x i64> %m, <2 x i32> <i32 0, i32 1>
  ret <2 x i64> %shuffle.i
}

define <2 x double> @castF(<4 x double> %m) nounwind uwtable readnone ssp {
; AVX1-LABEL: castF:
; AVX1-NOT: extract
; AVX1-NOT: blend
;
; AVX2-LABEL: castF:
; AVX2-NOT: extract
; AVX2-NOT: blend

entry:
; AVX-LABEL: castF:
; AVX: ## BB#0:
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %shuffle.i = shufflevector <4 x double> %m, <4 x double> %m, <2 x i32> <i32 0, i32 1>
  ret <2 x double> %shuffle.i
}