Optimize concat_vectors(X, undef) -> scalar_to_vector(X).

This optimization is not SSE specific, so I am moving it to the generic DAGCombiner.
The new scalar_to_vector DAG node exposed a missing pattern in the AArch64 target that I needed to add.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193393 91177308-0d34-0410-b5e6-96231b3b80d8
Nadav Rotem 2013-10-25 06:41:18 +00:00
parent be3cf5f3e4
commit 97541d400e
3 changed files with 33 additions and 48 deletions
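
For illustration only, the following self-contained C++ sketch (not part of this commit) models the legality checks the new generic combine performs before rewriting concat_vectors(bitcast(scalar), undef) into bitcast(scalar_to_vector(scalar)). SimpleVT, isTypeLegalStub, and canFoldConcatWithUndef are invented names for this sketch; the real code works on EVT values and queries TargetLowering::isTypeLegal.

// Standalone sketch (not LLVM code): models the legality checks the new
// generic combine performs before rewriting
//   concat_vectors(bitcast(scalar), undef) -> bitcast(scalar_to_vector(scalar)).
// SimpleVT and isTypeLegalStub are invented for illustration.
#include <cstdio>

struct SimpleVT {
  unsigned ScalarBits;   // width of one element in bits
  unsigned NumElements;  // 1 for a plain scalar
  unsigned sizeInBits() const { return ScalarBits * NumElements; }
};

// Stand-in for TLI.isTypeLegal(): pretend plain scalars and 64/128-bit
// vectors are legal on the target.
static bool isTypeLegalStub(const SimpleVT &VT) {
  return VT.NumElements == 1 || VT.sizeInBits() == 64 || VT.sizeInBits() == 128;
}

// Mirrors the shape of the new visitCONCAT_VECTORS checks: the bitcast source
// must be a scalar whose width tiles the concat result type exactly, and both
// the scalar type and the rebuilt vector type must be legal.
static bool canFoldConcatWithUndef(const SimpleVT &VT, const SimpleVT &SclTy,
                                   SimpleVT &NVT) {
  if (SclTy.NumElements != 1)
    return false;
  if (VT.sizeInBits() % SclTy.ScalarBits != 0)
    return false;
  NVT = {SclTy.ScalarBits, VT.sizeInBits() / SclTy.ScalarBits};
  return isTypeLegalStub(NVT) && isTypeLegalStub(SclTy);
}

int main() {
  SimpleVT V2I64 = {64, 2};  // result type of the concat_vectors
  SimpleVT I64 = {64, 1};    // scalar feeding the bitcast operand
  SimpleVT NVT;
  if (canFoldConcatWithUndef(V2I64, I64, NVT))
    std::printf("fold to scalar_to_vector of %u x i%u, then bitcast to the "
                "concat type\n", NVT.NumElements, NVT.ScalarBits);
  return 0;
}

The actual combine below additionally requires the concat to have exactly two operands with the second being undef, and it bails out on scalar types that are neither integer nor floating point.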


@@ -9836,8 +9836,35 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
     return N->getOperand(0);
 
   // Check if all of the operands are undefs.
+  EVT VT = N->getValueType(0);
   if (ISD::allOperandsUndef(N))
-    return DAG.getUNDEF(N->getValueType(0));
+    return DAG.getUNDEF(VT);
 
+  // Optimize concat_vectors where one of the vectors is undef.
+  if (N->getNumOperands() == 2 &&
+      N->getOperand(1)->getOpcode() == ISD::UNDEF) {
+    SDValue In = N->getOperand(0);
+    assert(In->getValueType(0).isVector() && "Must concat vectors");
+
+    // Transform: concat_vectors(scalar, undef) -> scalar_to_vector(sclr).
+    if (In->getOpcode() == ISD::BITCAST &&
+        !In->getOperand(0)->getValueType(0).isVector()) {
+      SDValue Scalar = In->getOperand(0);
+      EVT SclTy = Scalar->getValueType(0);
+
+      if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
+        return SDValue();
+
+      EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy,
+                                 VT.getSizeInBits() / SclTy.getSizeInBits());
+      if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType()))
+        return SDValue();
+
+      SDLoc dl = SDLoc(N);
+      SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NVT, Scalar);
+      return DAG.getNode(ISD::BITCAST, dl, VT, Res);
+    }
+  }
+
   // Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
   // nodes often generate nop CONCAT_VECTOR nodes.


@@ -5130,6 +5130,10 @@ def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Rn))),
 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
           (FMOVdd $src)>;
 
+def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$src))),
+          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
+                         (f64 FPR64:$src), sub_64)>;
+
 class NeonI_DUP_Elt<bit Q, string asmop, string rdlane, string rnlane,
                     RegisterOperand ResVPR, ValueType ResTy,
                     ValueType OpTy, Operand OpImm>
@@ -5300,4 +5304,4 @@ def : Pat<(v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 0))),
 def : Pat<(v2f32 (extract_subvector (v4f32 VPR128:$Rn), (i64 0))),
           (v2f32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
 def : Pat<(v1f64 (extract_subvector (v2f64 VPR128:$Rn), (i64 0))),
           (v1f64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;


@@ -1500,7 +1500,6 @@ void X86TargetLowering::resetOperationActions() {
   }
 
   // We have target-specific dag combine patterns for the following nodes:
-  setTargetDAGCombine(ISD::CONCAT_VECTORS);
   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
   setTargetDAGCombine(ISD::VSELECT);
@@ -16155,50 +16154,6 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
-static SDValue PerformConcatCombine(SDNode *N, SelectionDAG &DAG,
-                                    TargetLowering::DAGCombinerInfo &DCI,
-                                    const X86Subtarget *Subtarget) {
-  // Creating a v8i16 from a v4i16 argument and an undef runs into trouble in
-  // type legalization and ends up spilling to the stack. Avoid that by
-  // creating a vector first and bitcasting the result rather than
-  // bitcasting the source then creating the vector. Similar problems with
-  // v8i8.
-
-  // No point in doing this after legalize, so early exit for that.
-  if (!DCI.isBeforeLegalize())
-    return SDValue();
-
-  EVT VT = N->getValueType(0);
-  SDValue Op0 = N->getOperand(0);
-  SDValue Op1 = N->getOperand(1);
-  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-
-  if (VT.getSizeInBits() == 128 && N->getNumOperands() == 2 &&
-      Op1->getOpcode() == ISD::UNDEF &&
-      Op0->getOpcode() == ISD::BITCAST &&
-      !TLI.isTypeLegal(Op0->getValueType(0)) &&
-      TLI.isTypeLegal(Op0->getOperand(0)->getValueType(0))) {
-    if (Op0->getOperand(0)->getValueType(0).isVector())
-      return SDValue();
-    SDValue Scalar = Op0->getOperand(0);
-    // Any legal type here will be a simple value type.
-    MVT SVT = Scalar->getValueType(0).getSimpleVT();
-    // As a special case, bail out on MMX values.
-    if (SVT == MVT::x86mmx)
-      return SDValue();
-    EVT NVT = MVT::getVectorVT(SVT, 2);
-    // If the result vector type isn't legal, this transform won't really
-    // help, so bail on that, too.
-    if (!TLI.isTypeLegal(NVT))
-      return SDValue();
-    SDLoc dl = SDLoc(N);
-    SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NVT, Scalar);
-    Res = DAG.getNode(ISD::BITCAST, dl, VT, Res);
-    return Res;
-  }
-
-  return SDValue();
-}
-
 /// PerformShuffleCombine - Performs several different shuffle combines.
 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
@@ -19077,7 +19032,6 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case X86ISD::VPERMILP:
   case X86ISD::VPERM2X128:
   case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
-  case ISD::CONCAT_VECTORS: return PerformConcatCombine(N, DAG, DCI, Subtarget);
   case ISD::FMA:            return PerformFMACombine(N, DAG, Subtarget);
   }
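
The comment in the removed PerformConcatCombine describes the original motivation: widening a 64-bit vector such as v4i16 to 128 bits with an undef upper half. Purely as a hedged illustration (this is not a test from the commit, and the type names are invented), source like the following Clang vector-extension snippet may give rise to such a concat_vectors(v4i16, undef) node during SelectionDAG construction.

// Compile with Clang; vector_size and __builtin_shufflevector are Clang
// extensions. The -1 lanes are "don't care", i.e. undef in the shuffle mask,
// so the widened upper half of the result is undefined.
typedef short v4i16 __attribute__((vector_size(8)));
typedef short v8i16 __attribute__((vector_size(16)));

v8i16 widen(v4i16 x) {
  return __builtin_shufflevector(x, x, 0, 1, 2, 3, -1, -1, -1, -1);
}

With the fold now living in the generic DAGCombiner, every target benefits from it, which is what makes the X86-specific PerformConcatCombine redundant.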