[CodeGen] Add getBuildVector and getSplatBuildVector helpers. NFCI.

Differential Revision: http://reviews.llvm.org/D17176

llvm-svn: 267606
Author: Ahmed Bougacha (2016-04-26 21:15:30 +00:00)
Parent: 67f60eaa89
Commit: db9e64109d
13 changed files with 173 additions and 178 deletions


@ -631,6 +631,38 @@ public:
    return getVectorShuffle(VT, dl, N1, N2, MaskElts.data());
  }

  /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
  /// which must be a vector type, must match the number of operands in Ops.
  /// The operands must have the same type as (or, for integers, a type wider
  /// than) VT's element type.
  SDValue getBuildVector(EVT VT, SDLoc DL, ArrayRef<SDValue> Ops) {
    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
  }

  /// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
  /// elements. VT must be a vector type. Op's type must be the same as (or,
  /// for integers, a type wider than) VT's element type.
  SDValue getSplatBuildVector(EVT VT, SDLoc DL, SDValue Op) {
    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
    if (Op.getOpcode() == ISD::UNDEF) {
      assert((VT.getVectorElementType() == Op.getValueType() ||
              (VT.isInteger() &&
               VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
             "A splatted value must have a width equal or (for integers) "
             "greater than the vector element type!");
      return getNode(ISD::UNDEF, SDLoc(), VT);
    }

    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
  }

  /// Return a splat ISD::BUILD_VECTOR node, but with Op's SDLoc.
  SDValue getSplatBuildVector(EVT VT, SDValue Op) {
    return getSplatBuildVector(VT, SDLoc(Op), Op);
  }

  /// \brief Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
  /// the shuffle node in input but with swapped operands.
  ///

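For illustration only (not part of the patch): a minimal sketch of how lowering code would call the two new helpers, assuming a SelectionDAG &DAG and an SDLoc DL are in scope, with arbitrarily chosen types and values.

  // Sketch only: build the v4i32 constant vector <0, 1, 2, 3>.
  SmallVector<SDValue, 4> Elts;
  for (unsigned i = 0; i != 4; ++i)
    Elts.push_back(DAG.getConstant(i, DL, MVT::i32));
  SDValue BV = DAG.getBuildVector(MVT::v4i32, DL, Elts);

  // Sketch only: splat one scalar to every lane. If the operand is UNDEF,
  // getSplatBuildVector short-circuits to an UNDEF vector of the full type.
  SDValue Scalar = DAG.getConstant(42, DL, MVT::i32);
  SDValue Splat = DAG.getSplatBuildVector(MVT::v4i32, DL, Scalar);

The remaining hunks mechanically rewrite existing BUILD_VECTOR call sites to these helpers, so no functional change is intended (NFCI).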

@ -5823,7 +5823,7 @@ static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
Elts.push_back(DAG.getConstant(C.zext(VTBits), DL, SVT));
}
return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Elts).getNode();
return DAG.getBuildVector(VT, DL, Elts).getNode();
}
// ExtendUsesToFormExtLoad - Trying to extend uses of a load to enable this:
@ -7178,7 +7178,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset)
Opnds.push_back(BuildVect.getOperand(i));
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds);
return DAG.getBuildVector(VT, SDLoc(N), Opnds);
}
}
@ -7610,7 +7610,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
DstEltVT, Op));
AddToWorklist(Ops.back().getNode());
}
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
return DAG.getBuildVector(VT, SDLoc(BV), Ops);
}
// Otherwise, we're growing or shrinking the elements. To avoid having to
@ -7666,7 +7666,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
}
EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
return DAG.getBuildVector(VT, DL, Ops);
}
// Finally, this must be the case where we are shrinking elements: each input
@ -7696,7 +7696,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
}
return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
return DAG.getBuildVector(VT, DL, Ops);
}
/// Try to perform FMA combining on a given FADD node.
@ -11186,7 +11186,7 @@ SDValue DAGCombiner::getMergedConstantVectorStore(SelectionDAG &DAG,
BuildVector.push_back(St->getValue());
}
return DAG.getNode(ISD::BUILD_VECTOR, SL, Ty, BuildVector);
return DAG.getBuildVector(Ty, SL, BuildVector);
}
bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
@ -12212,7 +12212,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
}
// Return the new vector
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
return DAG.getBuildVector(VT, dl, Ops);
}
SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
@ -12589,7 +12589,7 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
if (!isTypeLegal(VecVT)) return SDValue();
// Make the new BUILD_VECTOR.
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
SDValue BV = DAG.getBuildVector(VecVT, dl, Ops);
// The new BUILD_VECTOR node has the potential to be further optimized.
AddToWorklist(BV.getNode());
@ -12662,7 +12662,7 @@ SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
else
Opnds.push_back(In.getOperand(0));
}
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Opnds);
SDValue BV = DAG.getBuildVector(NVT, dl, Opnds);
AddToWorklist(BV.getNode());
return DAG.getNode(Opcode, dl, VT, BV);
@ -12901,7 +12901,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
VT.getSizeInBits() / SVT.getSizeInBits());
return DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(ISD::BUILD_VECTOR, DL, VecVT, Ops));
DAG.getBuildVector(VecVT, DL, Ops));
}
// Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR
@ -13081,7 +13081,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
assert(VT.getVectorNumElements() == Opnds.size() &&
"Concat vector type mismatch");
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds);
return DAG.getBuildVector(VT, SDLoc(N), Opnds);
}
// Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
@ -13451,8 +13451,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
// Canonicalize any other splat as a build_vector.
const SDValue &Splatted = V->getOperand(SVN->getSplatIndex());
SmallVector<SDValue, 8> Ops(NumElts, Splatted);
SDValue NewBV = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N),
V->getValueType(0), Ops);
SDValue NewBV = DAG.getBuildVector(V->getValueType(0), SDLoc(N), Ops);
// We may have jumped through bitcasts, so the type of the
// BUILD_VECTOR may not match the type of the shuffle.
@ -13510,7 +13509,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
Op = TLI.isZExtFree(Op.getValueType(), SVT)
? DAG.getZExtOrTrunc(Op, SDLoc(N), SVT)
: DAG.getSExtOrTrunc(Op, SDLoc(N), SVT);
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Ops);
return DAG.getBuildVector(VT, SDLoc(N), Ops);
}
}


@ -1195,11 +1195,8 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
}
SDValue Result(N, 0);
if (VT.isVector()) {
SmallVector<SDValue, 8> Ops;
Ops.assign(VT.getVectorNumElements(), Result);
Result = getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
if (VT.isVector())
Result = getSplatBuildVector(VT, DL, Result);
return Result;
}
@ -1238,11 +1235,8 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP& V, SDLoc DL, EVT VT,
}
SDValue Result(N, 0);
if (VT.isVector()) {
SmallVector<SDValue, 8> Ops;
Ops.assign(VT.getVectorNumElements(), Result);
Result = getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
if (VT.isVector())
Result = getSplatBuildVector(VT, DL, Result);
return Result;
}
@ -1608,11 +1602,9 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
// If the shuffle itself creates a splat, build the vector directly.
if (AllSame && SameNumElts) {
const SDValue &Splatted = BV->getOperand(MaskVec[0]);
SmallVector<SDValue, 8> Ops(NElts, Splatted);
EVT BuildVT = BV->getValueType(0);
SDValue NewBV = getNode(ISD::BUILD_VECTOR, dl, BuildVT, Ops);
const SDValue &Splatted = BV->getOperand(MaskVec[0]);
SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
// We may have jumped through bitcasts, so the type of the
// BUILD_VECTOR may not match the type of the shuffle.
@ -3348,7 +3340,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
Outputs.resize(VT.getVectorNumElements(), Outputs.back());
// Build a big vector out of the scalar elements we generated.
return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
return getBuildVector(VT, SDLoc(), Outputs);
}
SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, SDLoc DL,
@ -3439,9 +3431,7 @@ SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, SDLoc DL,
ScalarResults.push_back(ScalarResult);
}
assert(ScalarResults.size() == NumElts &&
"Unexpected number of scalar results for BUILD_VECTOR");
return getNode(ISD::BUILD_VECTOR, DL, VT, ScalarResults);
return getBuildVector(VT, DL, ScalarResults);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
@ -3655,7 +3645,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
break;
}
if (Ops.size() == VT.getVectorNumElements())
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
return getBuildVector(VT, DL, Ops);
}
break;
}
@ -4086,13 +4076,9 @@ static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
}
if (VT != Value.getValueType() && !VT.isInteger())
Value = DAG.getNode(ISD::BITCAST, dl, VT.getScalarType(), Value);
if (VT != Value.getValueType()) {
assert(VT.getVectorElementType() == Value.getValueType() &&
"value type should be one vector element here");
SmallVector<SDValue, 8> BVOps(VT.getVectorNumElements(), Value);
Value = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, BVOps);
}
Value = DAG.getBitcast(VT.getScalarType(), Value);
if (VT != Value.getValueType())
Value = DAG.getSplatBuildVector(VT, dl, Value);
return Value;
}


@ -2212,8 +2212,7 @@ static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
// The values are implicitly truncated so sext vs. zext doesn't matter.
Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::getVectorVT(TruncVT, NumElts), Ops);
return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}
static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
@ -5542,29 +5541,28 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
Shuffle = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
makeArrayRef(TBLMask.data(), IndexLen)));
DAG.getBuildVector(IndexVT, DL,
makeArrayRef(TBLMask.data(), IndexLen)));
} else {
if (IndexLen == 8) {
V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst);
Shuffle = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
makeArrayRef(TBLMask.data(), IndexLen)));
DAG.getBuildVector(IndexVT, DL,
makeArrayRef(TBLMask.data(), IndexLen)));
} else {
// FIXME: We cannot, for the moment, emit a TBL2 instruction because we
// cannot currently represent the register constraints on the input
// table registers.
// Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst,
// DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
// &TBLMask[0], IndexLen));
// DAG.getBuildVector(IndexVT, DL, &TBLMask[0],
// IndexLen));
Shuffle = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32),
V1Cst, V2Cst,
DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
makeArrayRef(TBLMask.data(), IndexLen)));
DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst,
V2Cst, DAG.getBuildVector(IndexVT, DL,
makeArrayRef(TBLMask.data(), IndexLen)));
}
}
return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle);
@ -6072,7 +6070,7 @@ static SDValue NormalizeBuildVector(SDValue Op,
}
Ops.push_back(Lane);
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
return DAG.getBuildVector(VT, dl, Ops);
}
SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
@ -6373,7 +6371,7 @@ FailedModImm:
for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i)));
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts);
SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
Val = LowerBUILD_VECTOR(Val, DAG);
if (Val.getNode())
return DAG.getNode(ISD::BITCAST, dl, VT, Val);

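As the comment in the hunk above notes, integer BUILD_VECTOR operands may be wider than the element type and are implicitly truncated; getBuildVector inherits this from getNode, which is why i32 constants can feed a vector of narrower elements. A hedged sketch of that behaviour (illustrative types, assuming DAG and dl as in the surrounding code):

  // Sketch only: i32 operands of a v8i16 BUILD_VECTOR are implicitly
  // truncated to i16 by the node's semantics.
  SmallVector<SDValue, 8> Ops(8, DAG.getConstant(0x12345, dl, MVT::i32));
  SDValue Narrow = DAG.getBuildVector(MVT::v8i16, dl, Ops);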

@ -876,7 +876,7 @@ SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
for (const SDUse &U : Op->ops())
DAG.ExtractVectorElements(U.get(), Args);
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}
SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
@ -888,7 +888,7 @@ SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
VT.getVectorNumElements());
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}
SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
@ -1339,10 +1339,8 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
LHS_Lo, RHS_Lo);
SDValue DIV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32,
Res.getValue(0), zero);
SDValue REM = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32,
Res.getValue(1), zero);
SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), zero});
SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), zero});
Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
@ -1354,7 +1352,7 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
SDValue REM = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, REM_Lo, zero);
SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, zero});
REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
@ -1385,7 +1383,7 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
}
SDValue DIV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, DIV_Lo, DIV_Hi);
SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
Results.push_back(DIV);
Results.push_back(REM);
@ -1650,8 +1648,7 @@ SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
// Extend back to to 64-bits.
SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
Zero, SignBit);
SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
@ -2049,7 +2046,7 @@ SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
MVT::i32, FloorMul);
SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Lo, Hi);
SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
}
@ -2095,7 +2092,7 @@ SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
for (unsigned I = 0; I < NElts; ++I)
Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
return DAG.getBuildVector(VT, DL, Args);
}
//===----------------------------------------------------------------------===//
@ -2234,7 +2231,7 @@ SDValue AMDGPUTargetLowering::performAndCombine(SDNode *N,
DCI.AddToWorklist(Lo.getNode());
DCI.AddToWorklist(Hi.getNode());
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, LoAnd, HiAnd);
SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}
@ -2268,7 +2265,7 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Zero, NewShift);
SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}
@ -2291,8 +2288,7 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
DAG.getConstant(31, SL, MVT::i32));
SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
Hi, NewShift);
SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
}
@ -2301,8 +2297,7 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
DAG.getConstant(31, SL, MVT::i32));
SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
NewShift, NewShift);
SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
}
@ -2339,8 +2334,7 @@ SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
SDValue BuildPair = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
NewShift, Zero);
SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
}

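Several of the AMDGPU hunks share one idiom: two i32 halves are packed into an i64 by building a v2i32 and bitcasting the result, and braced initializer lists such as {Lo, Hi} work because getBuildVector takes an ArrayRef<SDValue>. A minimal sketch of the pattern (variable names illustrative, assuming DAG and SL as in the surrounding code):

  // Sketch only: pack two i32 halves into a single i64 value.
  SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
  SDValue Packed = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);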

@ -659,8 +659,8 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
MVT::f32, MVT::f32, DAG.getTargetConstant(slot, DL, MVT::i32),
RegisterJNode, RegisterINode);
return DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f32,
SDValue(interp, 0), SDValue(interp, 1));
return DAG.getBuildVector(MVT::v2f32, DL,
{SDValue(interp, 0), SDValue(interp, 1)});
}
case AMDGPUIntrinsic::r600_tex:
case AMDGPUIntrinsic::r600_texc:
@ -1374,7 +1374,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
DAG.getConstant(0, DL, MVT::i32),
Mask
};
SDValue Input = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Src);
SDValue Input = DAG.getBuildVector(MVT::v4i32, DL, Src);
SDValue Args[3] = { Chain, Input, DWordAddr };
return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
Op->getVTList(), Args, MemVT,
@ -1604,8 +1604,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
NewVT = VT;
NumElements = VT.getVectorNumElements();
}
Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT,
makeArrayRef(Slots, NumElements));
Result = DAG.getBuildVector(NewVT, DL, makeArrayRef(Slots, NumElements));
} else {
// non-constant ptr can't be folded, keeps it as a v4f32 load
Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
@ -1687,7 +1686,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
Loads[i] = DAG.getUNDEF(ElemVT);
}
EVT TargetVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, 4);
LoweredLoad = DAG.getNode(ISD::BUILD_VECTOR, DL, TargetVT, Loads);
LoweredLoad = DAG.getBuildVector(TargetVT, DL, Loads);
} else {
LoweredLoad = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, VT,
Chain, Ptr,
@ -1873,8 +1872,8 @@ static SDValue CompactSwizzlableVector(
}
}
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
VectorEntry.getValueType(), NewBldVec);
return DAG.getBuildVector(VectorEntry.getValueType(), SDLoc(VectorEntry),
NewBldVec);
}
static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
@ -1911,8 +1910,8 @@ static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
}
}
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
VectorEntry.getValueType(), NewBldVec);
return DAG.getBuildVector(VectorEntry.getValueType(), SDLoc(VectorEntry),
NewBldVec);
}
@ -2042,7 +2041,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
}
// Return the new vector
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
return DAG.getBuildVector(VT, dl, Ops);
}
// Extract_vec (Build_vector) generated by custom lowering


@ -848,7 +848,7 @@ SDValue SITargetLowering::LowerFormalArguments(
NumElements = Arg.VT.getVectorNumElements() - NumElements;
Regs.append(NumElements, DAG.getUNDEF(VT));
InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT, Regs));
InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
continue;
}
@ -1952,7 +1952,7 @@ SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, Lo, Hi);
SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
}
@ -2201,8 +2201,7 @@ SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) co
MVT SimpleVT = VT.getSimpleVT();
MVT VecType = MVT::getVectorVT(SimpleVT, 2);
SDValue NewOld = DAG.getNode(ISD::BUILD_VECTOR, DL, VecType,
New, Old);
SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
SDValue Ops[] = { ChainIn, Addr, NewOld };
SDVTList VTList = DAG.getVTList(VT, MVT::Other);
return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL,
@ -2310,7 +2309,7 @@ SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
assert(Ops.size() == NElts);
return DAG.getNode(ISD::BUILD_VECTOR, DL, FloatVT, Ops);
return DAG.getBuildVector(FloatVT, DL, Ops);
}
return SDValue();


@ -5677,7 +5677,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
Op.getOperand(i)));
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
Val = LowerBUILD_VECTOR(Val, DAG, ST);
if (Val.getNode())
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
@ -6076,10 +6076,10 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
if (V2.getNode()->isUndef())
return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, VTBLMask));
DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, VTBLMask));
DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
}
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
@ -6475,8 +6475,9 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), MVT::v2i32,
BVN->getOperand(LowElt), BVN->getOperand(LowElt+2));
return DAG.getBuildVector(
MVT::v2i32, SDLoc(N),
{BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
}
// Construct a new BUILD_VECTOR with elements truncated to half the size.
assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
@ -6493,8 +6494,7 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
// The values are implicitly truncated so sext vs. zext doesn't matter.
Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::getVectorVT(TruncVT, NumElts), Ops);
return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
@ -9518,7 +9518,7 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N,
DCI.AddToWorklist(V.getNode());
}
EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops);
SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
return DAG.getNode(ISD::BITCAST, dl, VT, BV);
}


@ -1401,8 +1401,8 @@ static SDValue lowerMSASplatZExt(SDValue Op, unsigned OpNr, SelectionDAG &DAG) {
SDValue Ops[16] = { LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB,
LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB };
SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy,
makeArrayRef(Ops, ViaVecTy.getVectorNumElements()));
SDValue Result = DAG.getBuildVector(
ViaVecTy, DL, makeArrayRef(Ops, ViaVecTy.getVectorNumElements()));
if (ViaVecTy != ResVecTy)
Result = DAG.getNode(ISD::BITCAST, DL, ResVecTy, Result);
@ -1442,8 +1442,8 @@ static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
SplatValueA, SplatValueB, SplatValueA, SplatValueB,
SplatValueA, SplatValueB, SplatValueA, SplatValueB };
SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy,
makeArrayRef(Ops, ViaVecTy.getVectorNumElements()));
SDValue Result = DAG.getBuildVector(
ViaVecTy, DL, makeArrayRef(Ops, ViaVecTy.getVectorNumElements()));
if (VecTy != ViaVecTy)
Result = DAG.getNode(ISD::BITCAST, DL, VecTy, Result);
@ -1471,10 +1471,10 @@ static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
if (BigEndian)
std::swap(BitImmLoOp, BitImmHiOp);
Exp2Imm =
DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, BitImmLoOp,
BitImmHiOp, BitImmLoOp, BitImmHiOp));
Exp2Imm = DAG.getNode(
ISD::BITCAST, DL, MVT::v2i64,
DAG.getBuildVector(MVT::v4i32, DL,
{BitImmLoOp, BitImmHiOp, BitImmLoOp, BitImmHiOp}));
}
}
@ -1860,7 +1860,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
// If ResTy is v2i64 then the type legalizer will break this node down into
// an equivalent v4i32.
return DAG.getNode(ISD::BUILD_VECTOR, DL, ResTy, Ops);
return DAG.getBuildVector(ResTy, DL, Ops);
}
case Intrinsic::mips_fexp2_w:
case Intrinsic::mips_fexp2_d: {
@ -2841,7 +2841,7 @@ static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
++I)
Ops.push_back(DAG.getTargetConstant(*I, DL, MaskEltTy));
SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, Ops);
SDValue MaskVec = DAG.getBuildVector(MaskVecTy, DL, Ops);
if (Using1stVec && Using2ndVec) {
Op0 = Op->getOperand(0);


@ -1698,7 +1698,7 @@ NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
DAG.getIntPtrConstant(j, dl)));
}
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Ops);
return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
}
/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
@ -4386,7 +4386,7 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
SDValue LoadChain = NewLD.getValue(NumElts);
SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);
Results.push_back(BuildVec);
Results.push_back(LoadChain);
@ -4499,7 +4499,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
SDValue LoadChain = NewLD.getValue(NumElts);
SDValue BuildVec =
DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
DAG.getBuildVector(ResVT, DL, ScalarRes);
Results.push_back(BuildVec);
Results.push_back(LoadChain);


@ -7587,8 +7587,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
MVT::i32));
}
SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
ResultMask);
SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
if (isLittleEndian)
return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
V2, V1, VPermMask);
@ -7952,8 +7951,7 @@ SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
}
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl,
Op.getValueType(), Vals);
SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
if (LN->isIndexed()) {
SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
@ -7986,7 +7984,7 @@ SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
}
LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i1, VectElmts);
SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
SDValue RVals[] = { Value, LoadChain };
return DAG.getMergeValues(RVals, dl);


@ -820,8 +820,7 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
// extend from i64 to full vector size and then bitcast.
assert(VA.getLocVT() == MVT::i64);
assert(VA.getValVT().isVector());
Value = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i64,
Value, DAG.getUNDEF(MVT::i64));
Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
} else
assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
@ -3720,7 +3719,7 @@ static SDValue getGeneralPermuteNode(SelectionDAG &DAG, SDLoc DL, SDValue *Ops,
IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
else
IndexNodes[I] = DAG.getUNDEF(MVT::i32);
SDValue Op2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, IndexNodes);
SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
}
@ -3904,7 +3903,7 @@ static SDValue buildScalarToVector(SelectionDAG &DAG, SDLoc DL, EVT VT,
if (Value.getOpcode() == ISD::Constant ||
Value.getOpcode() == ISD::ConstantFP) {
SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
return DAG.getBuildVector(VT, DL, Ops);
}
if (Value.isUndef())
return DAG.getUNDEF(VT);
@ -4057,7 +4056,7 @@ static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
for (auto &Op : GS.Ops) {
if (!Op.getNode()) {
Op = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BVN), VT, ResidueOps);
Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
break;
}
}
@ -4154,7 +4153,7 @@ static SDValue buildVector(SelectionDAG &DAG, SDLoc DL, EVT VT,
for (unsigned I = 0; I < NumElements; ++I)
if (!Constants[I].getNode())
Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
Result = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Constants);
Result = DAG.getBuildVector(VT, DL, Constants);
} else {
// Otherwise try to use VLVGP to start the sequence in order to
// avoid a false dependency on any previous contents of the vector


@ -4313,7 +4313,7 @@ static SDValue getConstVector(ArrayRef<int> Values, MVT VT,
Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
DAG.getConstant(0, dl, EltVT));
}
SDValue ConstsNode = DAG.getNode(ISD::BUILD_VECTOR, dl, ConstVecVT, Ops);
SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
if (Split)
ConstsNode = DAG.getBitcast(VT, ConstsNode);
return ConstsNode;
@ -4373,8 +4373,8 @@ static SDValue extractSubVector(SDValue Vec, unsigned IdxVal,
// If the input is a buildvector just emit a smaller one.
if (Vec.getOpcode() == ISD::BUILD_VECTOR)
return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
makeArrayRef(Vec->op_begin() + IdxVal, ElemsPerChunk));
return DAG.getNode(ISD::BUILD_VECTOR,
dl, ResultVT, makeArrayRef(Vec->op_begin() + IdxVal, ElemsPerChunk));
SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
@ -6432,8 +6432,8 @@ static SDValue lowerBuildVectorToBitOp(SDValue Op, SelectionDAG &DAG) {
RHSElts.push_back(RHS);
}
SDValue LHS = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, LHSElts);
SDValue RHS = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, RHSElts);
SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
return DAG.getNode(Opcode, DL, VT, LHS, RHS);
}
@ -6655,10 +6655,10 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
// Build both the lower and upper subvector.
SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
makeArrayRef(&Ops[0], NumElems/2));
SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
makeArrayRef(&Ops[NumElems / 2], NumElems/2));
SDValue Lower =
DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElems / 2));
SDValue Upper = DAG.getBuildVector(
HVT, dl, makeArrayRef(&Ops[NumElems / 2], NumElems / 2));
// Recreate the wider vector with the lower and upper part.
if (VT.is256BitVector())
@ -7208,7 +7208,7 @@ static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
if (!V)
return SDValue(); // No non-zeroable elements!
SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
SDValue VMask = DAG.getBuildVector(VT, DL, VMaskOps);
V = DAG.getNode(VT.isFloatingPoint()
? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
DL, VT, V, VMask);
@ -7236,7 +7236,7 @@ static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
}
SDValue V1Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, MaskOps);
SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
// We have to cast V2 around.
MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
@ -7399,10 +7399,9 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
V1 = DAG.getBitcast(BlendVT, V1);
V2 = DAG.getBitcast(BlendVT, V2);
return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, DL, BlendVT,
DAG.getNode(ISD::BUILD_VECTOR, DL,
BlendVT, VSELECTMask),
V1, V2));
return DAG.getBitcast(
VT, DAG.getNode(ISD::VSELECT, DL, BlendVT,
DAG.getBuildVector(BlendVT, DL, VSELECTMask), V1, V2));
}
default:
@ -7951,10 +7950,9 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
(i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
}
InputV = DAG.getBitcast(MVT::v16i8, InputV);
return DAG.getBitcast(VT,
DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
DAG.getNode(ISD::BUILD_VECTOR, DL,
MVT::v16i8, PSHUFBMask)));
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
}
// If we are extending from an offset, ensure we start on a boundary that
@ -9636,11 +9634,11 @@ static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
if (V1InUse)
V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
DAG.getBitcast(MVT::v16i8, V1),
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
DAG.getBuildVector(MVT::v16i8, DL, V1Mask));
if (V2InUse)
V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
DAG.getBitcast(MVT::v16i8, V2),
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
DAG.getBuildVector(MVT::v16i8, DL, V2Mask));
// If we need shuffled inputs from both, blend the two.
SDValue V;
@ -10273,8 +10271,8 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
LoOps.push_back(BV->getOperand(i));
HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
}
LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
}
return std::make_pair(DAG.getBitcast(SplitVT, LoV),
DAG.getBitcast(SplitVT, HiV));
@ -11171,14 +11169,12 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
: DAG.getConstant(Mask[i], DL, MVT::i32);
if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
return DAG.getNode(
X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
DAG.getBuildVector(MVT::v8i32, DL, VPermMask));
if (Subtarget.hasAVX2())
return DAG.getNode(
X86ISD::VPERMV, DL, MVT::v8f32,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
DAG.getBuildVector(MVT::v8i32, DL, VPermMask), V1);
// Otherwise, fall back.
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
@ -11271,9 +11267,8 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
for (int i = 0; i < 8; ++i)
VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
: DAG.getConstant(Mask[i], DL, MVT::i32);
return DAG.getNode(
X86ISD::VPERMV, DL, MVT::v8i32,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32,
DAG.getBuildVector(MVT::v8i32, DL, VPermMask), V1);
}
// Try to simplify this by merging 128-bit lanes to enable a lane-based
@ -11367,11 +11362,11 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
PSHUFBMask[2 * i] = DAG.getConstant(2 * M, DL, MVT::i8);
PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, DL, MVT::i8);
}
return DAG.getBitcast(MVT::v16i16,
DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8,
DAG.getBitcast(MVT::v32i8, V1),
DAG.getNode(ISD::BUILD_VECTOR, DL,
MVT::v32i8, PSHUFBMask)));
return DAG.getBitcast(
MVT::v16i16,
DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8,
DAG.getBitcast(MVT::v32i8, V1),
DAG.getBuildVector(MVT::v32i8, DL, PSHUFBMask)));
}
// Try to simplify this by merging 128-bit lanes to enable a lane-based
@ -11451,9 +11446,8 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
: DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, DL,
MVT::i8);
return DAG.getNode(
X86ISD::PSHUFB, DL, MVT::v32i8, V1,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
return DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, V1,
DAG.getBuildVector(MVT::v32i8, DL, PSHUFBMask));
}
// Try to simplify this by merging 128-bit lanes to enable a lane-based
@ -13952,7 +13946,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
for (unsigned j = 0; j < 8; ++j)
pshufbMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
}
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
SDValue BV = DAG.getBuildVector(MVT::v32i8, DL, pshufbMask);
In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
In = DAG.getBitcast(MVT::v4i64, In);
@ -15004,7 +14998,7 @@ static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
ULTOp1.push_back(DAG.getConstant(Val - 1, dl, EVT));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
return DAG.getBuildVector(VT, dl, ULTOp1);
}
static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
@ -15228,8 +15222,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
} else {
SDValue Sign = DAG.getConstant(0x80000000U, dl, MVT::i32);
SDValue Zero = DAG.getConstant(0x00000000U, dl, MVT::i32);
SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
Sign, Zero, Sign, Zero);
SB = DAG.getBuildVector(MVT::v4i32, dl, {Sign, Zero, Sign, Zero});
}
Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
@ -16825,7 +16818,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
break;
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
return DAG.getBuildVector(VT, dl, Elts);
}
return DAG.getNode(Opc, dl, VT, SrcOp,
@ -16873,7 +16866,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
ShOps.push_back(DAG.getUNDEF(SVT));
MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
ShAmt = DAG.getBuildVector(BVT, dl, ShOps);
}
// The return type has to be a 128-bit type with the same element
@ -19685,7 +19678,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
}
Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
}
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
SDValue BV = DAG.getBuildVector(VT, dl, Elts);
return DAG.getNode(ISD::MUL, dl, VT, R, BV);
}
@ -20384,7 +20377,7 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
Elts.append(NumElts, DAG.getUNDEF(SVT));
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
SDValue BV = DAG.getBuildVector(NewVT, dl, Elts);
SDValue ToV2F64 = DAG.getBitcast(MVT::v2f64, BV);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
DAG.getIntPtrConstant(0, dl));
@ -20508,7 +20501,7 @@ static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, SDLoc DL,
SmallVector<SDValue, 64> LUTVec;
for (int i = 0; i < NumByteElts; ++i)
LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
SDValue InRegLUT = DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVecVT, LUTVec);
SDValue InRegLUT = DAG.getBuildVector(ByteVecVT, DL, LUTVec);
SDValue M0F = DAG.getConstant(0x0F, DL, ByteVecVT);
// High nibbles
@ -20692,7 +20685,7 @@ static SDValue LowerBITREVERSE(SDValue Op, SelectionDAG &DAG) {
}
}
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, MaskElts);
SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
SDValue Res = DAG.getBitcast(MVT::v16i8, In);
Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
Res, Mask);
@ -20911,7 +20904,7 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
DAG.getUNDEF(EltVT);
for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
Ops.push_back(FillVal);
return DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Ops);
return DAG.getBuildVector(NVT, dl, Ops);
}
SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
DAG.getUNDEF(NVT);
@ -21593,7 +21586,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
ToVecInt, DAG.getIntPtrConstant(i, dl)));
Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
Results.push_back(DAG.getBuildVector(DstVT, dl, Elts));
}
}
}
@ -24202,8 +24195,7 @@ static bool combineX86ShuffleChain(SDValue Input, SDValue Root,
MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
Res = DAG.getBitcast(ByteVT, Input);
DCI.AddToWorklist(Res.getNode());
SDValue PSHUFBMaskOp =
DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVT, PSHUFBMask);
SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
DCI.AddToWorklist(PSHUFBMaskOp.getNode());
Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
DCI.AddToWorklist(Res.getNode());
@ -26831,8 +26823,7 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
if (RHSConstSplat) {
N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getVectorElementType(),
SDValue(RHSConstSplat, 0));
SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
N1 = DAG.getSplatBuildVector(WideVT, DL, N1);
} else if (RHSTrunc) {
N1 = N1->getOperand(0);
}