getValueType().getSizeInBits() -> getValueSizeInBits() ; NFCI
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@281493 91177308-0d34-0410-b5e6-96231b3b80d8
Parent: e9c64c5da5
Commit: a7c48ccd3f
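getValueSizeInBits() is a thin convenience wrapper around getValueType().getSizeInBits() on SDValue, so every hunk below is a purely mechanical rewrite of call sites (NFCI). A minimal standalone sketch, using stand-in EVT/SDValue types rather than the real LLVM classes, shows why the two spellings are interchangeable:

#include <cassert>

// Stand-in for llvm::EVT: only the size query matters here.
struct EVT {
  unsigned SizeInBits;
  unsigned getSizeInBits() const { return SizeInBits; }
};

// Stand-in for llvm::SDValue: getValueSizeInBits() simply forwards to
// getValueType().getSizeInBits(), which is why the rename is a no-functional-change cleanup.
struct SDValue {
  EVT VT;
  EVT getValueType() const { return VT; }
  unsigned getValueSizeInBits() const { return getValueType().getSizeInBits(); }
};

int main() {
  SDValue N0{EVT{64}};
  // Old spelling and new spelling give the same answer.
  assert(N0.getValueType().getSizeInBits() == N0.getValueSizeInBits());
  return 0;
}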
@@ -2487,8 +2487,7 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
   if (isOneConstant(N1)) {
     SDLoc DL(N);
     return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0,
-                       DAG.getConstant(N0.getValueType().getSizeInBits() - 1,
-                                       DL,
+                       DAG.getConstant(N0.getValueSizeInBits() - 1, DL,
                                        getShiftAmountTy(N0.getValueType())));
   }
   // fold (mulhs x, undef) -> 0
@@ -6616,8 +6615,8 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
     SDValue InnerZExt = N0.getOperand(0);
     // If the original shl may be shifting out bits, do not perform this
     // transformation.
-    unsigned KnownZeroBits = InnerZExt.getValueType().getSizeInBits() -
-      InnerZExt.getOperand(0).getValueType().getSizeInBits();
+    unsigned KnownZeroBits = InnerZExt.getValueSizeInBits() -
+      InnerZExt.getOperand(0).getValueSizeInBits();
     if (ShAmtVal > KnownZeroBits)
       return SDValue();
   }
@@ -6878,7 +6877,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
     if ((ShAmt & (EVTBits-1)) == 0) {
       N0 = N0.getOperand(0);
       // Is the load width a multiple of size of VT?
-      if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
+      if ((N0.getValueSizeInBits() & (EVTBits-1)) != 0)
        return SDValue();
     }

@@ -7587,7 +7586,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
   if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
       isa<ConstantFPSDNode>(N0.getOperand(0)) &&
       VT.isInteger() && !VT.isVector()) {
-    unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
+    unsigned OrigXWidth = N0.getOperand(1).getValueSizeInBits();
     EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
     if (isTypeLegal(IntXVT)) {
       SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1));
@@ -12292,7 +12291,7 @@ SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
     return SDValue();

   // Match shift amount to HalfValBitSize.
-  unsigned HalfValBitSize = Val.getValueType().getSizeInBits() / 2;
+  unsigned HalfValBitSize = Val.getValueSizeInBits() / 2;
   ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1));
   if (!ShAmt || ShAmt->getAPIntValue() != HalfValBitSize)
     return SDValue();
@@ -12301,10 +12300,10 @@ SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
   // to i64.
   if (Lo.getOpcode() != ISD::ZERO_EXTEND || !Lo.hasOneUse() ||
       !Lo.getOperand(0).getValueType().isScalarInteger() ||
-      Lo.getOperand(0).getValueType().getSizeInBits() > HalfValBitSize ||
+      Lo.getOperand(0).getValueSizeInBits() > HalfValBitSize ||
       Hi.getOpcode() != ISD::ZERO_EXTEND || !Hi.hasOneUse() ||
       !Hi.getOperand(0).getValueType().isScalarInteger() ||
-      Hi.getOperand(0).getValueType().getSizeInBits() > HalfValBitSize)
+      Hi.getOperand(0).getValueSizeInBits() > HalfValBitSize)
     return SDValue();

   if (!TLI.isMultiStoresCheaperThanBitsMerge(Lo.getOperand(0),
@@ -1663,7 +1663,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,
   MachinePointerInfo PtrInfo =
       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);

-  unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
+  unsigned SrcSize = SrcOp.getValueSizeInBits();
   unsigned SlotSize = SlotVT.getSizeInBits();
   unsigned DestSize = DestVT.getSizeInBits();
   Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
@@ -951,11 +951,11 @@ void DAGTypeLegalizer::PromoteSetCCOperands(SDValue &NewLHS,SDValue &NewRHS,
     // than the width of NewLHS/NewRH, we can avoid inserting real truncate
     // instruction, which is redudant eventually.
     unsigned OpLEffectiveBits =
-      OpL.getValueType().getSizeInBits() - DAG.ComputeNumSignBits(OpL) + 1;
+      OpL.getValueSizeInBits() - DAG.ComputeNumSignBits(OpL) + 1;
     unsigned OpREffectiveBits =
-      OpR.getValueType().getSizeInBits() - DAG.ComputeNumSignBits(OpR) + 1;
-    if (OpLEffectiveBits <= NewLHS.getValueType().getSizeInBits() &&
-        OpREffectiveBits <= NewRHS.getValueType().getSizeInBits()) {
+      OpR.getValueSizeInBits() - DAG.ComputeNumSignBits(OpR) + 1;
+    if (OpLEffectiveBits <= NewLHS.getValueSizeInBits() &&
+        OpREffectiveBits <= NewRHS.getValueSizeInBits()) {
       NewLHS = OpL;
       NewRHS = OpR;
     } else {
@@ -1053,7 +1053,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {
   // Promote the inserted value. The type does not need to match the
   // vector element type. Check that any extra bits introduced will be
   // truncated away.
-  assert(N->getOperand(0).getValueType().getSizeInBits() >=
+  assert(N->getOperand(0).getValueSizeInBits() >=
          N->getValueType(0).getVectorElementType().getSizeInBits() &&
          "Type of inserted value narrower than vector element type!");

@@ -1083,7 +1083,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,
     // have to match the vector element type.

     // Check that any extra bits introduced will be truncated away.
-    assert(N->getOperand(1).getValueType().getSizeInBits() >=
+    assert(N->getOperand(1).getValueSizeInBits() >=
            N->getValueType(0).getVectorElementType().getSizeInBits() &&
            "Type of inserted value narrower than vector element type!");
     return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
@@ -2075,7 +2075,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
     if (ExtType == ISD::SEXTLOAD) {
       // The high part is obtained by SRA'ing all but one of the bits of the
       // lo part.
-      unsigned LoSize = Lo.getValueType().getSizeInBits();
+      unsigned LoSize = Lo.getValueSizeInBits();
       Hi = DAG.getNode(ISD::SRA, dl, NVT, Lo,
                        DAG.getConstant(LoSize - 1, dl,
                                        TLI.getPointerTy(DAG.getDataLayout())));
@@ -2446,8 +2446,7 @@ void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N,
            "Operand over promoted?");
     // Split the promoted operand. This will simplify when it is expanded.
     SplitInteger(Res, Lo, Hi);
-    unsigned ExcessBits =
-      Op.getValueType().getSizeInBits() - NVT.getSizeInBits();
+    unsigned ExcessBits = Op.getValueSizeInBits() - NVT.getSizeInBits();
     Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Hi.getValueType(), Hi,
                      DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),
                                                         ExcessBits)));
@@ -2468,13 +2467,12 @@ ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDValue &Lo, SDValue &Hi) {
     // The high part gets the sign extension from the lo-part. This handles
     // things like sextinreg V:i64 from i8.
     Hi = DAG.getNode(ISD::SRA, dl, Hi.getValueType(), Lo,
-                     DAG.getConstant(Hi.getValueType().getSizeInBits() - 1, dl,
+                     DAG.getConstant(Hi.getValueSizeInBits() - 1, dl,
                                      TLI.getPointerTy(DAG.getDataLayout())));
   } else {
     // For example, extension of an i48 to an i64. Leave the low part alone,
     // sext_inreg the high part.
-    unsigned ExcessBits =
-      EVT.getSizeInBits() - Lo.getValueType().getSizeInBits();
+    unsigned ExcessBits = EVT.getSizeInBits() - Lo.getValueSizeInBits();
     Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Hi.getValueType(), Hi,
                      DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),
                                                         ExcessBits)));
@@ -2700,8 +2698,7 @@ void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N,
            "Operand over promoted?");
     // Split the promoted operand. This will simplify when it is expanded.
     SplitInteger(Res, Lo, Hi);
-    unsigned ExcessBits =
-      Op.getValueType().getSizeInBits() - NVT.getSizeInBits();
+    unsigned ExcessBits = Op.getValueSizeInBits() - NVT.getSizeInBits();
     Hi = DAG.getZeroExtendInReg(Hi, dl,
                                 EVT::getIntegerVT(*DAG.getContext(),
                                                   ExcessBits));
@@ -794,7 +794,7 @@ void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) {
   // Note that in some cases vector operation operands may be greater than
   // the vector element type. For example BUILD_VECTOR of type <1 x i1> with
   // a constant i8 operand.
-  assert(Result.getValueType().getSizeInBits() >=
+  assert(Result.getValueSizeInBits() >=
          Op.getValueType().getVectorElementType().getSizeInBits() &&
          "Invalid type for scalarized vector");
   AnalyzeNewValue(Result);
@@ -905,7 +905,7 @@ void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) {

 /// Convert to an integer of the same size.
 SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) {
-  unsigned BitWidth = Op.getValueType().getSizeInBits();
+  unsigned BitWidth = Op.getValueSizeInBits();
   return DAG.getNode(ISD::BITCAST, SDLoc(Op),
                      EVT::getIntegerVT(*DAG.getContext(), BitWidth), Op);
 }
@@ -1145,7 +1145,7 @@ void DAGTypeLegalizer::SplitInteger(SDValue Op,
                                     SDValue &Lo, SDValue &Hi) {
   SDLoc dl(Op);
   assert(LoVT.getSizeInBits() + HiVT.getSizeInBits() ==
-         Op.getValueType().getSizeInBits() && "Invalid integer splitting!");
+         Op.getValueSizeInBits() && "Invalid integer splitting!");
   Lo = DAG.getNode(ISD::TRUNCATE, dl, LoVT, Op);
   Hi = DAG.getNode(ISD::SRL, dl, Op.getValueType(), Op,
                    DAG.getConstant(LoVT.getSizeInBits(), dl,
@@ -1157,8 +1157,8 @@ void DAGTypeLegalizer::SplitInteger(SDValue Op,
 /// size of Op's.
 void DAGTypeLegalizer::SplitInteger(SDValue Op,
                                     SDValue &Lo, SDValue &Hi) {
-  EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(),
-                                 Op.getValueType().getSizeInBits()/2);
+  EVT HalfVT =
+      EVT::getIntegerVT(*DAG.getContext(), Op.getValueSizeInBits() / 2);
   SplitInteger(Op, HalfVT, HalfVT, Lo, Hi);
 }

@@ -141,10 +141,9 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
       if (DAG.getDataLayout().isBigEndian())
         std::swap(LHS, RHS);

-      Vals.push_back(DAG.getNode(ISD::BUILD_PAIR, dl,
-                                 EVT::getIntegerVT(
-                                   *DAG.getContext(),
-                                   LHS.getValueType().getSizeInBits() << 1),
+      Vals.push_back(DAG.getNode(
+          ISD::BUILD_PAIR, dl,
+          EVT::getIntegerVT(*DAG.getContext(), LHS.getValueSizeInBits() << 1),
                                  LHS, RHS));
     }
     Lo = Vals[Slot++];
@@ -951,7 +951,7 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
   // If the mask and the type are different sizes, unroll the vector op. This
   // can occur when getSetCCResultType returns something that is different in
   // size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8.
-  if (VT.getSizeInBits() != Op1.getValueType().getSizeInBits())
+  if (VT.getSizeInBits() != Op1.getValueSizeInBits())
     return DAG.UnrollVectorOp(Op.getNode());

   // Bitcast the operands to be the same type as the mask.
@@ -882,7 +882,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
       DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, MachinePointerInfo());

   // Increment the pointer to the other part.
-  unsigned IncrementSize = Lo.getValueType().getSizeInBits() / 8;
+  unsigned IncrementSize = Lo.getValueSizeInBits() / 8;
   StackPtr =
       DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                   DAG.getConstant(IncrementSize, dl, StackPtr.getValueType()));
@@ -1014,7 +1014,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
       DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, MachinePointerInfo());

   // Increment the pointer to the other part.
-  unsigned IncrementSize = Lo.getValueType().getSizeInBits() / 8;
+  unsigned IncrementSize = Lo.getValueSizeInBits() / 8;
   StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                          DAG.getConstant(IncrementSize, dl,
                                          StackPtr.getValueType()));
@@ -1022,7 +1022,7 @@ SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
 SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
   assert(VT.isVector() && "This DAG node is restricted to vector types.");
-  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
+  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
          "The sizes of the input and result must match in order to perform the "
          "extend in-register.");
   assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
@@ -1033,7 +1033,7 @@ SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
 SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                                EVT VT) {
   assert(VT.isVector() && "This DAG node is restricted to vector types.");
-  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
+  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
          "The sizes of the input and result must match in order to perform the "
          "extend in-register.");
   assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
@@ -1044,7 +1044,7 @@ SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
 SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                                EVT VT) {
   assert(VT.isVector() && "This DAG node is restricted to vector types.");
-  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
+  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
          "The sizes of the input and result must match in order to perform the "
          "extend in-register.");
   assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
@@ -2441,7 +2441,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
     const unsigned Index =
       cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
-    const unsigned BitWidth = Op.getValueType().getSizeInBits();
+    const unsigned BitWidth = Op.getValueSizeInBits();

     // Remove low part of known bits mask
     KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth);
@@ -2707,9 +2707,8 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
     break;
   case ISD::EXTRACT_ELEMENT: {
     const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
-    const int BitWidth = Op.getValueType().getSizeInBits();
-    const int Items =
-      Op.getOperand(0).getValueType().getSizeInBits() / BitWidth;
+    const int BitWidth = Op.getValueSizeInBits();
+    const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;

     // Get reverse index (starting from 1), Op1 value indexes elements from
     // little end. Sign starts at big end.
@@ -3162,8 +3161,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     break;
   case ISD::BITCAST:
     // Basic sanity checking.
-    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
-           && "Cannot BITCAST between types of different sizes!");
+    assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
+           "Cannot BITCAST between types of different sizes!");
     if (VT == Operand.getValueType()) return Operand; // noop conversion.
     if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
       return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
@@ -3577,8 +3576,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     // amounts. This catches things like trying to shift an i1024 value by an
     // i8, which is easy to fall into in generic code that uses
     // TLI.getShiftAmount().
-    assert(N2.getValueType().getSizeInBits() >=
-           Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
+    assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
            "Invalid use of small shift amount with oversized value!");

     // Always fold shifts of i1 values so the code generator doesn't need to
@@ -183,7 +183,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
       Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
       Hi =
           DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
-                      DAG.getConstant(Lo.getValueType().getSizeInBits(), DL,
+                      DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                       TLI.getPointerTy(DAG.getDataLayout())));
       Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
       Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
@@ -2638,7 +2638,7 @@ void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
   // Coerce the shift amount to the right type if we can.
   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
     unsigned ShiftSize = ShiftTy.getSizeInBits();
-    unsigned Op2Size = Op2.getValueType().getSizeInBits();
+    unsigned Op2Size = Op2.getValueSizeInBits();
     SDLoc DL = getCurSDLoc();

     // If the operand is smaller than the shift count type, promote it.
@@ -2649,7 +2649,7 @@ void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
     // count type has enough bits to represent any shift value, truncate
     // it now. This is a common case and it exposes the truncate to
     // optimization early.
-    else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
+    else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
     // Otherwise we'll need to temporarily settle for some other convenient
     // type. Type legalization will make adjustments once the shiftee is split.
@@ -3491,7 +3491,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
               NodeToMatch->getValueType(i) == MVT::iPTR ||
               Res.getValueType() == MVT::iPTR ||
               NodeToMatch->getValueType(i).getSizeInBits() ==
-              Res.getValueType().getSizeInBits()) &&
+              Res.getValueSizeInBits()) &&
              "invalid replacement");
       CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, i), Res);
     }
@@ -351,8 +351,7 @@ spillIncomingStatepointValue(SDValue Incoming, SDValue Chain,
   // can consider allowing spills of smaller values to larger slots
   // (i.e. change the '==' in the assert below to a '>=').
   MachineFrameInfo &MFI = Builder.DAG.getMachineFunction().getFrameInfo();
-  assert((MFI.getObjectSize(Index) * 8) ==
-         Incoming.getValueType().getSizeInBits() &&
+  assert((MFI.getObjectSize(Index) * 8) == Incoming.getValueSizeInBits() &&
          "Bad spill: stack slot does not match!");
 #endif

@@ -1133,7 +1133,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
     if (!TLO.LegalOperations() &&
         !Op.getValueType().isVector() &&
         !Op.getOperand(0).getValueType().isVector() &&
-        NewMask == APInt::getSignBit(Op.getValueType().getSizeInBits()) &&
+        NewMask == APInt::getSignBit(Op.getValueSizeInBits()) &&
         Op.getOperand(0).getValueType().isFloatingPoint()) {
       bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());
       bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
@@ -1144,10 +1144,10 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
         // Make a FGETSIGN + SHL to move the sign bit into the appropriate
         // place. We expect the SHL to be eliminated by other optimizations.
         SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0));
-        unsigned OpVTSizeInBits = Op.getValueType().getSizeInBits();
+        unsigned OpVTSizeInBits = Op.getValueSizeInBits();
         if (!OpVTLegal && OpVTSizeInBits > 32)
           Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign);
-        unsigned ShVal = Op.getValueType().getSizeInBits()-1;
+        unsigned ShVal = Op.getValueSizeInBits() - 1;
         SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, Op.getValueType());
         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
                                                  Op.getValueType(),
@@ -1414,7 +1414,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
       const APInt &ShAmt
         = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
-          ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
+          ShAmt == Log2_32(N0.getValueSizeInBits())) {
         if ((C1 == 0) == (Cond == ISD::SETEQ)) {
           // (srl (ctlz x), 5) == 0 -> X != 0
           // (srl (ctlz x), 5) != 1 -> X != 0
@@ -1436,8 +1436,8 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
         CTPOP = N0.getOperand(0);

       if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
-          (N0 == CTPOP || N0.getValueType().getSizeInBits() >
-           Log2_32_Ceil(CTPOP.getValueType().getSizeInBits()))) {
+          (N0 == CTPOP ||
+           N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) {
         EVT CTVT = CTPOP.getValueType();
         SDValue CTOp = CTPOP.getOperand(0);

@@ -1558,7 +1558,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
       APInt bestMask;
       unsigned bestWidth = 0, bestOffset = 0;
       if (!Lod->isVolatile() && Lod->isUnindexed()) {
-        unsigned origWidth = N0.getValueType().getSizeInBits();
+        unsigned origWidth = N0.getValueSizeInBits();
         unsigned maskWidth = origWidth;
         // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
         // 8 bits, but have to be careful...
@@ -1605,7 +1605,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,

     // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
     if (N0.getOpcode() == ISD::ZERO_EXTEND) {
-      unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();
+      unsigned InSize = N0.getOperand(0).getValueSizeInBits();

       // If the comparison constant has bits in the upper part, the
       // zero-extended value could never match.
@@ -349,7 +349,7 @@ bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
     return false;

   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
-    unsigned BitSize = N.getValueType().getSizeInBits();
+    unsigned BitSize = N.getValueSizeInBits();
     unsigned Val = RHS->getZExtValue() & (BitSize - 1);
     unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

@@ -3715,7 +3715,7 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
       // Don't combine AND since emitComparison converts the AND to an ANDS
       // (a.k.a. TST) and the test in the test bit and branch instruction
       // becomes redundant. This would also increase register pressure.
-      uint64_t Mask = LHS.getValueType().getSizeInBits() - 1;
+      uint64_t Mask = LHS.getValueSizeInBits() - 1;
       return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS,
                          DAG.getConstant(Mask, dl, MVT::i64), Dest);
     }
@@ -3725,7 +3725,7 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
       // Don't combine AND since emitComparison converts the AND to an ANDS
      // (a.k.a. TST) and the test in the test bit and branch instruction
       // becomes redundant. This would also increase register pressure.
-      uint64_t Mask = LHS.getValueType().getSizeInBits() - 1;
+      uint64_t Mask = LHS.getValueSizeInBits() - 1;
       return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS,
                          DAG.getConstant(Mask, dl, MVT::i64), Dest);
     }
@@ -5412,7 +5412,7 @@ static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
       VT.getVectorElementType() != V1.getValueType().getVectorElementType())
     return SDValue();

-  bool SplitV0 = V0.getValueType().getSizeInBits() == 128;
+  bool SplitV0 = V0.getValueSizeInBits() == 128;

   if (!isConcatMask(Mask, VT, SplitV0))
     return SDValue();
@@ -5423,7 +5423,7 @@ static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
     V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
                      DAG.getConstant(0, DL, MVT::i64));
   }
-  if (V1.getValueType().getSizeInBits() == 128) {
+  if (V1.getValueSizeInBits() == 128) {
     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
                      DAG.getConstant(0, DL, MVT::i64));
   }
@@ -5554,7 +5554,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,

   MVT IndexVT = MVT::v8i8;
   unsigned IndexLen = 8;
-  if (Op.getValueType().getSizeInBits() == 128) {
+  if (Op.getValueSizeInBits() == 128) {
     IndexVT = MVT::v16i8;
     IndexLen = 16;
   }
@@ -6382,7 +6382,7 @@ FailedModImm:
       // DUPLANE works on 128-bit vectors, widen it if necessary.
       SDValue Lane = Value.getOperand(1);
       Value = Value.getOperand(0);
-      if (Value.getValueType().getSizeInBits() == 64)
+      if (Value.getValueSizeInBits() == 64)
         Value = WidenVector(Value, DAG);

       unsigned Opcode = getDUPLANEOp(VT.getVectorElementType());
@@ -6559,7 +6559,7 @@ SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
     return SDValue();
   unsigned Val = Cst->getZExtValue();

-  unsigned Size = Op.getValueType().getSizeInBits();
+  unsigned Size = Op.getValueSizeInBits();

   // This will get lowered to an appropriate EXTRACT_SUBREG in ISel.
   if (Val == 0)
@@ -7686,7 +7686,7 @@ static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
     return SDValue();

   // Only optimize when the source and destination types have the same width.
-  if (VT.getSizeInBits() != N->getOperand(0).getValueType().getSizeInBits())
+  if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
     return SDValue();

   // If the result of an integer load is only used by an integer-to-float
@@ -8189,7 +8189,7 @@ static SDValue tryCombineFixedPointConvert(SDNode *N,
   // The vector width should be 128 bits by the time we get here, even
   // if it started as 64 bits (the extract_vector handling will have
   // done so).
-  assert(Vec.getValueType().getSizeInBits() == 128 &&
+  assert(Vec.getValueSizeInBits() == 128 &&
          "unexpected vector size on extract_vector_elt!");
   if (Vec.getValueType() == MVT::v4i32)
     VecResTy = MVT::v4f32;
@@ -2235,7 +2235,7 @@ static
 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                          MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
                          const TargetInstrInfo *TII) {
-  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
+  unsigned Bytes = Arg.getValueSizeInBits() / 8;
   int FI = INT_MAX;
   if (Arg.getOpcode() == ISD::CopyFromReg) {
     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
@@ -2612,7 +2612,7 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
       continue;

     if (VT.getSizeInBits() == 64 &&
-        Operand.getValueType().getSizeInBits() == 32) {
+        Operand.getValueSizeInBits() == 32) {
       SDValue C = DAG.getConstant(0, dl, MVT::i32);
       Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);
     }
@@ -2677,7 +2677,7 @@ HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
     unsigned N = NElts-i-1;
     SDValue OpN = Op.getOperand(N);

-    if (VT.getSizeInBits() == 64 && OpN.getValueType().getSizeInBits() == 32) {
+    if (VT.getSizeInBits() == 64 && OpN.getValueSizeInBits() == 32) {
       SDValue C = DAG.getConstant(0, dl, MVT::i32);
       OpN = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, OpN);
     }
@@ -2857,8 +2857,7 @@ HexagonTargetLowering::LowerINSERT_VECTOR(SDValue Op,
                                   DAG.getConstant(32, dl, MVT::i64));
     SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);

-    if (VT.getSizeInBits() == 64 &&
-        Val.getValueType().getSizeInBits() == 32) {
+    if (VT.getSizeInBits() == 64 && Val.getValueSizeInBits() == 32) {
       SDValue C = DAG.getConstant(0, dl, MVT::i32);
       Val = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Val);
     }
@@ -4911,7 +4911,7 @@ bool NVPTXDAGToDAGISel::tryBFE(SDNode *N) {
     uint64_t StartVal = StartConst->getZExtValue();
     // How many "good" bits do we have left? "good" is defined here as bits
     // that exist in the original value, not shifted in.
-    uint64_t GoodBits = Start.getValueType().getSizeInBits() - StartVal;
+    uint64_t GoodBits = Start.getValueSizeInBits() - StartVal;
     if (NumBits > GoodBits) {
       // Do not handle the case where bits have been shifted in. In theory
       // we could handle this, but the cost is likely higher than just
@@ -5019,15 +5019,14 @@ bool NVPTXDAGToDAGISel::tryBFE(SDNode *N) {
     // If the outer shift is more than the type size, we have no bitfield to
     // extract (since we also check that the inner shift is <= the outer shift
     // then this also implies that the inner shift is < the type size)
-    if (OuterShiftAmt >= Val.getValueType().getSizeInBits()) {
+    if (OuterShiftAmt >= Val.getValueSizeInBits()) {
       return false;
     }

-    Start =
-        CurDAG->getTargetConstant(OuterShiftAmt - InnerShiftAmt, DL, MVT::i32);
-    Len =
-        CurDAG->getTargetConstant(Val.getValueType().getSizeInBits() -
-                                  OuterShiftAmt, DL, MVT::i32);
+    Start = CurDAG->getTargetConstant(OuterShiftAmt - InnerShiftAmt, DL,
+                                      MVT::i32);
+    Len = CurDAG->getTargetConstant(Val.getValueSizeInBits() - OuterShiftAmt,
+                                    DL, MVT::i32);

     if (N->getOpcode() == ISD::SRA) {
       // If we have a arithmetic right shift, we need to use the signed bfe
@@ -2444,7 +2444,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
       // 11 elem => 3 st.v4

       unsigned VecSize = 4;
-      if (OutVals[0].getValueType().getSizeInBits() == 64)
+      if (OutVals[0].getValueSizeInBits() == 64)
         VecSize = 2;

       unsigned Offset = 0;
@@ -2532,7 +2532,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
         TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);
         TheStoreType = MVT::i32;
       }
-      else if (TmpVal.getValueType().getSizeInBits() < 16)
+      else if (TmpVal.getValueSizeInBits() < 16)
         TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);

       SDValue Ops[] = {
@@ -3200,7 +3200,7 @@ SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) {
         Op0.getOperand(1) == Op1.getOperand(1) && CC == ISD::SETEQ &&
         isa<ConstantSDNode>(Op0.getOperand(1))) {

-      unsigned Bits = Op0.getValueType().getSizeInBits();
+      unsigned Bits = Op0.getValueSizeInBits();
       if (b != Bits/8-1)
         return false;
       if (Op0.getConstantOperandVal(1) != Bits-8)
@@ -3228,9 +3228,9 @@ SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) {

       // Now we need to make sure that the upper bytes are known to be
       // zero.
-      unsigned Bits = Op0.getValueType().getSizeInBits();
-      if (!CurDAG->MaskedValueIsZero(Op0,
-            APInt::getHighBitsSet(Bits, Bits - (b+1)*8)))
+      unsigned Bits = Op0.getValueSizeInBits();
+      if (!CurDAG->MaskedValueIsZero(
+              Op0, APInt::getHighBitsSet(Bits, Bits - (b + 1) * 8)))
         return false;

       LHS = Op0.getOperand(0);
@@ -3263,7 +3263,7 @@ SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) {
     } else if (Op.getOpcode() == ISD::SRL) {
       if (!isa<ConstantSDNode>(Op.getOperand(1)))
         return false;
-      unsigned Bits = Op.getValueType().getSizeInBits();
+      unsigned Bits = Op.getValueSizeInBits();
       if (b != Bits/8-1)
         return false;
       if (Op.getConstantOperandVal(1) != Bits-8)
@@ -4218,7 +4218,7 @@ CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                          SDValue Arg, int SPDiff, unsigned ArgOffset,
                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
   int Offset = ArgOffset + SPDiff;
-  uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
+  uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
   SDValue FIN = DAG.getFrameIndex(FI, VT);
@@ -117,7 +117,7 @@ static uint64_t allOnes(unsigned int Count) {
 // case the result will be truncated as part of the operation).
 struct RxSBGOperands {
   RxSBGOperands(unsigned Op, SDValue N)
-    : Opcode(Op), BitSize(N.getValueType().getSizeInBits()),
+    : Opcode(Op), BitSize(N.getValueSizeInBits()),
       Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),
       Rotate(0) {}

@@ -709,7 +709,7 @@ bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,

   // It's only an insertion if all bits are covered or are known to be zero.
   // The inner check covers all cases but is more expensive.
-  uint64_t Used = allOnes(Op.getValueType().getSizeInBits());
+  uint64_t Used = allOnes(Op.getValueSizeInBits());
   if (Used != (AndMask | InsertMask)) {
     APInt KnownZero, KnownOne;
     CurDAG->computeKnownBits(Op.getOperand(0), KnownZero, KnownOne);
@@ -749,7 +749,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
   case ISD::TRUNCATE: {
     if (RxSBG.Opcode == SystemZ::RNSBG)
       return false;
-    uint64_t BitSize = N.getValueType().getSizeInBits();
+    uint64_t BitSize = N.getValueSizeInBits();
     uint64_t Mask = allOnes(BitSize);
     if (!refineRxSBGMask(RxSBG, Mask))
       return false;
@@ -825,7 +825,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
   case ISD::ZERO_EXTEND:
     if (RxSBG.Opcode != SystemZ::RNSBG) {
       // Restrict the mask to the extended operand.
-      unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
+      unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
       if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
         return false;

@@ -837,7 +837,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
   case ISD::SIGN_EXTEND: {
     // Check that the extension bits are don't-care (i.e. are masked out
     // by the final mask).
-    unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
+    unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
     if (maskMatters(RxSBG, allOnes(RxSBG.BitSize) - allOnes(InnerBitSize)))
       return false;

@@ -851,7 +851,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
       return false;

     uint64_t Count = CountNode->getZExtValue();
-    unsigned BitSize = N.getValueType().getSizeInBits();
+    unsigned BitSize = N.getValueSizeInBits();
     if (Count < 1 || Count >= BitSize)
       return false;

@@ -878,7 +878,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
      return false;

     uint64_t Count = CountNode->getZExtValue();
-    unsigned BitSize = N.getValueType().getSizeInBits();
+    unsigned BitSize = N.getValueSizeInBits();
     if (Count < 1 || Count >= BitSize)
       return false;

@@ -1136,8 +1136,7 @@ bool SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {
   SDValue Value = Store->getValue();
   if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
     return false;
-  if (Store->getMemoryVT().getSizeInBits() !=
-      Value.getValueType().getSizeInBits())
+  if (Store->getMemoryVT().getSizeInBits() != Value.getValueSizeInBits())
     return false;

   SDValue ElemV = Value.getOperand(1);
@@ -1323,7 +1322,7 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {

   case ISD::STORE: {
     auto *Store = cast<StoreSDNode>(Node);
-    unsigned ElemBitSize = Store->getValue().getValueType().getSizeInBits();
+    unsigned ElemBitSize = Store->getValue().getValueSizeInBits();
     if (ElemBitSize == 32) {
       if (tryScatter(Store, SystemZ::VSCEF))
         return;
@@ -1860,8 +1860,7 @@ static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
       C.Op1.getOpcode() == ISD::Constant &&
       cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
     auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
-    if (L->getMemoryVT().getStoreSizeInBits()
-        <= C.Op0.getValueType().getSizeInBits()) {
+    if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
       unsigned Type = L->getExtensionType();
       if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
           (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
@@ -1880,7 +1879,7 @@ static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
     return false;

   uint64_t Amount = Shift->getZExtValue();
-  if (Amount >= N.getValueType().getSizeInBits())
+  if (Amount >= N.getValueSizeInBits())
     return false;

   ShiftVal = Amount;
@@ -2031,7 +2030,7 @@ static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,

   // Check whether the combination of mask, comparison value and comparison
   // type are suitable.
-  unsigned BitSize = NewC.Op0.getValueType().getSizeInBits();
+  unsigned BitSize = NewC.Op0.getValueSizeInBits();
   unsigned NewCCMask, ShiftVal;
   if (NewC.ICmpType != SystemZICMP::SignedOnly &&
       NewC.Op0.getOpcode() == ISD::SHL &&
@@ -4771,7 +4770,7 @@ SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
         // We're extracting the low part of one operand of the BUILD_VECTOR.
         Op = Op.getOperand(End / OpBytesPerElement - 1);
         if (!Op.getValueType().isInteger()) {
-          EVT VT = MVT::getIntegerVT(Op.getValueType().getSizeInBits());
+          EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
           Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
           DCI.AddToWorklist(Op.getNode());
         }
@@ -4871,8 +4870,7 @@ SDValue SystemZTargetLowering::combineSIGN_EXTEND(
     SDValue Inner = N0.getOperand(0);
     if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
       if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
-        unsigned Extra = (VT.getSizeInBits() -
-                          N0.getValueType().getSizeInBits());
+        unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
         unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
         unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
         EVT ShiftVT = N0.getOperand(1).getValueType();
@@ -3498,7 +3498,7 @@ static
 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                          MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
                          const X86InstrInfo *TII, const CCValAssign &VA) {
-  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
+  unsigned Bytes = Arg.getValueSizeInBits() / 8;

   for (;;) {
     // Look through nodes that don't alter the bits of the incoming value.
@@ -3567,7 +3567,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
   if (Offset != MFI.getObjectOffset(FI))
     return false;

-  if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
+  if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
     // If the argument location is wider than the argument type, check that any
     // extension flags match.
     if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
@@ -5842,7 +5842,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget &Subtarget,

     // AVX-512 has register version of the broadcast
     bool hasRegVer = Subtarget.hasAVX512() && VT.is512BitVector() &&
-                     Ld.getValueType().getSizeInBits() >= 32;
+                     Ld.getValueSizeInBits() >= 32;
     if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
                            !hasRegVer))
       return SDValue();
@@ -5850,7 +5850,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget &Subtarget,
     }
   }

-  unsigned ScalarSize = Ld.getValueType().getSizeInBits();
+  unsigned ScalarSize = Ld.getValueSizeInBits();
   bool IsGE256 = (VT.getSizeInBits() >= 256);

   // When optimizing for size, generate up to 5 extra bytes for a broadcast
@@ -6041,8 +6041,7 @@ static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
       Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
   }
   SDLoc dl(Op);
-  MVT VT =
-      MVT::getIntegerVT(std::max((int)Op.getValueType().getSizeInBits(), 8));
+  MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
   return DAG.getConstant(Immediate, dl, VT);
 }
 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
@@ -7263,7 +7262,7 @@ static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
   bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
   bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());

-  int VectorSizeInBits = V1.getValueType().getSizeInBits();
+  int VectorSizeInBits = V1.getValueSizeInBits();
   int ScalarSizeInBits = VectorSizeInBits / Mask.size();
   assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");

@@ -13609,7 +13608,7 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
   // shouldn't be necessary except that RFP cannot be live across
   // multiple blocks. When stackifier is fixed, they can be uncoupled.
   MachineFunction &MF = DAG.getMachineFunction();
-  unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
+  unsigned SSFISize = Op.getValueSizeInBits()/8;
   int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
   auto PtrVT = getPointerTy(MF.getDataLayout());
   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
@@ -31392,8 +31391,7 @@ static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
   // If the reduction vector is at least as wide as the psadbw result, just
   // bitcast. If it's narrower, truncate - the high i32 of each i64 is zero
   // anyway.
-  MVT ResVT =
-      MVT::getVectorVT(MVT::i32, Sad.getValueType().getSizeInBits() / 32);
+  MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
   if (VT.getSizeInBits() >= ResVT.getSizeInBits())
     Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
   else
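The comment in the hunk above relies on a small piece of lane arithmetic: the psadbw result is reinterpreted as i32 lanes, so the element count of ResVT is just the bit width of Sad divided by 32. The sketch below works that through for two common widths; the helper name is invented for illustration and is not part of the patch or of LLVM.

// Sketch only: mirrors the arithmetic in MVT::getVectorVT(MVT::i32, Bits / 32).
#include <cassert>

static unsigned numI32Lanes(unsigned SadBits) {
  return SadBits / 32; // one i32 lane per 32 bits of the psadbw result
}

int main() {
  assert(numI32Lanes(128) == 4); // a 128-bit Sad value is viewed as v4i32
  assert(numI32Lanes(256) == 8); // a 256-bit Sad value is viewed as v8i32
  return 0;
}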
@@ -20,7 +20,7 @@ SDValue XCoreSelectionDAGInfo::EmitTargetCodeForMemcpy(
     SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
     SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
     MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
-  unsigned SizeBitWidth = Size.getValueType().getSizeInBits();
+  unsigned SizeBitWidth = Size.getValueSizeInBits();
   // Call __memcpy_4 if the src, dst and size are all 4 byte aligned.
   if (!AlwaysInline && (Align & 3) == 0 &&
       DAG.MaskedValueIsZero(Size, APInt(SizeBitWidth, 3))) {