Rename LoadX to LoadExt.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@57526 91177308-0d34-0410-b5e6-96231b3b80d8
commit 0329466b6b
parent 2642196a65
@@ -734,7 +734,7 @@ namespace ISD {
     EXTLOAD,
     SEXTLOAD,
     ZEXTLOAD,
-    LAST_LOADX_TYPE
+    LAST_LOADEXT_TYPE
   };
 
   //===--------------------------------------------------------------------===//
@@ -305,23 +305,23 @@ public:
             getOperationAction(Op, VT) == Custom);
   }
 
-  /// getLoadXAction - Return how this load with extension should be treated:
+  /// getLoadExtAction - Return how this load with extension should be treated:
   /// either it is legal, needs to be promoted to a larger size, needs to be
   /// expanded to some other code sequence, or the target has a custom expander
   /// for it.
-  LegalizeAction getLoadXAction(unsigned LType, MVT VT) const {
-    assert(LType < array_lengthof(LoadXActions) &&
-           (unsigned)VT.getSimpleVT() < sizeof(LoadXActions[0])*4 &&
+  LegalizeAction getLoadExtAction(unsigned LType, MVT VT) const {
+    assert(LType < array_lengthof(LoadExtActions) &&
+           (unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 &&
            "Table isn't big enough!");
-    return (LegalizeAction)((LoadXActions[LType] >> (2*VT.getSimpleVT())) & 3);
+    return (LegalizeAction)((LoadExtActions[LType] >> (2*VT.getSimpleVT())) & 3);
   }
 
-  /// isLoadXLegal - Return true if the specified load with extension is legal
+  /// isLoadExtLegal - Return true if the specified load with extension is legal
   /// on this target.
-  bool isLoadXLegal(unsigned LType, MVT VT) const {
+  bool isLoadExtLegal(unsigned LType, MVT VT) const {
     return VT.isSimple() &&
-      (getLoadXAction(LType, VT) == Legal ||
-       getLoadXAction(LType, VT) == Custom);
+      (getLoadExtAction(LType, VT) == Legal ||
+       getLoadExtAction(LType, VT) == Custom);
   }
 
   /// getTruncStoreAction - Return how this store with truncation should be
@@ -839,15 +839,15 @@ protected:
     OpActions[Op] |= (uint64_t)Action << VT.getSimpleVT()*2;
   }
 
-  /// setLoadXAction - Indicate that the specified load with extension does not
-  /// work with the with specified type and indicate what to do about it.
-  void setLoadXAction(unsigned ExtType, MVT VT,
+  /// setLoadExtAction - Indicate that the specified load with extension does
+  /// not work with the with specified type and indicate what to do about it.
+  void setLoadExtAction(unsigned ExtType, MVT VT,
                       LegalizeAction Action) {
-    assert((unsigned)VT.getSimpleVT() < sizeof(LoadXActions[0])*4 &&
-           ExtType < array_lengthof(LoadXActions) &&
+    assert((unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 &&
+           ExtType < array_lengthof(LoadExtActions) &&
            "Table isn't big enough!");
-    LoadXActions[ExtType] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2);
-    LoadXActions[ExtType] |= (uint64_t)Action << VT.getSimpleVT()*2;
+    LoadExtActions[ExtType] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2);
+    LoadExtActions[ExtType] |= (uint64_t)Action << VT.getSimpleVT()*2;
   }
 
   /// setTruncStoreAction - Indicate that the specified truncating store does
@@ -1411,10 +1411,10 @@ private:
   /// non-legal value types are not described here.
   uint64_t OpActions[OpActionsCapacity];
 
-  /// LoadXActions - For each load of load extension type and each value type,
+  /// LoadExtActions - For each load of load extension type and each value type,
   /// keep a LegalizeAction that indicates how instruction selection should deal
   /// with the load.
-  uint64_t LoadXActions[ISD::LAST_LOADX_TYPE];
+  uint64_t LoadExtActions[ISD::LAST_LOADEXT_TYPE];
 
   /// TruncStoreActions - For each truncating store, keep a LegalizeAction that
   /// indicates how instruction selection should deal with the store.
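The hunks above also show the mechanism behind the renamed accessors: each load-extension kind owns one uint64_t in LoadExtActions, and every simple value type gets a 2-bit LegalizeAction slot inside that word. Below is a minimal standalone sketch of that packing scheme, written to mirror the logic in this diff; the enum values and the plain-integer value-type indices are assumptions for illustration, and this is not the LLVM implementation itself.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Simplified stand-ins for the LLVM types used in the diff above (assumed).
enum LegalizeAction { Legal = 0, Promote = 1, Expand = 2, Custom = 3 };
enum ExtType { EXTLOAD = 0, SEXTLOAD = 1, ZEXTLOAD = 2, LAST_LOADEXT_TYPE = 3 };

// One packed word per extension kind; 2 bits of LegalizeAction per value type.
static uint64_t LoadExtActions[LAST_LOADEXT_TYPE]; // zero-init == everything Legal

void setLoadExtAction(unsigned ExtTy, unsigned VT, LegalizeAction Action) {
  assert(ExtTy < LAST_LOADEXT_TYPE && VT < sizeof(LoadExtActions[0]) * 4 &&
         "Table isn't big enough!");
  LoadExtActions[ExtTy] &= ~(uint64_t(3) << VT * 2);   // clear the 2-bit slot
  LoadExtActions[ExtTy] |= uint64_t(Action) << VT * 2; // store the new action
}

LegalizeAction getLoadExtAction(unsigned ExtTy, unsigned VT) {
  return LegalizeAction((LoadExtActions[ExtTy] >> VT * 2) & 3);
}

bool isLoadExtLegal(unsigned ExtTy, unsigned VT) {
  return getLoadExtAction(ExtTy, VT) == Legal ||
         getLoadExtAction(ExtTy, VT) == Custom;
}

int main() {
  const unsigned i1 = 1, f32 = 7;             // made-up value-type indices
  setLoadExtAction(SEXTLOAD, i1, Promote);    // mirrors the target hunks below
  setLoadExtAction(EXTLOAD, f32, Expand);
  std::printf("sextload i1 legal?  %d\n", isLoadExtLegal(SEXTLOAD, i1));    // 0
  std::printf("extload f32 action: %d\n", getLoadExtAction(EXTLOAD, f32));  // 2 (Expand)
}
```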
@@ -1724,7 +1724,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
     if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
                                      BitWidth - EVT.getSizeInBits())) &&
         ((!AfterLegalize && !LN0->isVolatile()) ||
-         TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
+         TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
       SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
                                        LN0->getBasePtr(), LN0->getSrcValue(),
                                        LN0->getSrcValueOffset(), EVT,
@@ -1746,7 +1746,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
     if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
                                      BitWidth - EVT.getSizeInBits())) &&
         ((!AfterLegalize && !LN0->isVolatile()) ||
-         TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
+         TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
       SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
                                        LN0->getBasePtr(), LN0->getSrcValue(),
                                        LN0->getSrcValueOffset(), EVT,
@@ -1775,7 +1775,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
     // Do not generate loads of non-round integer types since these can
     // be expensive (and would be wrong if the type is not byte sized).
     if (EVT != MVT::Other && LoadedVT.bitsGT(EVT) && EVT.isRound() &&
-        (!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
+        (!AfterLegalize || TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
       MVT PtrType = N0.getOperand(1).getValueType();
       // For big endian targets, we need to add an offset to the pointer to
       // load the correct bytes. For little endian systems, we merely need to
@@ -2858,7 +2858,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
   // fold (sext (load x)) -> (sext (truncate (sextload x)))
   if (ISD::isNON_EXTLoad(N0.getNode()) &&
       ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
-       TLI.isLoadXLegal(ISD::SEXTLOAD, N0.getValueType()))) {
+       TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()))) {
     bool DoXform = true;
     SmallVector<SDNode*, 4> SetCCs;
     if (!N0.hasOneUse())
@@ -2900,7 +2900,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     MVT EVT = LN0->getMemoryVT();
     if ((!AfterLegalize && !LN0->isVolatile()) ||
-        TLI.isLoadXLegal(ISD::SEXTLOAD, EVT)) {
+        TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT)) {
       SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
                                        LN0->getBasePtr(), LN0->getSrcValue(),
                                        LN0->getSrcValueOffset(), EVT,
@@ -2984,7 +2984,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
   // fold (zext (load x)) -> (zext (truncate (zextload x)))
   if (ISD::isNON_EXTLoad(N0.getNode()) &&
       ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
-       TLI.isLoadXLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
+       TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
     bool DoXform = true;
     SmallVector<SDNode*, 4> SetCCs;
     if (!N0.hasOneUse())
@@ -3026,7 +3026,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     MVT EVT = LN0->getMemoryVT();
     if ((!AfterLegalize && !LN0->isVolatile()) ||
-        TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT)) {
+        TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT)) {
       SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
                                        LN0->getBasePtr(), LN0->getSrcValue(),
                                        LN0->getSrcValueOffset(), EVT,
@@ -3106,7 +3106,7 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
   // fold (aext (load x)) -> (aext (truncate (extload x)))
   if (ISD::isNON_EXTLoad(N0.getNode()) && N0.hasOneUse() &&
       ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
-       TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) {
+       TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(),
                                      LN0->getBasePtr(), LN0->getSrcValue(),
@@ -3212,7 +3212,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
   if (Opc == ISD::SIGN_EXTEND_INREG) {
     ExtType = ISD::SEXTLOAD;
     EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
-    if (AfterLegalize && !TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))
+    if (AfterLegalize && !TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))
       return SDValue();
   }
 
@@ -3345,7 +3345,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
       ISD::isUNINDEXEDLoad(N0.getNode()) &&
       EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
       ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
-       TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) {
+       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
                                      LN0->getBasePtr(), LN0->getSrcValue(),
@@ -3361,7 +3361,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
       N0.hasOneUse() &&
       EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
       ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
-       TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) {
+       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
                                      LN0->getBasePtr(), LN0->getSrcValue(),
@@ -4043,7 +4043,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
   // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
   if (ISD::isNON_EXTLoad(N0.getNode()) && N0.hasOneUse() &&
       ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
-       TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) {
+       TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(),
                                      LN0->getBasePtr(), LN0->getSrcValue(),
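Every DAGCombiner hunk above applies the same guard before merging a separate extend with a load into an extending load: the fold is always allowed before legalization for non-volatile loads, and afterwards only when the target reports the extending load as Legal or Custom. A small sketch of that predicate, with hypothetical parameter names chosen for illustration:

```cpp
// Sketch of the guard repeated in the visit* hunks above; parameter names
// are illustrative, not LLVM's.
bool canFoldToExtLoad(bool afterLegalize, bool loadIsVolatile,
                      bool targetExtLoadLegalOrCustom) {
  // Before legalization any non-volatile load may be folded; afterwards the
  // target must report the extending load as Legal or Custom
  // (TLI.isLoadExtLegal in the diff).
  return (!afterLegalize && !loadIsVolatile) || targetExtLoadLegalOrCustom;
}
```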
@@ -456,7 +456,7 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
     if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
         // Only do this if the target has a native EXTLOAD instruction from
         // smaller type.
-        TLI.isLoadXLegal(ISD::EXTLOAD, SVT) &&
+        TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
        TLI.ShouldShrinkFPConstant(OrigVT)) {
       const Type *SType = SVT.getTypeForMVT();
       LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
@@ -1981,7 +1981,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
         // nice to have an effective generic way of getting these benefits...
         // Until such a way is found, don't insist on promoting i1 here.
         (SrcVT != MVT::i1 ||
-         TLI.getLoadXAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
+         TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
       // Promote to a byte-sized load if not loading an integral number of
       // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
       unsigned NewWidth = SrcVT.getStoreSizeInBits();
@@ -2086,7 +2086,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
       Tmp1 = LegalizeOp(Result);
       Tmp2 = LegalizeOp(Ch);
     } else {
-      switch (TLI.getLoadXAction(ExtType, SrcVT)) {
+      switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
       default: assert(0 && "This action is not supported yet!");
       case TargetLowering::Custom:
         isCustom = true;
@@ -402,7 +402,7 @@ TargetLowering::TargetLowering(TargetMachine &tm)
          "Fixed size array in TargetLowering is not large enough!");
   // All operations default to being supported.
   memset(OpActions, 0, sizeof(OpActions));
-  memset(LoadXActions, 0, sizeof(LoadXActions));
+  memset(LoadExtActions, 0, sizeof(LoadExtActions));
   memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
   memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
   memset(ConvertActions, 0, sizeof(ConvertActions));
@@ -131,10 +131,10 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
   computeRegisterProperties();
 
   // ARM does not have f32 extending load.
-  setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand);
+  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
 
   // ARM does not have i1 sign extending load.
-  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
 
   // ARM supports all 4 flavors of integer indexed load / store.
   for (unsigned im = (unsigned)ISD::PRE_INC;
@@ -52,15 +52,15 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM) : TargetLowering(TM)
   // We want to custom lower some of our intrinsics.
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
 
-  setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);
-  setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand);
+  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
 
-  setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
-  setLoadXAction(ISD::ZEXTLOAD, MVT::i32, Expand);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Expand);
 
-  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
-  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
-  setLoadXAction(ISD::SEXTLOAD, MVT::i16, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
 
   //  setOperationAction(ISD::BRIND, MVT::Other, Expand);
   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
@@ -131,27 +131,27 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
   addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
 
   // SPU has no sign or zero extended loads for i1, i8, i16:
-  setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);
-  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
-  setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
   setTruncStoreAction(MVT::i8, MVT::i1, Custom);
   setTruncStoreAction(MVT::i16, MVT::i1, Custom);
   setTruncStoreAction(MVT::i32, MVT::i1, Custom);
   setTruncStoreAction(MVT::i64, MVT::i1, Custom);
   setTruncStoreAction(MVT::i128, MVT::i1, Custom);
 
-  setLoadXAction(ISD::EXTLOAD, MVT::i8, Custom);
-  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Custom);
-  setLoadXAction(ISD::ZEXTLOAD, MVT::i8, Custom);
+  setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Custom);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
   setTruncStoreAction(MVT::i8 , MVT::i8, Custom);
   setTruncStoreAction(MVT::i16 , MVT::i8, Custom);
   setTruncStoreAction(MVT::i32 , MVT::i8, Custom);
   setTruncStoreAction(MVT::i64 , MVT::i8, Custom);
   setTruncStoreAction(MVT::i128, MVT::i8, Custom);
 
-  setLoadXAction(ISD::EXTLOAD, MVT::i16, Custom);
-  setLoadXAction(ISD::SEXTLOAD, MVT::i16, Custom);
-  setLoadXAction(ISD::ZEXTLOAD, MVT::i16, Custom);
+  setLoadExtAction(ISD::EXTLOAD, MVT::i16, Custom);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Custom);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Custom);
 
   // SPU constant load actions are custom lowered:
   setOperationAction(ISD::Constant, MVT::i64, Custom);
@@ -35,14 +35,14 @@ IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
   // register class for predicate registers
   addRegisterClass(MVT::i1, IA64::PRRegisterClass);
 
-  setLoadXAction(ISD::EXTLOAD , MVT::i1 , Promote);
+  setLoadExtAction(ISD::EXTLOAD , MVT::i1 , Promote);
 
-  setLoadXAction(ISD::ZEXTLOAD , MVT::i1 , Promote);
+  setLoadExtAction(ISD::ZEXTLOAD , MVT::i1 , Promote);
 
-  setLoadXAction(ISD::SEXTLOAD , MVT::i1 , Promote);
-  setLoadXAction(ISD::SEXTLOAD , MVT::i8 , Expand);
-  setLoadXAction(ISD::SEXTLOAD , MVT::i16 , Expand);
-  setLoadXAction(ISD::SEXTLOAD , MVT::i32 , Expand);
+  setLoadExtAction(ISD::SEXTLOAD , MVT::i1 , Promote);
+  setLoadExtAction(ISD::SEXTLOAD , MVT::i8 , Expand);
+  setLoadExtAction(ISD::SEXTLOAD , MVT::i16 , Expand);
+  setLoadExtAction(ISD::SEXTLOAD , MVT::i32 , Expand);
 
   setOperationAction(ISD::BRIND , MVT::Other, Expand);
   setOperationAction(ISD::BR_JT , MVT::Other, Expand);
@@ -82,9 +82,9 @@ MipsTargetLowering(MipsTargetMachine &TM): TargetLowering(TM)
   addLegalFPImmediate(APFloat(+0.0f));
 
   // Load extented operations for i1 types must be promoted
-  setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);
-  setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
-  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
 
   // Used by legalize types to correctly generate the setcc result.
   // Without this, every float setcc comes with a AND/OR with the result,
@@ -60,9 +60,9 @@ PIC16TargetLowering(PIC16TargetMachine &TM): TargetLowering(TM)
   addRegisterClass(MVT::i16, PIC16::PTRRegsRegisterClass);
 
   // Load extented operations for i1 types must be promoted .
-  setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);
-  setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
-  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
 
   setOperationAction(ISD::ADD, MVT::i1, Promote);
   setOperationAction(ISD::ADD, MVT::i8, Legal);
@@ -53,8 +53,8 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
   addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
 
   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
-  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
-  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
 
   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
 
@@ -515,9 +515,9 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
   addRegisterClass(MVT::f64, SP::DFPRegsRegisterClass);
 
   // Turn FP extload into load/fextend
-  setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand);
+  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
   // Sparc doesn't have i1 sign extending load
-  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
   // Turn FP truncstore into trunc + store.
   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
 
@@ -84,7 +84,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   if (Subtarget->is64Bit())
     addRegisterClass(MVT::i64, X86::GR64RegisterClass);
 
-  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
 
   // We don't accept any truncstore of integer registers.
   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
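The per-target hunks are mechanical renames, but their comments also record what each backend can and cannot do: Sparc and ARM expand the f32 EXTLOAD into a load plus a separate extend, and most targets promote i1 extending loads to a wider type. The following is a conceptual sketch of what those two actions mean for the loaded value, written as ordinary scalar C++; the real legalizer rewrites SelectionDAG nodes rather than running code like this, so treat it only as an illustration of the semantics, not as anything from this commit.

```cpp
// "Expand" for EXTLOAD f32 (see the Sparc comment "Turn FP extload into
// load/fextend"): no single widening load exists, so an ordinary f32 load is
// followed by a separate floating-point extend.
double extload_f32_expanded(const float *p) {
  float narrow = *p;                    // plain f32 load
  return static_cast<double>(narrow);   // separate extend (ISD::FP_EXTEND)
}

// "Promote" for SEXTLOAD i1: the load is performed at a wider legal type
// (e.g. i8) and the result is then sign-extended from the low bit.
int sextload_i1_promoted(const unsigned char *p) {
  unsigned char byte = *p;              // load at the promoted width
  return -static_cast<int>(byte & 1);   // sign-extend the i1 bit: 0 -> 0, 1 -> -1
}
```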