Mirror of https://github.com/RPCS3/llvm.git
Synced 2025-01-08 13:00:50 +00:00
Target: Remove unused entities.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@283690 91177308-0d34-0410-b5e6-96231b3b80d8
parent f1bd24e068
commit 6f158fa384
@@ -179,12 +179,6 @@ public:
   virtual void adjustForHiPEPrologue(MachineFunction &MF,
                                      MachineBasicBlock &PrologueMBB) const {}
 
-  /// Adjust the prologue to add an allocation at a fixed offset from the frame
-  /// pointer.
-  virtual void
-  adjustForFrameAllocatePrologue(MachineFunction &MF,
-                                 MachineBasicBlock &PrologueMBB) const {}
-
   /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
   /// saved registers and returns true if it isn't possible / profitable to do
   /// so by issuing a series of store instructions via
@@ -611,40 +611,6 @@ public:
   virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                        MachineBasicBlock *NewDest) const;
 
-  /// Get an instruction that performs an unconditional branch to the given
-  /// symbol.
-  virtual void
-  getUnconditionalBranch(MCInst &MI,
-                         const MCSymbolRefExpr *BranchTarget) const {
-    llvm_unreachable("Target didn't implement "
-                     "TargetInstrInfo::getUnconditionalBranch!");
-  }
-
-  /// Get a machine trap instruction.
-  virtual void getTrap(MCInst &MI) const {
-    llvm_unreachable("Target didn't implement TargetInstrInfo::getTrap!");
-  }
-
-  /// Get a number of bytes that suffices to hold
-  /// either the instruction returned by getUnconditionalBranch or the
-  /// instruction returned by getTrap. This only makes sense because
-  /// getUnconditionalBranch returns a single, specific instruction. This
-  /// information is needed by the jumptable construction code, since it must
-  /// decide how many bytes to use for a jumptable entry so it can generate the
-  /// right mask.
-  ///
-  /// Note that if the jumptable instruction requires alignment, then that
-  /// alignment should be factored into this required bound so that the
-  /// resulting bound gives the right alignment for the instruction.
-  virtual unsigned getJumpInstrTableEntryBound() const {
-    // This method gets called by LLVMTargetMachine always, so it can't fail
-    // just because there happens to be no implementation for this target.
-    // Any code that tries to use a jumptable annotation without defining
-    // getUnconditionalBranch on the appropriate Target will fail anyway, and
-    // the value returned here won't matter in that case.
-    return 0;
-  }
-
   /// Return true if it's legal to split the given basic
   /// block at the specified instruction (i.e. instruction would be the start
   /// of a new basic block).
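
Note: the hunk above shows LLVM's usual hook pattern, which the X86 hunks near
the end of this diff implemented: the base class ships a failing or neutral
default, and targets override it. The following is a minimal standalone sketch
of that pattern; everything named *Stub is invented for illustration and is
not LLVM's real API.

    #include <cassert>
    #include <cstdio>

    struct MCInstStub { unsigned Opcode = 0; }; // stand-in for llvm::MCInst

    struct TargetInstrInfoStub {
      // Default "implementations" abort, mirroring the llvm_unreachable
      // calls in the deleted code.
      virtual void getUnconditionalBranch(MCInstStub &) const {
        assert(false && "Target didn't implement getUnconditionalBranch!");
      }
      virtual void getTrap(MCInstStub &) const {
        assert(false && "Target didn't implement getTrap!");
      }
      // Neutral default: callers that never use jumptables see 0.
      virtual unsigned getJumpInstrTableEntryBound() const { return 0; }
      virtual ~TargetInstrInfoStub() = default;
    };

    struct X86InstrInfoStub : TargetInstrInfoStub {
      void getUnconditionalBranch(MCInstStub &MI) const override { MI.Opcode = 1; }
      void getTrap(MCInstStub &MI) const override { MI.Opcode = 2; }
      unsigned getJumpInstrTableEntryBound() const override { return 5; }
    };

    int main() {
      X86InstrInfoStub TII;
      MCInstStub Branch;
      TII.getUnconditionalBranch(Branch); // would abort on the base class
      std::printf("entry bound: %u bytes\n", TII.getJumpInstrTableEntryBound());
    }
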
@@ -1293,22 +1259,6 @@ public:
                                 const MachineInstr &UseMI,
                                 unsigned UseIdx) const;
 
-  /// Compute and return the latency of the given data dependent def and use
-  /// when the operand indices are already known. UseMI may be \c nullptr for
-  /// an unknown use.
-  ///
-  /// FindMin may be set to get the minimum vs. expected latency. Minimum
-  /// latency is used for scheduling groups, while expected latency is for
-  /// instruction cost and critical path.
-  ///
-  /// Depending on the subtarget's itinerary properties, this may or may not
-  /// need to call getOperandLatency(). For most subtargets, we don't need
-  /// DefIdx or UseIdx to compute min latency.
-  unsigned computeOperandLatency(const InstrItineraryData *ItinData,
-                                 const MachineInstr &DefMI, unsigned DefIdx,
-                                 const MachineInstr *UseMI,
-                                 unsigned UseIdx) const;
-
   /// Compute the instruction latency of a given instruction.
   /// If the instruction has higher cost when predicated, it's returned via
   /// PredCost.
@@ -191,9 +191,6 @@ public:
     return getPointerTy(DL);
   }
 
-  /// Return true if the select operation is expensive for this target.
-  bool isSelectExpensive() const { return SelectIsExpensive; }
-
   virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
     return true;
   }
@@ -1378,12 +1375,6 @@ protected:
     StackPointerRegisterToSaveRestore = R;
   }
 
-  /// Tells the code generator not to expand operations into sequences that use
-  /// the select operations if possible.
-  void setSelectIsExpensive(bool isExpensive = true) {
-    SelectIsExpensive = isExpensive;
-  }
-
   /// Tells the code generator that the target has multiple (allocatable)
   /// condition registers that can be used to store the results of comparisons
   /// for use by selects and conditional branches. With multiple condition
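
For context on the knob being deleted here: "expand operations into sequences
that use the select operations" refers to lowerings like branch-free abs. A
hedged plain-C++ illustration of the two shapes the flag chose between
follows; both functions are invented for this example and do not appear in
LLVM.

    #include <cstdio>

    int absWithSelect(int X) {
      return X < 0 ? -X : X; // lowers to a select/cmov where that is cheap
    }

    int absWithoutSelect(int X) {
      int Mask = X >> 31;       // arithmetic shift: 0 or -1 sign mask
      return (X + Mask) ^ Mask; // branch-free, select-free equivalent
    }

    int main() {
      std::printf("%d %d\n", absWithSelect(-5), absWithoutSelect(-5)); // 5 5
    }
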
@@ -1425,15 +1416,6 @@ protected:
     RegClassForVT[VT.SimpleTy] = RC;
   }
 
-  /// Remove all register classes.
-  void clearRegisterClasses() {
-    std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
-  }
-
-  /// \brief Remove all operation actions.
-  void clearOperationActions() {
-  }
-
   /// Return the largest legal super-reg register class of the register class
   /// for the specified type and its associated "cost".
   virtual std::pair<const TargetRegisterClass *, uint8_t>
@@ -1761,11 +1743,6 @@ public:
   /// In other words, unless the target performs a post-isel load combining,
   /// this information should not be provided because it will generate more
   /// loads.
-  virtual bool hasPairedLoad(Type * /*LoadedType*/,
-                             unsigned & /*RequiredAligment*/) const {
-    return false;
-  }
-
   virtual bool hasPairedLoad(EVT /*LoadedType*/,
                              unsigned & /*RequiredAligment*/) const {
     return false;
@@ -1915,10 +1892,6 @@ public:
 private:
   const TargetMachine &TM;
 
-  /// Tells the code generator not to expand operations into sequences that use
-  /// the select operations if possible.
-  bool SelectIsExpensive;
-
   /// Tells the code generator that the target has multiple (allocatable)
   /// condition registers that can be used to store the results of comparisons
   /// for use by selects and conditional branches. With multiple condition
@@ -120,12 +120,6 @@ public:
   getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
                            const TargetMachine &TM) const = 0;
 
-  /// Allow the target to completely override section assignment of a global.
-  virtual const MCSection *
-  getSpecialCasedSectionGlobals(const GlobalValue *GV, SectionKind Kind) const {
-    return nullptr;
-  }
-
   /// Return an MCExpr to use for a reference to the specified global variable
   /// from exception handling information.
   virtual const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
@@ -193,12 +193,6 @@ public:
 
   bool shouldPrintMachineCode() const { return Options.PrintMachineCode; }
 
-  /// Returns the default value of asm verbosity.
-  ///
-  bool getAsmVerbosityDefault() const {
-    return Options.MCOptions.AsmVerbose;
-  }
-
   bool getUniqueSectionNames() const { return Options.UniqueSectionNames; }
 
   /// Return true if data objects should be emitted into their own section,
@@ -447,11 +447,6 @@ public:
   virtual const MCPhysReg*
   getCalleeSavedRegs(const MachineFunction *MF) const = 0;
 
-  virtual const MCPhysReg*
-  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
-    return nullptr;
-  }
-
   /// Return a mask of call-preserved registers for the given calling convention
   /// on the current function. The mask should include all call-preserved
   /// aliases. This is used by the register allocator to determine which
@@ -1100,35 +1100,6 @@ int TargetInstrInfo::computeDefOperandLatency(
   return -1;
 }
 
-unsigned TargetInstrInfo::computeOperandLatency(
-    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
-    unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const {
-
-  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
-  if (DefLatency >= 0)
-    return DefLatency;
-
-  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");
-
-  int OperLatency = 0;
-  if (UseMI)
-    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, *UseMI, UseIdx);
-  else {
-    unsigned DefClass = DefMI.getDesc().getSchedClass();
-    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
-  }
-  if (OperLatency >= 0)
-    return OperLatency;
-
-  // No operand latency was found.
-  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);
-
-  // Expected latency is the max of the stage latency and itinerary props.
-  InstrLatency = std::max(InstrLatency,
-                          defaultDefLatency(ItinData->SchedModel, DefMI));
-  return InstrLatency;
-}
-
 bool TargetInstrInfo::getRegSequenceInputs(
     const MachineInstr &MI, unsigned DefIdx,
     SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
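
The deleted body above encodes a three-step fallback: an explicit def-operand
latency if the subtarget provides one, else the itinerary's per-operand cycle,
else the whole-instruction latency clamped from below by the scheduling
model's default def latency. A condensed standalone restatement follows; the
stub functions and their return values are invented stand-ins for the
itinerary queries in the diff.

    #include <algorithm>
    #include <cstdio>

    // Each query returns -1 when it has no answer, like the LLVM calls above.
    int defOperandLatency() { return -1; }               // subtarget override
    int operandCycle(unsigned /*DefIdx*/) { return -1; } // itinerary cycle
    unsigned instrLatency() { return 3; }                // whole instruction
    unsigned defaultDefLatency() { return 1; }           // sched-model floor

    unsigned computeOperandLatency(unsigned DefIdx) {
      if (int L = defOperandLatency(); L >= 0)
        return L;                          // 1) explicit def-operand latency
      if (int L = operandCycle(DefIdx); L >= 0)
        return L;                          // 2) itinerary operand cycle
      return std::max(instrLatency(),      // 3) instruction latency, clamped
                      defaultDefLatency());
    }

    int main() { std::printf("%u\n", computeOperandLatency(0)); } // prints 3
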
@@ -806,7 +806,6 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
     = MaxStoresPerMemmoveOptSize = 4;
   UseUnderscoreSetJmp = false;
   UseUnderscoreLongJmp = false;
-  SelectIsExpensive = false;
   HasMultipleConditionRegisters = false;
   HasExtractBitsInsn = false;
   JumpIsExpensive = JumpIsExpensiveOverride;
@@ -7058,16 +7058,6 @@ bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
   return true;
 }
 
-bool AArch64TargetLowering::hasPairedLoad(Type *LoadedType,
-                                          unsigned &RequiredAligment) const {
-  if (!LoadedType->isIntegerTy() && !LoadedType->isFloatTy())
-    return false;
-  // Cyclone supports unaligned accesses.
-  RequiredAligment = 0;
-  unsigned NumBits = LoadedType->getPrimitiveSizeInBits();
-  return NumBits == 32 || NumBits == 64;
-}
-
 bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
                                           unsigned &RequiredAligment) const {
   if (!LoadedType.isSimple() ||
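
A standalone restatement of the deleted Type* overload's logic, for reference:
only integer and 32-bit float scalar loads of pairable width qualify (the
isFloatTy() check admits f32 only), and on Cyclone no extra alignment is
demanded. The Kind enum is an invented stand-in for the llvm::Type queries.

    #include <cstdio>

    enum class Kind { Int, Float, Other };

    bool hasPairedLoad(Kind K, unsigned Bits, unsigned &RequiredAlignment) {
      if (K != Kind::Int && K != Kind::Float) // isIntegerTy() / isFloatTy()
        return false;
      RequiredAlignment = 0;                  // Cyclone handles unaligned loads
      return Bits == 32 || Bits == 64;        // widths a load-pair can cover
    }

    int main() {
      unsigned Align = ~0u;
      std::printf("%d\n", hasPairedLoad(Kind::Int, 64, Align)); // prints 1
    }
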
@@ -309,8 +309,6 @@ public:
   bool isZExtFree(EVT VT1, EVT VT2) const override;
   bool isZExtFree(SDValue Val, EVT VT2) const override;
 
-  bool hasPairedLoad(Type *LoadedType,
-                     unsigned &RequiredAligment) const override;
   bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;
 
   unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
@@ -35,7 +35,7 @@ public:
   /// Code Generation virtual methods...
   const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
   const MCPhysReg *
-  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const override;
+  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const;
   const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID) const override;
 
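
This hunk and the similar @@ -99, @@ -75 and @@ -100 hunks below all make the
same mechanical change: once the virtual getCalleeSavedRegsViaCopy is deleted
from the base class (the @@ -447,11 hunk above), derived declarations may no
longer carry 'override'. A minimal sketch with invented *Stub types shows why
the keyword has to go:

    struct TargetRegisterInfoStub {
      // After this commit there is no virtual getCalleeSavedRegsViaCopy here.
      virtual ~TargetRegisterInfoStub() = default;
    };

    struct DerivedRegisterInfoStub : TargetRegisterInfoStub {
      // Adding 'override' to this declaration would now fail to compile; the
      // method survives only as a plain member in targets that still use it.
      const unsigned *getCalleeSavedRegsViaCopy() const { return nullptr; }
    };

    int main() {
      DerivedRegisterInfoStub RI;
      return RI.getCalleeSavedRegsViaCopy() == nullptr ? 0 : 1;
    }
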
@@ -443,7 +443,6 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
   // (Section 7.3)
   setHasFloatingPointExceptions(Subtarget->hasFPExceptions());
 
-  setSelectIsExpensive(false);
   PredictableSelectIsExpensive = false;
 
   // We want to find all load dependencies for long chains of stores to enable
@@ -99,7 +99,7 @@ public:
   /// Code Generation virtual methods...
   const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
   const MCPhysReg *
-  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const override;
+  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const;
   const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID) const override;
   const uint32_t *getNoPreservedMask() const override;
@@ -75,7 +75,7 @@ public:
 
   /// Code Generation virtual methods...
   const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
-  const MCPhysReg *getCalleeSavedRegsViaCopy(const MachineFunction *MF) const override;
+  const MCPhysReg *getCalleeSavedRegsViaCopy(const MachineFunction *MF) const;
   const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const override;
   const uint32_t *getNoPreservedMask() const override;
@@ -8073,32 +8073,6 @@ void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
   NopInst.setOpcode(X86::NOOP);
 }
 
-// This code must remain in sync with getJumpInstrTableEntryBound in this class!
-// In particular, getJumpInstrTableEntryBound must always return an upper bound
-// on the encoding lengths of the instructions generated by
-// getUnconditionalBranch and getTrap.
-void X86InstrInfo::getUnconditionalBranch(
-    MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const {
-  Branch.setOpcode(X86::JMP_1);
-  Branch.addOperand(MCOperand::createExpr(BranchTarget));
-}
-
-// This code must remain in sync with getJumpInstrTableEntryBound in this class!
-// In particular, getJumpInstrTableEntryBound must always return an upper bound
-// on the encoding lengths of the instructions generated by
-// getUnconditionalBranch and getTrap.
-void X86InstrInfo::getTrap(MCInst &MI) const {
-  MI.setOpcode(X86::TRAP);
-}
-
-// See getTrap and getUnconditionalBranch for conditions on the value returned
-// by this function.
-unsigned X86InstrInfo::getJumpInstrTableEntryBound() const {
-  // 5 bytes suffice: JMP_4 Symbol@PLT is uses 1 byte (E9) for the JMP_4 and 4
-  // bytes for the symbol offset. And TRAP is ud2, which is two bytes (0F 0B).
-  return 5;
-}
-
 bool X86InstrInfo::isHighLatencyDef(int opc) const {
   switch (opc) {
   default: return false;
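
The deleted comment justifies the bound of 5 bytes: JMP rel32 is one opcode
byte (E9) plus a four-byte displacement, and ud2 is two bytes (0F 0B), so five
bytes cover whichever of the two instructions lands in a jumptable entry. A
trivial check of that arithmetic:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const unsigned JmpRel32Bytes = 1 + 4; // E9 opcode + 32-bit displacement
      const unsigned Ud2Bytes = 2;          // 0F 0B
      std::printf("bound = %u\n", std::max(JmpRel32Bytes, Ud2Bytes)); // 5
    }
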
@@ -488,14 +488,6 @@ public:
                                     unsigned Size, unsigned Alignment,
                                     bool AllowCommute) const;
 
-  void
-  getUnconditionalBranch(MCInst &Branch,
-                         const MCSymbolRefExpr *BranchTarget) const override;
-
-  void getTrap(MCInst &MI) const override;
-
-  unsigned getJumpInstrTableEntryBound() const override;
-
   bool isHighLatencyDef(int opc) const override;
 
   bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
@@ -100,7 +100,7 @@ public:
   const MCPhysReg *
   getCalleeSavedRegs(const MachineFunction* MF) const override;
   const MCPhysReg *
-  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const override;
+  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const;
   const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID) const override;
   const uint32_t *getNoPreservedMask() const override;