ARM64: override all the things.
Actually, mostly only those in the top-level directory that already had a "virtual" attached. But it's the thought that counts and it's been a long day.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205131 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent b52cb5e0db
commit c31891e350
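For context on the change itself: the patch applies the C++11 override idiom across these headers. A declaration that re-declares a base-class virtual drops the now-redundant virtual keyword and gains a trailing override, so the compiler rejects the declaration outright if the base signature ever drifts, instead of silently treating it as a new overload. Below is a minimal sketch of the idiom using a hypothetical base class (the real bases here are LLVM's TargetLowering, TargetInstrInfo, and friends); only the method name isLegalAddImmediate is taken from the diff, and the immediate range shown is illustrative, not ARM64's actual rule.

    #include <cstdint>

    // Hypothetical stand-in for a target-independent base class.
    struct TargetHooksBase {
      virtual ~TargetHooksBase() = default;
      virtual bool isLegalAddImmediate(int64_t Imm) const { return false; }
    };

    // Hypothetical backend class, written the way this patch rewrites the ARM64 headers.
    struct ExampleTargetHooks : TargetHooksBase {
      // Before: "virtual bool isLegalAddImmediate(int64_t) const;" keeps compiling
      // even if the base signature later changes, leaving a dead, never-called
      // overload behind.
      //
      // After: 'override' turns any such mismatch into a hard compile error.
      bool isLegalAddImmediate(int64_t Imm) const override {
        return Imm >= 0 && Imm <= 4095; // illustrative range only
      }
    };

A few declarations in the diff (getFunctionAlignment, for instance) lose virtual without gaining override; presumably those no longer correspond to a virtual in the base class, so override would not compile there.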
@@ -181,12 +181,12 @@ public:
                                       APInt &KnownOne, const SelectionDAG &DAG,
                                       unsigned Depth = 0) const;
 
-  virtual MVT getScalarShiftAmountTy(EVT LHSTy) const;
+  MVT getScalarShiftAmountTy(EVT LHSTy) const override;
 
   /// allowsUnalignedMemoryAccesses - Returns true if the target allows
   /// unaligned memory accesses. of the specified type.
-  virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
-                                             bool *Fast = 0) const {
+  bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
+                                     bool *Fast = 0) const override {
     if (RequireStrictAlign)
       return false;
     // FIXME: True for Cyclone, but not necessary others.
@@ -196,40 +196,40 @@ public:
   }
 
   /// LowerOperation - Provide custom lowering hooks for some operations.
-  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
 
-  virtual const char *getTargetNodeName(unsigned Opcode) const;
+  const char *getTargetNodeName(unsigned Opcode) const override;
 
-  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
 
   /// getFunctionAlignment - Return the Log2 alignment of this function.
-  virtual unsigned getFunctionAlignment(const Function *F) const;
+  unsigned getFunctionAlignment(const Function *F) const;
 
   /// getMaximalGlobalOffset - Returns the maximal possible offset which can
   /// be used for loads / stores from the global.
-  virtual unsigned getMaximalGlobalOffset() const;
+  unsigned getMaximalGlobalOffset() const override;
 
   /// Returns true if a cast between SrcAS and DestAS is a noop.
-  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
     // Addrspacecasts are always noops.
     return true;
   }
 
   /// createFastISel - This method returns a target specific FastISel object,
   /// or null if the target does not support "fast" ISel.
-  virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
-                                   const TargetLibraryInfo *libInfo) const;
+  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+                           const TargetLibraryInfo *libInfo) const override;
 
-  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
 
-  virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
 
   /// isShuffleMaskLegal - Return true if the given shuffle mask can be
   /// codegen'd directly, or if it should be stack expanded.
-  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
+  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;
 
   /// getSetCCResultType - Return the ISD::SETCC ValueType
-  virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
+  EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
 
   SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
 
@@ -249,52 +249,52 @@ public:
   MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                   MachineBasicBlock *BB) const;
 
-  virtual MachineBasicBlock *
-  EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
+  MachineBasicBlock *
+  EmitInstrWithCustomInserter(MachineInstr *MI,
+                              MachineBasicBlock *MBB) const override;
 
-  virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
-                                  unsigned Intrinsic) const;
+  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
+                          unsigned Intrinsic) const override;
 
-  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
-  virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+  bool isTruncateFree(EVT VT1, EVT VT2) const override;
 
-  virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
-  virtual bool isZExtFree(EVT VT1, EVT VT2) const;
-  virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
+  bool isZExtFree(EVT VT1, EVT VT2) const override;
+  bool isZExtFree(SDValue Val, EVT VT2) const override;
 
-  virtual bool hasPairedLoad(Type *LoadedType,
-                             unsigned &RequiredAligment) const;
-  virtual bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const;
+  bool hasPairedLoad(Type *LoadedType,
+                     unsigned &RequiredAligment) const override;
+  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;
 
-  virtual bool isLegalAddImmediate(int64_t) const;
-  virtual bool isLegalICmpImmediate(int64_t) const;
+  bool isLegalAddImmediate(int64_t) const override;
+  bool isLegalICmpImmediate(int64_t) const override;
 
-  virtual EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
-                                  unsigned SrcAlign, bool IsMemset,
-                                  bool ZeroMemset, bool MemcpyStrSrc,
-                                  MachineFunction &MF) const;
+  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
+                          MachineFunction &MF) const override;
 
   /// isLegalAddressingMode - Return true if the addressing mode represented
   /// by AM is legal for this target, for a load/store of the specified type.
-  virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+  bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
 
   /// \brief Return the cost of the scaling factor used in the addressing
   /// mode represented by AM for this target, for a load/store
   /// of the specified type.
   /// If the AM is supported, the return value must be >= 0.
   /// If the AM is not supported, it returns a negative value.
-  virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const;
+  int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;
 
   /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
   /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
   /// expanded to FMAs when this method returns true, otherwise fmuladd is
   /// expanded to fmul + fadd.
-  virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
+  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
 
-  virtual const uint16_t *getScratchRegisters(CallingConv::ID CC) const;
+  const uint16_t *getScratchRegisters(CallingConv::ID CC) const override;
 
-  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
-                                                 Type *Ty) const;
+  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+                                         Type *Ty) const override;
 
 private:
   /// Subtarget - Keep a pointer to the ARM64Subtarget around so that we can
@@ -305,14 +305,14 @@ private:
   void addDRTypeForNEON(MVT VT);
   void addQRTypeForNEON(MVT VT);
 
-  virtual SDValue
+  SDValue
   LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                        SelectionDAG &DAG,
-                       SmallVectorImpl<SDValue> &InVals) const;
+                       SmallVectorImpl<SDValue> &InVals) const override;
 
-  virtual SDValue LowerCall(CallLoweringInfo & /*CLI*/,
-                            SmallVectorImpl<SDValue> &InVals) const;
+  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
+                    SmallVectorImpl<SDValue> &InVals) const override;
 
   SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                           CallingConv::ID CallConv, bool isVarArg,
@@ -330,16 +330,15 @@ private:
   void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                            SDValue &Chain) const;
 
-  virtual bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
-                              bool isVarArg,
-                              const SmallVectorImpl<ISD::OutputArg> &Outs,
-                              LLVMContext &Context) const;
+  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+                      bool isVarArg,
+                      const SmallVectorImpl<ISD::OutputArg> &Outs,
+                      LLVMContext &Context) const override;
 
-  virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
-                              bool isVarArg,
-                              const SmallVectorImpl<ISD::OutputArg> &Outs,
-                              const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
-                              SelectionDAG &DAG) const;
+  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+                      const SmallVectorImpl<ISD::OutputArg> &Outs,
+                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
+                      SelectionDAG &DAG) const override;
 
   SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -42,17 +42,17 @@ public:
   /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
   /// such, whenever a client has an instance of instruction info, it should
   /// always be able to get register info as well (through this method).
-  virtual const ARM64RegisterInfo &getRegisterInfo() const { return RI; }
+  const ARM64RegisterInfo &getRegisterInfo() const { return RI; }
 
   unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
 
-  virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
-                                     unsigned &DstReg, unsigned &SubIdx) const;
+  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
+                             unsigned &DstReg, unsigned &SubIdx) const override;
 
-  virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
-                                       int &FrameIndex) const;
-  virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
-                                      int &FrameIndex) const;
+  unsigned isLoadFromStackSlot(const MachineInstr *MI,
+                               int &FrameIndex) const override;
+  unsigned isStoreToStackSlot(const MachineInstr *MI,
+                              int &FrameIndex) const override;
 
   /// \brief Does this instruction set its full destination register to zero?
   bool isGPRZero(const MachineInstr *MI) const;
@@ -75,18 +75,17 @@ public:
   /// Hint that pairing the given load or store is unprofitable.
   void suppressLdStPair(MachineInstr *MI) const;
 
-  virtual bool getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
-                                    unsigned &Offset,
-                                    const TargetRegisterInfo *TRI) const;
+  bool getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                            unsigned &Offset,
+                            const TargetRegisterInfo *TRI) const override;
 
-  virtual bool enableClusterLoads() const { return true; }
+  bool enableClusterLoads() const override { return true; }
 
-  virtual bool shouldClusterLoads(MachineInstr *FirstLdSt,
-                                  MachineInstr *SecondLdSt,
-                                  unsigned NumLoads) const;
+  bool shouldClusterLoads(MachineInstr *FirstLdSt, MachineInstr *SecondLdSt,
+                          unsigned NumLoads) const override;
 
-  virtual bool shouldScheduleAdjacent(MachineInstr *First,
-                                      MachineInstr *Second) const;
+  bool shouldScheduleAdjacent(MachineInstr *First,
+                              MachineInstr *Second) const override;
 
   MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
                                          uint64_t Offset, const MDNode *MDPtr,
@@ -95,60 +94,57 @@ public:
                         DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                         bool KillSrc, unsigned Opcode,
                         llvm::ArrayRef<unsigned> Indices) const;
-  virtual void copyPhysReg(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator I, DebugLoc DL,
-                           unsigned DestReg, unsigned SrcReg,
-                           bool KillSrc) const;
+  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+                   DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+                   bool KillSrc) const override;
 
-  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
-                                   MachineBasicBlock::iterator MBBI,
-                                   unsigned SrcReg, bool isKill, int FrameIndex,
-                                   const TargetRegisterClass *RC,
-                                   const TargetRegisterInfo *TRI) const;
+  void storeRegToStackSlot(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MBBI, unsigned SrcReg,
+                           bool isKill, int FrameIndex,
+                           const TargetRegisterClass *RC,
+                           const TargetRegisterInfo *TRI) const override;
 
-  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
-                                    MachineBasicBlock::iterator MBBI,
-                                    unsigned DestReg, int FrameIndex,
-                                    const TargetRegisterClass *RC,
-                                    const TargetRegisterInfo *TRI) const;
+  void loadRegFromStackSlot(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MBBI, unsigned DestReg,
+                            int FrameIndex, const TargetRegisterClass *RC,
+                            const TargetRegisterInfo *TRI) const override;
 
-  virtual MachineInstr *
+  MachineInstr *
   foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                         const SmallVectorImpl<unsigned> &Ops,
-                        int FrameIndex) const;
+                        int FrameIndex) const override;
 
-  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
-                             MachineBasicBlock *&FBB,
-                             SmallVectorImpl<MachineOperand> &Cond,
-                             bool AllowModify = false) const;
-  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
-                                MachineBasicBlock *FBB,
-                                const SmallVectorImpl<MachineOperand> &Cond,
-                                DebugLoc DL) const;
-  virtual bool
-  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
-  virtual bool canInsertSelect(const MachineBasicBlock &,
-                               const SmallVectorImpl<MachineOperand> &Cond,
-                               unsigned, unsigned, int &, int &, int &) const;
-  virtual void insertSelect(MachineBasicBlock &MBB,
-                            MachineBasicBlock::iterator MI, DebugLoc DL,
-                            unsigned DstReg,
-                            const SmallVectorImpl<MachineOperand> &Cond,
-                            unsigned TrueReg, unsigned FalseReg) const;
-  virtual void getNoopForMachoTarget(MCInst &NopInst) const;
+  bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                     MachineBasicBlock *&FBB,
+                     SmallVectorImpl<MachineOperand> &Cond,
+                     bool AllowModify = false) const override;
+  unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+  unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+                        MachineBasicBlock *FBB,
+                        const SmallVectorImpl<MachineOperand> &Cond,
+                        DebugLoc DL) const override;
+  bool
+  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+  bool canInsertSelect(const MachineBasicBlock &,
+                       const SmallVectorImpl<MachineOperand> &Cond, unsigned,
+                       unsigned, int &, int &, int &) const override;
+  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+                    DebugLoc DL, unsigned DstReg,
+                    const SmallVectorImpl<MachineOperand> &Cond,
+                    unsigned TrueReg, unsigned FalseReg) const override;
+  void getNoopForMachoTarget(MCInst &NopInst) const override;
 
   /// analyzeCompare - For a comparison instruction, return the source registers
   /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
   /// Return true if the comparison instruction can be analyzed.
-  virtual bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
-                              unsigned &SrcReg2, int &CmpMask,
-                              int &CmpValue) const;
+  bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+                      unsigned &SrcReg2, int &CmpMask,
+                      int &CmpValue) const override;
   /// optimizeCompareInstr - Convert the instruction supplying the argument to
   /// the comparison into one that sets the zero bit in the flags register.
-  virtual bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
-                                    unsigned SrcReg2, int CmpMask, int CmpValue,
-                                    const MachineRegisterInfo *MRI) const;
+  bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+                            unsigned SrcReg2, int CmpMask, int CmpValue,
+                            const MachineRegisterInfo *MRI) const override;
 
 private:
   void instantiateCondBranch(MachineBasicBlock &MBB, DebugLoc DL,
@@ -33,10 +33,12 @@
 public:
   ARM64RegisterInfo(const ARM64InstrInfo *tii, const ARM64Subtarget *sti);
 
-  /// Code Generation virtual methods...
   bool isReservedReg(const MachineFunction &MF, unsigned Reg) const;
-  const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
-  const uint32_t *getCallPreservedMask(CallingConv::ID) const;
+
+  /// Code Generation virtual methods...
+  const uint16_t *
+  getCalleeSavedRegs(const MachineFunction *MF = 0) const override;
+  const uint32_t *getCallPreservedMask(CallingConv::ID) const override;
 
   // Calls involved in thread-local variable lookup save more registers than
   // normal calls, so they need a different mask to represent this.
@@ -52,36 +54,39 @@ public:
   /// this property
   const uint32_t *getThisReturnPreservedMask(CallingConv::ID) const;
 
-  BitVector getReservedRegs(const MachineFunction &MF) const;
-  const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF,
-                                                unsigned Kind = 0) const;
+  BitVector getReservedRegs(const MachineFunction &MF) const override;
   const TargetRegisterClass *
-  getCrossCopyRegClass(const TargetRegisterClass *RC) const;
+  getPointerRegClass(const MachineFunction &MF,
+                     unsigned Kind = 0) const override;
+  const TargetRegisterClass *
+  getCrossCopyRegClass(const TargetRegisterClass *RC) const override;
 
-  bool requiresRegisterScavenging(const MachineFunction &MF) const;
-  bool useFPForScavengingIndex(const MachineFunction &MF) const;
-  bool requiresFrameIndexScavenging(const MachineFunction &MF) const;
+  bool requiresRegisterScavenging(const MachineFunction &MF) const override;
+  bool useFPForScavengingIndex(const MachineFunction &MF) const override;
+  bool requiresFrameIndexScavenging(const MachineFunction &MF) const override;
 
-  bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const;
-  bool isFrameOffsetLegal(const MachineInstr *MI, int64_t Offset) const;
+  bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override;
+  bool isFrameOffsetLegal(const MachineInstr *MI,
+                          int64_t Offset) const override;
   void materializeFrameBaseRegister(MachineBasicBlock *MBB, unsigned BaseReg,
-                                    int FrameIdx, int64_t Offset) const;
+                                    int FrameIdx,
+                                    int64_t Offset) const override;
   void resolveFrameIndex(MachineBasicBlock::iterator I, unsigned BaseReg,
-                         int64_t Offset) const;
+                         int64_t Offset) const override;
   void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
                            unsigned FIOperandNum,
-                           RegScavenger *RS = NULL) const;
-
+                           RegScavenger *RS = NULL) const override;
   bool cannotEliminateFrame(const MachineFunction &MF) const;
-  bool requiresVirtualBaseRegisters(const MachineFunction &MF) const;
 
+  bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override;
   bool hasBasePointer(const MachineFunction &MF) const;
   unsigned getBaseRegister() const;
 
   // Debug information queries.
-  unsigned getFrameRegister(const MachineFunction &MF) const;
+  unsigned getFrameRegister(const MachineFunction &MF) const override;
 
   unsigned getRegPressureLimit(const TargetRegisterClass *RC,
-                               MachineFunction &MF) const;
+                               MachineFunction &MF) const override;
 };
 
 } // end namespace llvm
@@ -27,11 +27,10 @@ public:
   explicit ARM64SelectionDAGInfo(const TargetMachine &TM);
   ~ARM64SelectionDAGInfo();
 
-  virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
-                                          SDValue Chain, SDValue Dst,
-                                          SDValue Src, SDValue Size,
-                                          unsigned Align, bool isVolatile,
-                                          MachinePointerInfo DstPtrInfo) const;
+  SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
+                                  SDValue Dst, SDValue Src, SDValue Size,
+                                  unsigned Align, bool isVolatile,
+                                  MachinePointerInfo DstPtrInfo) const override;
 };
 }
 
@@ -45,7 +45,7 @@ public:
   ARM64Subtarget(const std::string &TT, const std::string &CPU,
                  const std::string &FS);
 
-  virtual bool enableMachineScheduler() const { return true; }
+  bool enableMachineScheduler() const override { return true; }
 
   bool hasZeroCycleRegMove() const { return HasZeroCycleRegMove; }
 
@@ -41,27 +41,27 @@ public:
                      const TargetOptions &Options, Reloc::Model RM,
                      CodeModel::Model CM, CodeGenOpt::Level OL);
 
-  virtual const ARM64Subtarget *getSubtargetImpl() const { return &Subtarget; }
-  virtual const ARM64TargetLowering *getTargetLowering() const {
+  const ARM64Subtarget *getSubtargetImpl() const override { return &Subtarget; }
+  const ARM64TargetLowering *getTargetLowering() const override {
     return &TLInfo;
   }
-  virtual const DataLayout *getDataLayout() const { return &DL; }
-  virtual const ARM64FrameLowering *getFrameLowering() const {
+  const DataLayout *getDataLayout() const override { return &DL; }
+  const ARM64FrameLowering *getFrameLowering() const override {
    return &FrameLowering;
  }
-  virtual const ARM64InstrInfo *getInstrInfo() const { return &InstrInfo; }
-  virtual const ARM64RegisterInfo *getRegisterInfo() const {
+  const ARM64InstrInfo *getInstrInfo() const override { return &InstrInfo; }
+  const ARM64RegisterInfo *getRegisterInfo() const override {
     return &InstrInfo.getRegisterInfo();
   }
-  virtual const ARM64SelectionDAGInfo *getSelectionDAGInfo() const {
+  const ARM64SelectionDAGInfo *getSelectionDAGInfo() const override {
     return &TSInfo;
   }
 
   // Pass Pipeline Configuration
-  virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+  TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
 
   /// \brief Register ARM64 analysis passes with a pass manager.
-  virtual void addAnalysisPasses(PassManagerBase &PM);
+  void addAnalysisPasses(PassManagerBase &PM) override;
 };
 
 } // end namespace llvm
@@ -18,7 +18,7 @@ class ARM64TargetMachine;
 
 /// This implementation is used for AArch64 ELF targets (Linux in particular).
 class ARM64_ELFTargetObjectFile : public TargetLoweringObjectFileELF {
-  virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
 };
 
 /// ARM64_MachoTargetObjectFile - This TLOF implementation is used for Darwin.