diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index dca78a38ae1..948a2f4f63d 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -193,7 +193,7 @@ public: /// getSchedulingPreference - Some scheduler, e.g. hybrid, can switch to /// different scheduling heuristics for different nodes. This function returns /// the preference (or none) for the given node. - virtual Sched::Preference getSchedulingPreference(SDNode *N) const { + virtual Sched::Preference getSchedulingPreference(SDNode *) const { return Sched::None; } @@ -328,15 +328,15 @@ public: bool writeMem; // writes memory? }; - virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info, - const CallInst &I, unsigned Intrinsic) const { + virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, + unsigned /*Intrinsic*/) const { return false; } /// isFPImmLegal - Returns true if the target can instruction select the /// specified FP immediate natively. If false, the legalizer will materialize /// the FP immediate as a load from a constant pool. - virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const { + virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const { return false; } @@ -344,8 +344,8 @@ public: /// support *some* VECTOR_SHUFFLE operations, those with specific masks. /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values /// are assumed to be legal. - virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask, - EVT VT) const { + virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/, + EVT /*VT*/) const { return true; } @@ -358,8 +358,8 @@ public: /// used by Targets can use this to indicate if there is a suitable /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant /// pool entry. 
- virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, - EVT VT) const { + virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/, + EVT /*VT*/) const { return false; } @@ -587,7 +587,7 @@ public: /// ShouldShrinkFPConstant - If true, then instruction selection should /// seek to shrink the FP constant of the specified type to a smaller type /// in order to save space and / or reduce runtime. - virtual bool ShouldShrinkFPConstant(EVT VT) const { return true; } + virtual bool ShouldShrinkFPConstant(EVT) const { return true; } /// hasTargetDAGCombine - If true, the target has custom DAG combine /// transformations that it can perform for the specified node. @@ -629,7 +629,7 @@ public: /// use helps to ensure that such replacements don't generate code that causes /// an alignment error (trap) on the target machine. /// @brief Determine if the target supports unaligned memory accesses. - virtual bool allowsUnalignedMemoryAccesses(EVT VT) const { + virtual bool allowsUnalignedMemoryAccesses(EVT) const { return false; } @@ -652,10 +652,11 @@ public: /// constant so it does not need to be loaded. /// It returns EVT::Other if the type should be determined using generic /// target-independent logic. - virtual EVT getOptimalMemOpType(uint64_t Size, - unsigned DstAlign, unsigned SrcAlign, - bool NonScalarIntSafe, bool MemcpyStrSrc, - MachineFunction &MF) const { + virtual EVT getOptimalMemOpType(uint64_t /*Size*/, + unsigned /*DstAlign*/, unsigned /*SrcAlign*/, + bool /*NonScalarIntSafe*/, + bool /*MemcpyStrSrc*/, + MachineFunction &/*MF*/) const { return MVT::Other; } @@ -745,20 +746,20 @@ public: /// getPreIndexedAddressParts - returns true by value, base pointer and /// offset pointer and addressing mode by reference if the node's address /// can be legally represented as pre-indexed load / store address. 
- virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, - SDValue &Offset, - ISD::MemIndexedMode &AM, - SelectionDAG &DAG) const { + virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/, + SDValue &/*Offset*/, + ISD::MemIndexedMode &/*AM*/, + SelectionDAG &/*DAG*/) const { return false; } /// getPostIndexedAddressParts - returns true by value, base pointer and /// offset pointer and addressing mode by reference if this node can be /// combined with a load / store to form a post-indexed load / store. - virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, - SDValue &Base, SDValue &Offset, - ISD::MemIndexedMode &AM, - SelectionDAG &DAG) const { + virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/, + SDValue &/*Base*/, SDValue &/*Offset*/, + ISD::MemIndexedMode &/*AM*/, + SelectionDAG &/*DAG*/) const { return false; } @@ -768,9 +769,9 @@ public: virtual unsigned getJumpTableEncoding() const; virtual const MCExpr * - LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, - const MachineBasicBlock *MBB, unsigned uid, - MCContext &Ctx) const { + LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/, + const MachineBasicBlock * /*MBB*/, unsigned /*uid*/, + MCContext &/*Ctx*/) const { assert(0 && "Need to implement this hook if target has custom JTIs"); return 0; } @@ -796,7 +797,8 @@ public: /// protector cookies at a fixed offset in some non-standard address /// space, and populates the address space and offset as /// appropriate. - virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const { + virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/, + unsigned &/*Offset*/) const { return false; } @@ -931,7 +933,7 @@ public: /// the specified value type and it is 'desirable' to use the type for the /// given node type. e.g. On x86 i16 is legal, but undesirable since i16 /// instruction encodings are longer and some i16 instructions are slow. 
- virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const { + virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const { // By default, assume all legal types are desirable. return isTypeLegal(VT); } @@ -939,14 +941,15 @@ public: /// isDesirableToPromoteOp - Return true if it is profitable for dag combiner /// to transform a floating point op of specified opcode to a equivalent op of /// an integer type. e.g. f32 load -> i32 load can be profitable on ARM. - virtual bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const { + virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/, + EVT /*VT*/) const { return false; } /// IsDesirableToPromoteOp - This method query the target whether it is /// beneficial for dag combiner to promote the specified node. If true, it /// should return the desired promotion type by reference. - virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { + virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const { return false; } @@ -1190,11 +1193,11 @@ public: /// chain value. /// virtual SDValue - LowerFormalArguments(SDValue Chain, - CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) const { + LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/, + bool /*isVarArg*/, + const SmallVectorImpl<ISD::InputArg> &/*Ins*/, + DebugLoc /*dl*/, SelectionDAG &/*DAG*/, + SmallVectorImpl<SDValue> &/*InVals*/) const { assert(0 && "Not Implemented"); return SDValue(); // this is here to silence compiler errors } @@ -1233,13 +1236,14 @@ public: /// InVals array with legal-type return values from the call, and return /// the resulting token chain value. 
virtual SDValue - LowerCall(SDValue Chain, SDValue Callee, - CallingConv::ID CallConv, bool isVarArg, bool &isTailCall, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) const { + LowerCall(SDValue /*Chain*/, SDValue /*Callee*/, + CallingConv::ID /*CallConv*/, bool /*isVarArg*/, + bool &/*isTailCall*/, + const SmallVectorImpl<ISD::OutputArg> &/*Outs*/, + const SmallVectorImpl<SDValue> &/*OutVals*/, + const SmallVectorImpl<ISD::InputArg> &/*Ins*/, + DebugLoc /*dl*/, SelectionDAG &/*DAG*/, + SmallVectorImpl<SDValue> &/*InVals*/) const { assert(0 && "Not Implemented"); return SDValue(); // this is here to silence compiler errors } @@ -1251,10 +1255,10 @@ public: /// return values described by the Outs array can fit into the return /// registers. If false is returned, an sret-demotion is performed. /// - virtual bool CanLowerReturn(CallingConv::ID CallConv, - MachineFunction &MF, bool isVarArg, - const SmallVectorImpl<ISD::OutputArg> &Outs, - LLVMContext &Context) const + virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/, + MachineFunction &/*MF*/, bool /*isVarArg*/, + const SmallVectorImpl<ISD::OutputArg> &/*Outs*/, + LLVMContext &/*Context*/) const { // Return true by default to get preexisting behavior. return true; @@ -1266,10 +1270,11 @@ public: /// value. /// virtual SDValue - LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/, + bool /*isVarArg*/, + const SmallVectorImpl<ISD::OutputArg> &/*Outs*/, + const SmallVectorImpl<SDValue> &/*OutVals*/, + DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const { assert(0 && "Not Implemented"); return SDValue(); // this is here to silence compiler errors } @@ -1277,7 +1282,7 @@ public: /// isUsedByReturnOnly - Return true if result of the specified node is used /// by a return node only. 
This is used to determine whether it is possible /// to codegen a libcall as tail call at legalization time. - virtual bool isUsedByReturnOnly(SDNode *N) const { + virtual bool isUsedByReturnOnly(SDNode *) const { return false; } @@ -1285,7 +1290,7 @@ public: /// call instruction as a tail call. This is used by optimization passes to /// determine if it's profitable to duplicate return instructions to enable /// tailcall optimization. - virtual bool mayBeEmittedAsTailCall(CallInst *CI) const { + virtual bool mayBeEmittedAsTailCall(CallInst *) const { return false; } @@ -1296,7 +1301,7 @@ public: /// necessary for non-C calling conventions. The frontend should handle this /// and include all of the necessary information. virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, - ISD::NodeType ExtendKind) const { + ISD::NodeType /*ExtendKind*/) const { EVT MinVT = getRegisterType(Context, MVT::i32); return VT.bitsLT(MinVT) ? MinVT : VT; } @@ -1333,8 +1338,9 @@ public: /// /// If the target has no operations that require custom lowering, it need not /// implement this. The default implementation aborts. - virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, - SelectionDAG &DAG) const { + virtual void ReplaceNodeResults(SDNode * /*N*/, + SmallVectorImpl<SDValue> &/*Results*/, + SelectionDAG &/*DAG*/) const { assert(0 && "ReplaceNodeResults not implemented for this target!"); } @@ -1344,7 +1350,7 @@ public: /// createFastISel - This method returns a target specific FastISel object, /// or null if the target does not support "fast" ISel. - virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const { + virtual FastISel *createFastISel(FunctionLoweringInfo &) const { return 0; } @@ -1356,7 +1362,7 @@ public: /// call to be explicit llvm code if it wants to. This is useful for /// turning simple inline asms into LLVM intrinsics, which gives the /// compiler more information about the behavior of the code. 
- virtual bool ExpandInlineAsm(CallInst *CI) const { + virtual bool ExpandInlineAsm(CallInst *) const { return false; } @@ -1538,7 +1544,7 @@ public: /// icmp immediate, that is the target has icmp instructions which can compare /// a register against the immediate without having to materialize the /// immediate into a register. - virtual bool isLegalICmpImmediate(int64_t Imm) const { + virtual bool isLegalICmpImmediate(int64_t) const { return true; } @@ -1546,18 +1552,18 @@ public: /// add immediate, that is the target has add instructions which can add /// a register with the immediate without having to materialize the /// immediate into a register. - virtual bool isLegalAddImmediate(int64_t Imm) const { + virtual bool isLegalAddImmediate(int64_t) const { return true; } /// isTruncateFree - Return true if it's free to truncate a value of /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in /// register EAX to i16 by referencing its sub-register AX. - virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const { + virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const { return false; } - virtual bool isTruncateFree(EVT VT1, EVT VT2) const { + virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const { return false; } @@ -1569,18 +1575,18 @@ public: /// does not necessarily apply to truncate instructions. e.g. on x86-64, /// all instructions that define 32-bit values implicit zero-extend the /// result out to 64 bits. - virtual bool isZExtFree(Type *Ty1, Type *Ty2) const { + virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const { return false; } - virtual bool isZExtFree(EVT VT1, EVT VT2) const { + virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const { return false; } /// isNarrowingProfitable - Return true if it's profitable to narrow /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow /// from i32 to i8 but not from i32 to i16. 
- virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const { + virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const { return false; }