Apply clang-tidy's performance-unnecessary-value-param to LLVM.
With some minor manual fixes for using function_ref instead of std::function. No functional change intended.

llvm-svn: 291904
parent 85d555f012
commit 5fd769f791
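Every hunk below applies one of three closely related fixes flagged by performance-unnecessary-value-param: parameters that are only read are taken by const reference instead of by value; "sink" parameters that the callee stores keep pass-by-value but are moved into place with std::move; and callables that are only invoked, never stored, are passed as llvm::function_ref rather than std::function. The standalone sketch that follows illustrates these patterns under stated assumptions: the Graph, GraphMetadata, totalNameLength, and forEachName names are invented for illustration, and the template callback parameter merely stands in for llvm::function_ref so the example compiles without LLVM headers.

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// A toy payload that is cheap to move but expensive to copy.
struct GraphMetadata { std::vector<std::string> NodeNames; };

// Pattern 1: a "sink" parameter is taken by value and moved into the member,
// so a caller passing an rvalue pays one move instead of a deep copy.
// (Before the cleanup this kind of constructor read `: Metadata(Metadata)`.)
class Graph {
  GraphMetadata Metadata;
public:
  explicit Graph(GraphMetadata Metadata) : Metadata(std::move(Metadata)) {}
};

// Pattern 2: a parameter that is only read is taken by const reference,
// avoiding the copy entirely.
static std::size_t totalNameLength(const GraphMetadata &MD) {
  std::size_t N = 0;
  for (const std::string &Name : MD.NodeNames)
    N += Name.size();
  return N;
}

// Pattern 3: a callback that is only invoked during the call, never stored,
// is passed as a non-owning callable. The diff uses llvm::function_ref for
// this; a template parameter is used here as a stand-in.
template <typename Callable>
static void forEachName(const GraphMetadata &MD, Callable &&Fn) {
  for (const std::string &Name : MD.NodeNames)
    Fn(Name);
}

int main() {
  GraphMetadata MD{{"entry", "body", "exit"}};
  Graph G(std::move(MD)); // one move, no copy of the vector
  GraphMetadata MD2{{"a", "bb"}};
  std::size_t Len = totalNameLength(MD2);
  forEachName(MD2, [&](const std::string &S) { Len += S.size(); });
  return static_cast<int>(Len);
}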
@@ -144,7 +144,7 @@ public:
   /// Iterate the given function (typically something like doubling the width)
   /// on Ty until we find a legal type for this operation.
   LLT findLegalType(const InstrAspect &Aspect,
-                    std::function<LLT(LLT)> NextType) const {
+                    function_ref<LLT(LLT)> NextType) const {
     LegalizeAction Action;
     const TypeMap &Map = Actions[Aspect.Opcode - FirstOp][Aspect.Idx];
     LLT Ty = Aspect.Type;
@@ -55,7 +55,7 @@ template <> struct ScalarTraits<StringValue> {

 struct FlowStringValue : StringValue {
   FlowStringValue() {}
-  FlowStringValue(std::string Value) : StringValue(Value) {}
+  FlowStringValue(std::string Value) : StringValue(std::move(Value)) {}
 };

 template <> struct ScalarTraits<FlowStringValue> {
@@ -496,7 +496,7 @@ class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
 private:
   typedef PBQP::Graph<RegAllocSolverImpl> BaseT;
 public:
-  PBQPRAGraph(GraphMetadata Metadata) : BaseT(Metadata) {}
+  PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}

   /// @brief Dump this graph to dbgs().
   void dump() const;
@@ -239,7 +239,7 @@ public:
   std::function<void(SDNode *, SDNode *)> Callback;
   DAGNodeDeletedListener(SelectionDAG &DAG,
                          std::function<void(SDNode *, SDNode *)> Callback)
-      : DAGUpdateListener(DAG), Callback(Callback) {}
+      : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
   void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
 };

@@ -176,7 +176,7 @@ struct BinaryAnnotationIterator {
     return Data == Other.Data;
   }

-  bool operator!=(BinaryAnnotationIterator Other) const {
+  bool operator!=(const BinaryAnnotationIterator &Other) const {
     return !(*this == Other);
   }

@@ -27,7 +27,7 @@ public:
   TypeIndex getNextTypeIndex() const;

   /// Records the name of a type, and reserves its type index.
-  void recordType(StringRef Name, CVType Data);
+  void recordType(StringRef Name, const CVType &Data);

   /// Saves the name in a StringSet and creates a stable StringRef.
   StringRef saveTypeName(StringRef TypeName);
@@ -75,7 +75,7 @@ class IRBuilderCallbackInserter : IRBuilderDefaultInserter {

 public:
   IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
-      : Callback(Callback) {}
+      : Callback(std::move(Callback)) {}

 protected:
   void InsertHelper(Instruction *I, const Twine &Name,
@@ -29,7 +29,7 @@ typedef std::function<void(unsigned Task, StringRef Path)> AddFileFn;

 /// Create a local file system cache which uses the given cache directory and
 /// file callback.
-NativeObjectCache localCache(std::string CacheDirectoryPath, AddFileFn AddFile);
+NativeObjectCache localCache(StringRef CacheDirectoryPath, AddFileFn AddFile);

 } // namespace lto
 } // namespace llvm
@@ -186,7 +186,7 @@ public:

   bool parseEOL(const Twine &ErrMsg);

-  bool parseMany(std::function<bool()> parseOne, bool hasComma = true);
+  bool parseMany(function_ref<bool()> parseOne, bool hasComma = true);

   bool parseIntToken(int64_t &V, const Twine &ErrMsg);

@@ -85,7 +85,7 @@ public:
     return getParser().parseToken(T, Msg);
   }

-  bool parseMany(std::function<bool()> parseOne, bool hasComma = true) {
+  bool parseMany(function_ref<bool()> parseOne, bool hasComma = true) {
     return getParser().parseMany(parseOne, hasComma);
   }

@@ -268,8 +268,8 @@ class LLVMTargetMachine : public TargetMachine {
 protected: // Can only create subclasses.
   LLVMTargetMachine(const Target &T, StringRef DataLayoutString,
                     const Triple &TargetTriple, StringRef CPU, StringRef FS,
-                    TargetOptions Options, Reloc::Model RM, CodeModel::Model CM,
-                    CodeGenOpt::Level OL);
+                    const TargetOptions &Options, Reloc::Model RM,
+                    CodeModel::Model CM, CodeGenOpt::Level OL);

   void initAsmInfo();
 public:
@@ -307,7 +307,7 @@ class CFLAndersAAResult::FunctionInfo {

 public:
   FunctionInfo(const Function &, const SmallVectorImpl<Value *> &,
-               const ReachabilitySet &, AliasAttrMap);
+               const ReachabilitySet &, const AliasAttrMap &);

   bool mayAlias(const Value *, uint64_t, const Value *, uint64_t) const;
   const AliasSummary &getAliasSummary() const { return Summary; }
@@ -470,7 +470,7 @@ static void populateExternalAttributes(

 CFLAndersAAResult::FunctionInfo::FunctionInfo(
     const Function &Fn, const SmallVectorImpl<Value *> &RetVals,
-    const ReachabilitySet &ReachSet, AliasAttrMap AMap) {
+    const ReachabilitySet &ReachSet, const AliasAttrMap &AMap) {
   populateAttrMap(AttrMap, AMap);
   populateExternalAttributes(Summary.RetParamAttributes, Fn, RetVals, AMap);
   populateAliasMap(AliasMap, ReachSet);
@@ -456,7 +456,7 @@ class MetadataLoader::MetadataLoaderImpl {
                          PlaceholderQueue &Placeholders, StringRef Blob,
                          unsigned &NextMetadataNo);
   Error parseMetadataStrings(ArrayRef<uint64_t> Record, StringRef Blob,
-                             std::function<void(StringRef)> CallBack);
+                             function_ref<void(StringRef)> CallBack);
   Error parseGlobalObjectAttachment(GlobalObject &GO,
                                     ArrayRef<uint64_t> Record);
   Error parseMetadataKindRecord(SmallVectorImpl<uint64_t> &Record);
@@ -480,7 +480,7 @@ public:
               bool IsImporting)
       : MetadataList(TheModule.getContext()), ValueList(ValueList),
         Stream(Stream), Context(TheModule.getContext()), TheModule(TheModule),
-        getTypeByID(getTypeByID), IsImporting(IsImporting) {}
+        getTypeByID(std::move(getTypeByID)), IsImporting(IsImporting) {}

   Error parseMetadata(bool ModuleLevel);

@@ -1506,7 +1506,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(

 Error MetadataLoader::MetadataLoaderImpl::parseMetadataStrings(
     ArrayRef<uint64_t> Record, StringRef Blob,
-    std::function<void(StringRef)> CallBack) {
+    function_ref<void(StringRef)> CallBack) {
   // All the MDStrings in the block are emitted together in a single
   // record. The strings are concatenated and stored in a blob along with
   // their sizes.
@@ -1703,8 +1703,8 @@ MetadataLoader::MetadataLoader(BitstreamCursor &Stream, Module &TheModule,
                                BitcodeReaderValueList &ValueList,
                                bool IsImporting,
                                std::function<Type *(unsigned)> getTypeByID)
-    : Pimpl(llvm::make_unique<MetadataLoaderImpl>(Stream, TheModule, ValueList,
-                                                  getTypeByID, IsImporting)) {}
+    : Pimpl(llvm::make_unique<MetadataLoaderImpl>(
+          Stream, TheModule, ValueList, std::move(getTypeByID), IsImporting)) {}

 Error MetadataLoader::parseMetadata(bool ModuleLevel) {
   return Pimpl->parseMetadata(ModuleLevel);
@@ -54,7 +54,7 @@ void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB,

 void MachineIRBuilder::recordInsertions(
     std::function<void(MachineInstr *)> Inserted) {
-  InsertedInstr = Inserted;
+  InsertedInstr = std::move(Inserted);
 }

 void MachineIRBuilder::stopRecordingInsertions() {
@@ -85,7 +85,7 @@ void LLVMTargetMachine::initAsmInfo() {
 LLVMTargetMachine::LLVMTargetMachine(const Target &T,
                                      StringRef DataLayoutString,
                                      const Triple &TT, StringRef CPU,
-                                     StringRef FS, TargetOptions Options,
+                                     StringRef FS, const TargetOptions &Options,
                                      Reloc::Model RM, CodeModel::Model CM,
                                      CodeGenOpt::Level OL)
     : TargetMachine(T, DataLayoutString, TT, CPU, FS, Options) {
@@ -384,9 +384,9 @@ namespace {
     SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
     SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);
     SDValue reduceBuildVecToShuffle(SDNode *N);
-    SDValue createBuildVecShuffle(SDLoc DL, SDNode *N, ArrayRef<int> VectorMask,
-                                  SDValue VecIn1, SDValue VecIn2,
-                                  unsigned LeftIdx);
+    SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
+                                  ArrayRef<int> VectorMask, SDValue VecIn1,
+                                  SDValue VecIn2, unsigned LeftIdx);

     SDValue GetDemandedBits(SDValue V, const APInt &Mask);

@@ -13010,7 +13010,7 @@ SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
   return DAG.getNode(Opcode, DL, VT, BV);
 }

-SDValue DAGCombiner::createBuildVecShuffle(SDLoc DL, SDNode *N,
+SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
                                            ArrayRef<int> VectorMask,
                                            SDValue VecIn1, SDValue VecIn2,
                                            unsigned LeftIdx) {
@@ -4764,7 +4764,7 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
                                              DILocalVariable *Variable,
                                              DIExpression *Expr, int64_t Offset,
-                                             DebugLoc dl,
+                                             const DebugLoc &dl,
                                              unsigned DbgSDNodeOrder) {
   SDDbgValue *SDV;
   auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode());
@@ -944,8 +944,8 @@ private:

   /// Return the appropriate SDDbgValue based on N.
   SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
-                          DIExpression *Expr, int64_t Offset, DebugLoc dl,
-                          unsigned DbgSDNodeOrder);
+                          DIExpression *Expr, int64_t Offset,
+                          const DebugLoc &dl, unsigned DbgSDNodeOrder);
 };

 /// RegsForValue - This struct represents the registers (physical or virtual)
@@ -71,7 +71,7 @@ TypeIndex TypeDatabase::getNextTypeIndex() const {
 }

 /// Records the name of a type, and reserves its type index.
-void TypeDatabase::recordType(StringRef Name, CVType Data) {
+void TypeDatabase::recordType(StringRef Name, const CVType &Data) {
   CVUDTNames.push_back(Name);
   TypeRecords.push_back(Data);
 }
@@ -46,7 +46,7 @@ static void commitEntry(StringRef TempFilename, StringRef EntryPath) {
   }
 }

-NativeObjectCache lto::localCache(std::string CacheDirectoryPath,
+NativeObjectCache lto::localCache(StringRef CacheDirectoryPath,
                                   AddFileFn AddFile) {
   return [=](unsigned Task, StringRef Key) -> AddStreamFn {
     // First, see if we have a cache hit.
@@ -68,8 +68,9 @@ NativeObjectCache lto::localCache(std::string CacheDirectoryPath,
     CacheStream(std::unique_ptr<raw_pwrite_stream> OS, AddFileFn AddFile,
                 std::string TempFilename, std::string EntryPath,
                 unsigned Task)
-        : NativeObjectStream(std::move(OS)), AddFile(AddFile),
-          TempFilename(TempFilename), EntryPath(EntryPath), Task(Task) {}
+        : NativeObjectStream(std::move(OS)), AddFile(std::move(AddFile)),
+          TempFilename(std::move(TempFilename)),
+          EntryPath(std::move(EntryPath)), Task(Task) {}

     ~CacheStream() {
       // Make sure the file is closed before committing it.
@@ -118,7 +118,7 @@ bool MCAsmParser::addErrorSuffix(const Twine &Suffix) {
   return true;
 }

-bool MCAsmParser::parseMany(std::function<bool()> parseOne, bool hasComma) {
+bool MCAsmParser::parseMany(function_ref<bool()> parseOne, bool hasComma) {
   if (parseOptionalToken(AsmToken::EndOfStatement))
     return false;
   while (1) {
@@ -129,11 +129,10 @@ struct OutgoingArgHandler : public CallLowering::ValueHandler {
   MachineInstrBuilder MIB;
 };

-void AArch64CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
-                                            SmallVectorImpl<ArgInfo> &SplitArgs,
-                                            const DataLayout &DL,
-                                            MachineRegisterInfo &MRI,
-                                            SplitArgTy PerformArgSplit) const {
+void AArch64CallLowering::splitToValueTypes(
+    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
+    const DataLayout &DL, MachineRegisterInfo &MRI,
+    const SplitArgTy &PerformArgSplit) const {
   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
   LLVMContext &Ctx = OrigArg.Ty->getContext();

@@ -50,7 +50,7 @@ private:
   void splitToValueTypes(const ArgInfo &OrigArgInfo,
                          SmallVectorImpl<ArgInfo> &SplitArgs,
                          const DataLayout &DL, MachineRegisterInfo &MRI,
-                         SplitArgTy SplitArg) const;
+                         const SplitArgTy &SplitArg) const;
 };
 } // End of namespace llvm;
 #endif
@@ -116,10 +116,9 @@ MCELFStreamer &AMDGPUTargetELFStreamer::getStreamer() {
   return static_cast<MCELFStreamer &>(Streamer);
 }

-void
-AMDGPUTargetELFStreamer::EmitAMDGPUNote(const MCExpr* DescSZ,
-                                        PT_NOTE::NoteType Type,
-                                        std::function<void(MCELFStreamer &)> EmitDesc) {
+void AMDGPUTargetELFStreamer::EmitAMDGPUNote(
+    const MCExpr *DescSZ, PT_NOTE::NoteType Type,
+    function_ref<void(MCELFStreamer &)> EmitDesc) {
   auto &S = getStreamer();
   auto &Context = S.getContext();

@@ -78,9 +78,8 @@ public:
 class AMDGPUTargetELFStreamer : public AMDGPUTargetStreamer {
   MCStreamer &Streamer;

-  void EmitAMDGPUNote(const MCExpr* DescSize,
-                      AMDGPU::PT_NOTE::NoteType Type,
-                      std::function<void(MCELFStreamer &)> EmitDesc);
+  void EmitAMDGPUNote(const MCExpr *DescSize, AMDGPU::PT_NOTE::NoteType Type,
+                      function_ref<void(MCELFStreamer &)> EmitDesc);

 public:
   AMDGPUTargetELFStreamer(MCStreamer &S);
@@ -2212,8 +2212,9 @@ SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
          !shouldEmitGOTReloc(GA->getGlobal());
 }

-static SDValue buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
-                                       SDLoc DL, unsigned Offset, EVT PtrVT,
+static SDValue
+buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
+                        const SDLoc &DL, unsigned Offset, EVT PtrVT,
                         unsigned GAFlags = SIInstrInfo::MO_NONE) {
   // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
   // lowered to the following code sequence:
@@ -2332,7 +2333,8 @@ SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
                      DAG.getValueType(VT));
 }

-static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
+static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
+                                        EVT VT) {
   DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                       "non-hsa intrinsic with hsa target",
                                       DL.getDebugLoc());
@@ -2340,7 +2342,8 @@ static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
   return DAG.getUNDEF(VT);
 }

-static SDValue emitRemovedIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
+static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
+                                         EVT VT) {
   DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                       "intrinsic not supported on subtarget",
                                       DL.getDebugLoc());
@@ -30,7 +30,7 @@ using namespace llvm;
 ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
     : CallLowering(&TLI) {}

-static bool isSupportedType(const DataLayout DL, const ARMTargetLowering &TLI,
+static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
                             Type *T) {
   EVT VT = TLI.getValueType(DL, T);
   if (!VT.isSimple() || !VT.isInteger() || VT.isVector())
@@ -3084,7 +3084,7 @@ static bool isSimpleType(Type *T) {
 }

 static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG,
-                                     EVT PtrVT, SDLoc dl) {
+                                     EVT PtrVT, const SDLoc &dl) {
   // If we're creating a pool entry for a constant global with unnamed address,
   // and the global is small enough, we can emit it inline into the constant pool
   // to save ourselves an indirection.
@@ -6376,7 +6376,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
   return SDValue();
 }

-static Constant *getConstantVector(MVT VT, APInt SplatValue,
+static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
                                    unsigned SplatBitSize, LLVMContext &C) {
   unsigned ScalarSize = VT.getScalarSizeInBits();
   unsigned NumElm = SplatBitSize / ScalarSize;
@@ -8009,7 +8009,7 @@ static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
   return Imm;
 }

-static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
+static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
                                           SelectionDAG &DAG) {
   return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
 }
@@ -8096,8 +8096,8 @@ static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
 //
 // The function looks for a sub-mask that the nonzero elements are in
 // increasing order. If such sub-mask exist. The function returns true.
-static bool isNonZeroElementsInOrder(const SmallBitVector Zeroable,
-                                     ArrayRef<int> Mask,const EVT &VectorType,
+static bool isNonZeroElementsInOrder(const SmallBitVector &Zeroable,
+                                     ArrayRef<int> Mask, const EVT &VectorType,
                                      bool &IsZeroSideLeft) {
   int NextElement = -1;
   // Check if the Mask's nonzero elements are in increasing order.
@@ -12921,7 +12921,7 @@ static SDValue lowerV8F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
 }

 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
-static SDValue lowerV16F32VectorShuffle(SDLoc DL, ArrayRef<int> Mask,
+static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                         const SmallBitVector &Zeroable,
                                         SDValue V1, SDValue V2,
                                         const X86Subtarget &Subtarget,
@@ -48,7 +48,7 @@ public:
   }

   explicit SimpleInliner(InlineParams Params)
-      : LegacyInlinerBase(ID), Params(Params) {
+      : LegacyInlinerBase(ID), Params(std::move(Params)) {
     initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
   }

@@ -33,7 +33,7 @@ STATISTIC(NumPartialInlined, "Number of functions partially inlined");

 namespace {
 struct PartialInlinerImpl {
-  PartialInlinerImpl(InlineFunctionInfo IFI) : IFI(IFI) {}
+  PartialInlinerImpl(InlineFunctionInfo IFI) : IFI(std::move(IFI)) {}
   bool run(Module &M);
   Function *unswitchFunction(Function *F);

@@ -194,7 +194,7 @@ void simplifyExternals(Module &M) {
 }

 void filterModule(
-    Module *M, std::function<bool(const GlobalValue *)> ShouldKeepDefinition) {
+    Module *M, function_ref<bool(const GlobalValue *)> ShouldKeepDefinition) {
   for (Function &F : *M) {
     if (ShouldKeepDefinition(&F))
       continue;
@@ -289,8 +289,8 @@ void ConstantHoistingPass::collectConstantCandidates(Function &Fn) {
 // bit widths (APInt Operator- does not like that). If the value cannot be
 // represented in uint64 we return an "empty" APInt. This is then interpreted
 // as the value is not in range.
-static llvm::Optional<APInt> calculateOffsetDiff(APInt V1, APInt V2)
-{
+static llvm::Optional<APInt> calculateOffsetDiff(const APInt &V1,
+                                                 const APInt &V2) {
   llvm::Optional<APInt> Res = None;
   unsigned BW = V1.getBitWidth() > V2.getBitWidth() ?
                 V1.getBitWidth() : V2.getBitWidth();
@@ -634,7 +634,7 @@ static BDVState meetBDVStateImpl(const BDVState &LHS, const BDVState &RHS) {

 // Values of type BDVState form a lattice, and this function implements the meet
 // operation.
-static BDVState meetBDVState(BDVState LHS, BDVState RHS) {
+static BDVState meetBDVState(const BDVState &LHS, const BDVState &RHS) {
   BDVState Result = meetBDVStateImpl(LHS, RHS);
   assert(Result == meetBDVStateImpl(RHS, LHS) &&
          "Math is wrong: meet does not commute!");