Fix parameter name comments using clang-tidy. NFC.

This patch applies clang-tidy's bugprone-argument-comment check
to the LLVM, clang, and lld source trees. Here is how I created
this patch:

$ git clone https://github.com/llvm/llvm-project.git
$ cd llvm-project
$ mkdir build
$ cd build
$ cmake -GNinja -DCMAKE_BUILD_TYPE=Debug \
    -DLLVM_ENABLE_PROJECTS='clang;lld;clang-tools-extra' \
    -DCMAKE_EXPORT_COMPILE_COMMANDS=On -DLLVM_ENABLE_LLD=On \
    -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ ../llvm
$ ninja
$ parallel clang-tidy -checks='-*,bugprone-argument-comment' \
    -config='{CheckOptions: [{key: StrictMode, value: 1}]}' -fix \
    ::: ../llvm/lib/**/*.{cpp,h} ../clang/lib/**/*.{cpp,h} ../lld/**/*.{cpp,h}

llvm-svn: 366177
Author: Rui Ueyama
Date:   2019-07-16 04:46:31 +00:00
Parent: d36e380c67
Commit: 1179e18f0c
37 changed files with 68 additions and 68 deletions
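
For context, here is a hypothetical illustration of the kind of rewrite bugprone-argument-comment performs; createBuffer and its parameters are invented for this sketch and are not taken from the patch. The check compares /*Name=*/ argument comments against the declared parameter names and, with -fix, rewrites mismatches; StrictMode=1 additionally flags comments that differ only in case or underscores, which is what most of the hunks below correct.

// Hypothetical example; not part of this commit.
void createBuffer(unsigned Size, bool IsPinned, bool isZeroed) {}

void caller() {
  // Before: "Pinned" names no parameter, and "IsZeroed" matches "isZeroed"
  // only case-insensitively, so it is flagged solely because StrictMode is 1.
  //   createBuffer(4096, /*Pinned=*/true, /*IsZeroed=*/false);
  // After running the check with -fix, the comments match exactly:
  createBuffer(4096, /*IsPinned=*/true, /*isZeroed=*/false);
}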

View File

@@ -1278,12 +1278,12 @@ public:
case Intrinsic::experimental_vector_reduce_fmin:
return ConcreteTTI->getMinMaxReductionCost(
Tys[0], CmpInst::makeCmpResultType(Tys[0]), /*IsPairwiseForm=*/false,
-/*IsSigned=*/true);
+/*IsUnsigned=*/true);
case Intrinsic::experimental_vector_reduce_umax:
case Intrinsic::experimental_vector_reduce_umin:
return ConcreteTTI->getMinMaxReductionCost(
Tys[0], CmpInst::makeCmpResultType(Tys[0]), /*IsPairwiseForm=*/false,
-/*IsSigned=*/false);
+/*IsUnsigned=*/false);
case Intrinsic::sadd_sat:
case Intrinsic::ssub_sat: {
Type *CondTy = Type::getInt1Ty(RetTy->getContext());

View File

@@ -27,7 +27,7 @@ ViewEdgeBundles("view-edge-bundles", cl::Hidden,
char EdgeBundles::ID = 0;
INITIALIZE_PASS(EdgeBundles, "edge-bundles", "Bundle Machine CFG Edges",
-/* cfg = */true, /* analysis = */ true)
+/* cfg = */true, /* is_analysis = */ true)
char &llvm::EdgeBundlesID = EdgeBundles::ID;

View File

@@ -998,7 +998,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
while (!KilledRegs.empty()) {
unsigned Reg = KilledRegs.pop_back_val();
for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
-if (!(--I)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false))
+if (!(--I)->addRegisterKilled(Reg, TRI, /* AddIfNotFound= */ false))
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
LV->getVarInfo(Reg).Kills.push_back(&*I);

View File

@@ -3040,7 +3040,7 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
if (BF.OptimizeFunction(MF, TII, MF.getSubtarget().getRegisterInfo(),
getAnalysisIfAvailable<MachineModuleInfo>(), MLI,
-/*AfterBlockPlacement=*/true)) {
+/*AfterPlacement=*/true)) {
// Redo the layout if tail merging creates/removes/moves blocks.
BlockToChain.clear();
ComputedEdges.clear();

View File

@@ -92,7 +92,7 @@ int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
Objects.insert(Objects.begin(),
StackObject(Size, Alignment, SPOffset, IsImmutable,
-/*isSpillSlot=*/false, /*Alloca=*/nullptr,
+/*IsSpillSlot=*/false, /*Alloca=*/nullptr,
IsAliased));
return -++NumFixedObjects;
}

View File

@@ -165,7 +165,7 @@ void MachineFunction::init() {
!F.hasFnAttribute("no-realign-stack");
FrameInfo = new (Allocator) MachineFrameInfo(
getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
-/*ForceRealign=*/CanRealignSP &&
+/*ForcedRealign=*/CanRealignSP &&
F.hasFnAttribute(Attribute::StackAlignment));
if (F.hasFnAttribute(Attribute::StackAlignment))

View File

@@ -781,7 +781,7 @@ bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
unsigned Reg = getRegForValue(Val);
if (!Reg)
return false;
-Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
+Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
}
}
return true;
@@ -830,8 +830,8 @@ bool FastISel::selectStackmap(const CallInst *I) {
const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
for (unsigned i = 0; ScratchRegs[i]; ++i)
Ops.push_back(MachineOperand::CreateReg(
-ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
-/*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
+ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
+/*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
// Issue CALLSEQ_START
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
@@ -941,7 +941,7 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
CLI.NumResultRegs = 1;
-Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
+Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
}
// Add the <id> and <numBytes> constants.
@@ -990,13 +990,13 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
unsigned Reg = getRegForValue(I->getArgOperand(i));
if (!Reg)
return false;
-Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
+Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
}
}
// Push the arguments from the call instruction.
for (auto Reg : CLI.OutRegs)
-Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
+Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
// Push live variables for the stack map.
if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
@@ -1010,13 +1010,13 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
for (unsigned i = 0; ScratchRegs[i]; ++i)
Ops.push_back(MachineOperand::CreateReg(
-ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
-/*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
+ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
+/*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
// Add implicit defs (return values).
for (auto Reg : CLI.InRegs)
-Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
-/*IsImpl=*/true));
+Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
+/*isImp=*/true));
// Insert the patchpoint instruction before the call generated by the target.
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
@@ -1044,9 +1044,9 @@ bool FastISel::selectXRayCustomEvent(const CallInst *I) {
return true; // don't do anything to this instruction.
SmallVector<MachineOperand, 8> Ops;
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
-/*IsDef=*/false));
+/*isDef=*/false));
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
-/*IsDef=*/false));
+/*isDef=*/false));
MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
@@ -1063,11 +1063,11 @@ bool FastISel::selectXRayTypedEvent(const CallInst *I) {
return true; // don't do anything to this instruction.
SmallVector<MachineOperand, 8> Ops;
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
-/*IsDef=*/false));
+/*isDef=*/false));
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
-/*IsDef=*/false));
+/*isDef=*/false));
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
-/*IsDef=*/false));
+/*isDef=*/false));
MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));

View File

@@ -151,7 +151,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
auto Iter = CatchObjects.find(AI);
if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
FrameIndex = MF->getFrameInfo().CreateFixedObject(
-TySize, 0, /*Immutable=*/false, /*isAliased=*/true);
+TySize, 0, /*IsImmutable=*/false, /*isAliased=*/true);
MF->getFrameInfo().setObjectAlignment(FrameIndex, Align);
} else {
FrameIndex =

View File

@@ -1476,7 +1476,7 @@ void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
Flags.setZExt();
for (unsigned i = 0; i < NumParts; ++i)
-Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
+Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
}
}

View File

@@ -1224,14 +1224,14 @@ void WinEHPrepare::replaceUseWithLoad(Value *V, Use &U, AllocaInst *&SpillSlot,
if (!Load)
Load = new LoadInst(V->getType(), SpillSlot,
Twine(V->getName(), ".wineh.reload"),
-/*Volatile=*/false, IncomingBlock->getTerminator());
+/*isVolatile=*/false, IncomingBlock->getTerminator());
U.set(Load);
} else {
// Reload right before the old use.
auto *Load = new LoadInst(V->getType(), SpillSlot,
Twine(V->getName(), ".wineh.reload"),
-/*Volatile=*/false, UsingInst);
+/*isVolatile=*/false, UsingInst);
U.set(Load);
}
}

View File

@@ -310,7 +310,7 @@ void PassManagerPrettyStackEntry::print(raw_ostream &OS) const {
OS << "value";
OS << " '";
-V->printAsOperand(OS, /*PrintTy=*/false, M);
+V->printAsOperand(OS, /*PrintType=*/false, M);
OS << "'\n";
}

View File

@@ -22,18 +22,18 @@ APSInt::APSInt(StringRef Str) {
// (Over-)estimate the required number of bits.
unsigned NumBits = ((Str.size() * 64) / 19) + 2;
-APInt Tmp(NumBits, Str, /*Radix=*/10);
+APInt Tmp(NumBits, Str, /*radix=*/10);
if (Str[0] == '-') {
unsigned MinBits = Tmp.getMinSignedBits();
if (MinBits > 0 && MinBits < NumBits)
Tmp = Tmp.trunc(MinBits);
-*this = APSInt(Tmp, /*IsUnsigned=*/false);
+*this = APSInt(Tmp, /*isUnsigned=*/false);
return;
}
unsigned ActiveBits = Tmp.getActiveBits();
if (ActiveBits > 0 && ActiveBits < NumBits)
Tmp = Tmp.trunc(ActiveBits);
-*this = APSInt(Tmp, /*IsUnsigned=*/true);
+*this = APSInt(Tmp, /*isUnsigned=*/true);
}
void APSInt::Profile(FoldingSetNodeID& ID) const {

View File

@@ -17,14 +17,14 @@ using namespace llvm;
LLT::LLT(MVT VT) {
if (VT.isVector()) {
-init(/*isPointer=*/false, VT.getVectorNumElements() > 1,
+init(/*IsPointer=*/false, VT.getVectorNumElements() > 1,
VT.getVectorNumElements(), VT.getVectorElementType().getSizeInBits(),
/*AddressSpace=*/0);
} else if (VT.isValid()) {
// Aggregates are no different from real scalars as far as GlobalISel is
// concerned.
assert(VT.getSizeInBits() != 0 && "invalid zero-sized type");
-init(/*isPointer=*/false, /*isVector=*/false, /*NumElements=*/0,
+init(/*IsPointer=*/false, /*IsVector=*/false, /*NumElements=*/0,
VT.getSizeInBits(), /*AddressSpace=*/0);
} else {
IsPointer = false;

View File

@@ -612,7 +612,7 @@ raw_fd_ostream::~raw_fd_ostream() {
// destructing raw_ostream objects which may have errors.
if (has_error())
report_fatal_error("IO failure on output stream: " + error().message(),
-/*GenCrashDiag=*/false);
+/*gen_crash_diag=*/false);
}
#if defined(_WIN32)

View File

@@ -2365,7 +2365,7 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
AArch64::sub_32);
if ((BW < 32) && !IsBitTest)
-SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*IsZExt=*/true);
+SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*isZExt=*/true);
// Emit the combined compare and branch instruction.
SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
@@ -4272,7 +4272,7 @@ unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
const TargetRegisterClass *RC =
(RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
if (NeedTrunc) {
-Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*IsZExt=*/false);
+Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*isZExt=*/false);
Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
Op0IsKill = Op1IsKill = true;
}
@@ -4952,7 +4952,7 @@ std::pair<unsigned, bool> AArch64FastISel::getRegForGEPIndex(const Value *Idx) {
MVT PtrVT = TLI.getPointerTy(DL);
EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
if (IdxVT.bitsLT(PtrVT)) {
-IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, /*IsZExt=*/false);
+IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, /*isZExt=*/false);
IdxNIsKill = true;
} else if (IdxVT.bitsGT(PtrVT))
llvm_unreachable("AArch64 FastISel doesn't support types larger than i64");

View File

@@ -119,11 +119,11 @@ bool AMDGPUOpenCLEnqueuedBlockLowering::runOnModule(Module &M) {
auto T = ArrayType::get(Type::getInt64Ty(C), 2);
auto *GV = new GlobalVariable(
M, T,
-/*IsConstant=*/false, GlobalValue::ExternalLinkage,
+/*isConstant=*/false, GlobalValue::ExternalLinkage,
/*Initializer=*/Constant::getNullValue(T), RuntimeHandle,
/*InsertBefore=*/nullptr, GlobalValue::NotThreadLocal,
AMDGPUAS::GLOBAL_ADDRESS,
-/*IsExternallyInitialized=*/false);
+/*isExternallyInitialized=*/false);
LLVM_DEBUG(dbgs() << "runtime handle created: " << *GV << '\n');
for (auto U : F.users()) {

View File

@@ -2259,7 +2259,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
unsigned TargetFlags = GV->hasDLLImportStorageClass()
? ARMII::MO_DLLIMPORT
: ARMII::MO_NO_FLAG;
-Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0,
+Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*offset=*/0,
TargetFlags);
if (GV->hasDLLImportStorageClass())
Callee =
@@ -2914,7 +2914,7 @@ SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
auto M = const_cast<Module*>(DAG.getMachineFunction().
getFunction().getParent());
auto GV = new GlobalVariable(
-*M, T, /*isConst=*/true, GlobalVariable::InternalLinkage, C,
+*M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C,
Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" +
Twine(AFI->createPICLabelUId())
@@ -3467,7 +3467,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
// FIXME: Once remat is capable of dealing with instructions with register
// operands, expand this into two nodes.
Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
-DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
+DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0,
TargetFlags));
if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,

View File

@@ -34,7 +34,7 @@ protected:
LanaiELFObjectWriter::LanaiELFObjectWriter(uint8_t OSABI)
: MCELFObjectTargetWriter(/*Is64Bit_=*/false, OSABI, ELF::EM_LANAI,
-/*HasRelocationAddend=*/true) {}
+/*HasRelocationAddend_=*/true) {}
unsigned LanaiELFObjectWriter::getRelocType(MCContext & /*Ctx*/,
const MCValue & /*Target*/,

View File

@@ -36,8 +36,8 @@ protected:
} // end anonymous namespace
SystemZObjectWriter::SystemZObjectWriter(uint8_t OSABI)
-: MCELFObjectTargetWriter(/*Is64Bit=*/true, OSABI, ELF::EM_S390,
-/*HasRelocationAddend=*/ true) {}
+: MCELFObjectTargetWriter(/*Is64Bit_=*/true, OSABI, ELF::EM_S390,
+/*HasRelocationAddend_=*/ true) {}
// Return the relocation type for an absolute value of MCFixupKind Kind.
static unsigned getAbsoluteReloc(unsigned Kind) {

View File

@@ -194,7 +194,7 @@ static std::string toString(const APFloat &FP) {
static const size_t BufBytes = 128;
char Buf[BufBytes];
auto Written = FP.convertToHexString(
-Buf, /*hexDigits=*/0, /*upperCase=*/false, APFloat::rmNearestTiesToEven);
+Buf, /*HexDigits=*/0, /*UpperCase=*/false, APFloat::rmNearestTiesToEven);
(void)Written;
assert(Written != 0);
assert(Written < BufBytes);

View File

@@ -115,7 +115,7 @@ class WebAssemblyFastISel final : public FastISel {
private:
// Utility helper routines
MVT::SimpleValueType getSimpleType(Type *Ty) {
-EVT VT = TLI.getValueType(DL, Ty, /*HandleUnknown=*/true);
+EVT VT = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
return VT.isSimple() ? VT.getSimpleVT().SimpleTy
: MVT::INVALID_SIMPLE_VALUE_TYPE;
}

View File

@@ -81,7 +81,7 @@ void WebAssemblyRegisterInfo::eliminateFrameIndex(
if (static_cast<uint64_t>(Offset) <= std::numeric_limits<uint32_t>::max()) {
MI.getOperand(OffsetOperandNum).setImm(Offset);
MI.getOperand(FIOperandNum)
-.ChangeToRegister(FrameRegister, /*IsDef=*/false);
+.ChangeToRegister(FrameRegister, /*isDef=*/false);
return;
}
}
@@ -102,7 +102,7 @@ void WebAssemblyRegisterInfo::eliminateFrameIndex(
MachineOperand &ImmMO = Def->getOperand(1);
ImmMO.setImm(ImmMO.getImm() + uint32_t(FrameOffset));
MI.getOperand(FIOperandNum)
-.ChangeToRegister(FrameRegister, /*IsDef=*/false);
+.ChangeToRegister(FrameRegister, /*isDef=*/false);
return;
}
}
@@ -127,7 +127,7 @@ void WebAssemblyRegisterInfo::eliminateFrameIndex(
.addReg(FrameRegister)
.addReg(OffsetOp);
}
-MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*IsDef=*/false);
+MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*isDef=*/false);
}
Register

View File

@@ -289,7 +289,7 @@ bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
}
bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
-EVT evt = TLI.getValueType(DL, Ty, /*HandleUnknown=*/true);
+EVT evt = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
if (evt == MVT::Other || !evt.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;

View File

@@ -3170,7 +3170,7 @@ void X86FrameLowering::processFunctionBeforeFrameFinalized(
MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
int UnwindHelpFI =
-MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
+MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false);
EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
// Store -2 into UnwindHelp on function entry. We have to scan forwards past

View File

@@ -3021,7 +3021,7 @@ X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
// load from our portion of it. This assumes that if the first part of an
// argument is in memory, the rest will also be in memory.
int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
-/*Immutable=*/false);
+/*IsImmutable=*/false);
PartAddr = DAG.getFrameIndex(FI, PtrVT);
return DAG.getLoad(
ValVT, dl, Chain, PartAddr,
@@ -23719,7 +23719,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
// Set up a frame object for the return address.
unsigned SlotSize = RegInfo->getSlotSize();
FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
-SlotSize, /*Offset=*/0, /*IsImmutable=*/false);
+SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
FuncInfo->setFAIndex(FrameAddrIndex);
}
return DAG.getFrameIndex(FrameAddrIndex, VT);

View File

@@ -250,7 +250,7 @@ void X86WinAllocaExpander::lower(MachineInstr* MI, Lowering L) {
// Do the probe.
STI->getFrameLowering()->emitStackProbe(*MBB->getParent(), *MBB, MI, DL,
-/*InPrologue=*/false);
+/*InProlog=*/false);
} else {
// Sub
BuildMI(*MBB, I, DL,

View File

@@ -113,7 +113,7 @@ void Lowerer::lowerCoroNoop(IntrinsicInst *II) {
StructType *FrameTy = StructType::create(C, "NoopCoro.Frame");
auto *FramePtrTy = FrameTy->getPointerTo();
auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
-/*IsVarArgs=*/false);
+/*isVarArg=*/false);
auto *FnPtrTy = FnTy->getPointerTo();
FrameTy->setBody({FnPtrTy, FnPtrTy});

View File

@@ -378,7 +378,7 @@ static StructType *buildFrameType(Function &F, coro::Shape &Shape,
StructType *FrameTy = StructType::create(C, Name);
auto *FramePtrTy = FrameTy->getPointerTo();
auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
-/*IsVarArgs=*/false);
+/*isVarArg=*/false);
auto *FnPtrTy = FnTy->getPointerTo();
// Figure out how wide should be an integer type storing the suspend index.

View File

@@ -866,7 +866,7 @@ static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
LLVMContext &C = M.getContext();
auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
-/*IsVarArgs=*/false);
+/*isVarArg=*/false);
Function *DevirtFn =
Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
CORO_DEVIRT_TRIGGER_FN, &M);

View File

@@ -967,7 +967,7 @@ static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
// Can it be decomposed into icmp eq (X & Mask), 0 ?
if (llvm::decomposeBitTestICmp(ICmp->getOperand(0), ICmp->getOperand(1),
Pred, X, UnsetBitsMask,
-/*LookThruTrunc=*/false) &&
+/*LookThroughTrunc=*/false) &&
Pred == ICmpInst::ICMP_EQ)
return true;
// Is it icmp eq (X & Mask), 0 already?

View File

@@ -624,7 +624,7 @@ static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue())
return false;
-APInt Remainder(C1.getBitWidth(), /*Val=*/0ULL, IsSigned);
+APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
if (IsSigned)
APInt::sdivrem(C1, C2, Quotient, Remainder);
else
@@ -661,7 +661,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
// (X / C1) / C2 -> X / (C1*C2)
if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
(!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
-APInt Product(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);
+APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
return BinaryOperator::Create(I.getOpcode(), X,
ConstantInt::get(Ty, Product));
@@ -669,7 +669,7 @@
if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
(!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
-APInt Quotient(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);
+APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
// (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
@@ -693,7 +693,7 @@
if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
*C1 != C1->getBitWidth() - 1) ||
(!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
-APInt Quotient(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);
+APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
APInt C1Shifted = APInt::getOneBitSet(
C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));

View File

@@ -354,7 +354,7 @@ void HWAddressSanitizer::initializeModule(Module &M) {
if (!TargetTriple.isAndroid()) {
Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
-auto *GV = new GlobalVariable(M, IntptrTy, /*isConstantGlobal=*/false,
+auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
GlobalValue::ExternalLinkage, nullptr,
"__hwasan_tls", nullptr,
GlobalVariable::InitialExecTLSModel);

View File

@@ -541,7 +541,7 @@ static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
// Find the smallest power of two bitwidth that's sufficient to hold Instr's
// operands.
auto OrigWidth = Instr->getType()->getIntegerBitWidth();
-ConstantRange OperandRange(OrigWidth, /*isFullset=*/false);
+ConstantRange OperandRange(OrigWidth, /*isFullSet=*/false);
for (Value *Operand : Instr->operands()) {
OperandRange = OperandRange.unionWith(
LVI->getConstantRange(Operand, Instr->getParent()));

View File

@@ -436,7 +436,7 @@ Value *Float2IntPass::convert(Instruction *I, Type *ToTy) {
} else if (Instruction *VI = dyn_cast<Instruction>(V)) {
NewOperands.push_back(convert(VI, ToTy));
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
-APSInt Val(ToTy->getPrimitiveSizeInBits(), /*IsUnsigned=*/false);
+APSInt Val(ToTy->getPrimitiveSizeInBits(), /*isUnsigned=*/false);
bool Exact;
CF->getValueAPF().convertToInteger(Val,
APFloat::rmNearestTiesToEven,

View File

@@ -3125,7 +3125,7 @@ static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
MemAccessTy AccessTy = getAccessType(TTI, UserInst, Operand);
int64_t IncOffset = IncConst->getValue()->getSExtValue();
if (!isAlwaysFoldable(TTI, LSRUse::Address, AccessTy, /*BaseGV=*/nullptr,
-IncOffset, /*HaseBaseReg=*/false))
+IncOffset, /*HasBaseReg=*/false))
return false;
return true;

View File

@@ -494,7 +494,7 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI,
KnownBits Known = computeKnownBits(Val, DL, /*Depth=*/0, AC, SI);
// TODO Shouldn't this create a signed range?
ConstantRange KnownBitsRange =
-ConstantRange::fromKnownBits(Known, /*ForSigned=*/false);
+ConstantRange::fromKnownBits(Known, /*IsSigned=*/false);
const ConstantRange LVIRange = LVI->getConstantRange(Val, OrigBlock, SI);
ConstantRange ValRange = KnownBitsRange.intersectWith(LVIRange);
// We delegate removal of unreachable non-default cases to other passes. In

View File

@@ -5025,7 +5025,7 @@ SwitchLookupTable::SwitchLookupTable(
ArrayType *ArrayTy = ArrayType::get(ValueType, TableSize);
Constant *Initializer = ConstantArray::get(ArrayTy, TableContents);
-Array = new GlobalVariable(M, ArrayTy, /*constant=*/true,
+Array = new GlobalVariable(M, ArrayTy, /*isConstant=*/true,
GlobalVariable::PrivateLinkage, Initializer,
"switch.table." + FuncName);
Array->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);