std::optional::value => operator*/operator->
value() has undesired exception checking semantics and calls __throw_bad_optional_access in libc++. Moreover, the API is unavailable without _LIBCPP_NO_EXCEPTIONS on older Mach-O platforms (see _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS). This fixes clang.
parent 2e9c3fe6fc
commit 21c4dc7997
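For context, a minimal sketch (not part of the commit) of the semantic difference the message describes: value() checks for emptiness and throws std::bad_optional_access when the optional is empty, while operator* treats an engaged optional as a precondition, so it compiles to a plain access and needs no exception-handling support.

    #include <cassert>
    #include <optional>

    int viaValue(const std::optional<int> &O) {
      return O.value(); // emptiness check; throws std::bad_optional_access if empty
    }

    int viaDeref(const std::optional<int> &O) {
      assert(O.has_value() && "caller must guarantee O is engaged");
      return *O; // unchecked access; undefined behavior if O is empty
    }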
@@ -344,7 +344,7 @@ protected:
     if (!res)
       Cached[hash] = ConditionTruthVal();
     else
-      Cached[hash] = ConditionTruthVal(res.value());
+      Cached[hash] = ConditionTruthVal(*res);

     return Cached[hash];
   }
@@ -279,7 +279,7 @@ bool RISCVTargetInfo::hasFeature(StringRef Feature) const {
           .Case("64bit", Is64Bit)
           .Default(std::nullopt);
   if (Result)
-    return Result.value();
+    return *Result;

   if (ISAInfo->isSupportedExtensionFeature(Feature))
     return ISAInfo->hasExtension(Feature);
@@ -3414,7 +3414,7 @@ class OffloadingActionBuilder final {
                                    AssociatedOffloadKind);

       if (CompileDeviceOnly && CurPhase == FinalPhase && BundleOutput &&
-          BundleOutput.value()) {
+          *BundleOutput) {
         for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
           OffloadAction::DeviceDependences DDep;
           DDep.add(*CudaDeviceActions[I], *ToolChains.front(), GpuArchList[I],
@@ -498,9 +498,8 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
   }

   if (SectionAddressData) {
-    std::string DataSectionArg =
-        std::string("-Tdata=0x") + llvm::utohexstr(SectionAddressData.value());
-    CmdArgs.push_back(Args.MakeArgString(DataSectionArg));
+    CmdArgs.push_back(Args.MakeArgString(
+        "-Tdata=0x" + Twine::utohexstr(*SectionAddressData)));
   } else {
     // We do not have an entry for this CPU in the address mapping table yet.
     D.Diag(diag::warn_drv_avr_linker_section_addresses_not_implemented) << CPU;
@@ -2260,8 +2260,8 @@ void Clang::AddHexagonTargetArgs(const ArgList &Args,

   if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
     CmdArgs.push_back("-mllvm");
-    CmdArgs.push_back(Args.MakeArgString("-hexagon-small-data-threshold=" +
-                                         Twine(G.value())));
+    CmdArgs.push_back(
+        Args.MakeArgString("-hexagon-small-data-threshold=" + Twine(*G)));
   }

   if (!Args.hasArg(options::OPT_fno_short_enums))
@@ -2136,7 +2136,7 @@ void Generic_GCC::GCCInstallationDetector::print(raw_ostream &OS) const {

 bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
   if (BiarchSibling) {
-    M = BiarchSibling.value();
+    M = *BiarchSibling;
     return true;
   }
   return false;
@@ -341,8 +341,8 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
     CmdArgs.push_back("-pie");

   if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
-    CmdArgs.push_back(Args.MakeArgString("-G" + Twine(G.value())));
-    UseG0 = G.value() == 0;
+    CmdArgs.push_back(Args.MakeArgString("-G" + Twine(*G)));
+    UseG0 = *G == 0;
   }

   CmdArgs.push_back("-o");
@@ -1990,7 +1990,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
     } else {
       Opts.DiagnosticsHotnessThreshold = *ResultOrErr;
       if ((!Opts.DiagnosticsHotnessThreshold ||
-           Opts.DiagnosticsHotnessThreshold.value() > 0) &&
+           *Opts.DiagnosticsHotnessThreshold > 0) &&
           !UsingProfile)
         Diags.Report(diag::warn_drv_diagnostics_hotness_requires_pgo)
             << "-fdiagnostics-hotness-threshold=";
@@ -2007,7 +2007,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
     } else {
       Opts.DiagnosticsMisExpectTolerance = *ResultOrErr;
       if ((!Opts.DiagnosticsMisExpectTolerance ||
-           Opts.DiagnosticsMisExpectTolerance.value() > 0) &&
+           *Opts.DiagnosticsMisExpectTolerance > 0) &&
           !UsingProfile)
         Diags.Report(diag::warn_drv_diagnostics_misexpect_requires_pgo)
             << "-fdiagnostics-misexpect-tolerance=";
@@ -2677,7 +2677,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
     if (IOSToWatchOSMapping) {
       if (auto MappedVersion = IOSToWatchOSMapping->map(
               Version, MinimumWatchOSVersion, std::nullopt)) {
-        return MappedVersion.value();
+        return *MappedVersion;
       }
     }

@@ -2686,10 +2686,10 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
       if (NewMajor >= 2) {
         if (Version.getMinor()) {
           if (Version.getSubminor())
-            return VersionTuple(NewMajor, Version.getMinor().value(),
-                                Version.getSubminor().value());
+            return VersionTuple(NewMajor, *Version.getMinor(),
+                                *Version.getSubminor());
           else
-            return VersionTuple(NewMajor, Version.getMinor().value());
+            return VersionTuple(NewMajor, *Version.getMinor());
         }
         return VersionTuple(NewMajor);
       }
@@ -770,7 +770,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
     if (!errorNode)
       errorNode = C.generateNonFatalErrorNode();

-    if (!errorNode.value())
+    if (!*errorNode)
       continue;

     SmallString<128> sbuf;
@@ -787,8 +787,8 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
     ArgTy.print(os, C.getLangOpts());
     os << "'";

-    auto R = std::make_unique<PathSensitiveBugReport>(*BT, os.str(),
-                                                      errorNode.value());
+    auto R =
+        std::make_unique<PathSensitiveBugReport>(*BT, os.str(), *errorNode);
     R->addRange(msg.getArgSourceRange(I));
     C.emitReport(std::move(R));
   }
@@ -1174,10 +1174,9 @@ MallocChecker::performKernelMalloc(const CallEvent &Call, CheckerContext &C,
   }

   NonLoc Flags = V.castAs<NonLoc>();
-  NonLoc ZeroFlag =
-      C.getSValBuilder()
-          .makeIntVal(KernelZeroFlagVal.value(), FlagsEx->getType())
-          .castAs<NonLoc>();
+  NonLoc ZeroFlag = C.getSValBuilder()
+                        .makeIntVal(*KernelZeroFlagVal, FlagsEx->getType())
+                        .castAs<NonLoc>();
   SVal MaskedFlagsUC = C.getSValBuilder().evalBinOpNN(State, BO_And,
                                                       Flags, ZeroFlag,
                                                       FlagsEx->getType());
@@ -234,7 +234,7 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
   }
   NonLoc oflags = V.castAs<NonLoc>();
   NonLoc ocreateFlag = C.getSValBuilder()
-      .makeIntVal(Val_O_CREAT.value(), oflagsEx->getType())
+      .makeIntVal(*Val_O_CREAT, oflagsEx->getType())
       .castAs<NonLoc>();
   SVal maskedFlagsUC = C.getSValBuilder().evalBinOpNN(state, BO_And,
                                                       oflags, ocreateFlag,
@@ -3255,9 +3255,9 @@ bool ConditionBRVisitor::printValue(const Expr *CondVarExpr, raw_ostream &Out,
     Out << (TookTrue ? "not equal to 0" : "0");
   } else {
     if (Ty->isBooleanType())
-      Out << (IntValue.value()->getBoolValue() ? "true" : "false");
+      Out << ((*IntValue)->getBoolValue() ? "true" : "false");
     else
-      Out << *IntValue.value();
+      Out << **IntValue;
   }

   return true;
@@ -3453,7 +3453,7 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
   if (!IsSAT)
     return;

-  if (!IsSAT.value())
+  if (!*IsSAT)
     BR.markInvalid("Infeasible constraints", EndPathNode->getLocationContext());
 }

@@ -449,7 +449,7 @@ int clang_main(int Argc, char **Argv) {
   std::optional<std::string> OptCL = llvm::sys::Process::GetEnv("CL");
   if (OptCL) {
     SmallVector<const char *, 8> PrependedOpts;
-    getCLEnvVarOptions(OptCL.value(), Saver, PrependedOpts);
+    getCLEnvVarOptions(*OptCL, Saver, PrependedOpts);

     // Insert right after the program name to prepend to the argument list.
     Args.insert(Args.begin() + 1, PrependedOpts.begin(), PrependedOpts.end());
@@ -458,7 +458,7 @@ int clang_main(int Argc, char **Argv) {
   std::optional<std::string> Opt_CL_ = llvm::sys::Process::GetEnv("_CL_");
   if (Opt_CL_) {
     SmallVector<const char *, 8> AppendedOpts;
-    getCLEnvVarOptions(Opt_CL_.value(), Saver, AppendedOpts);
+    getCLEnvVarOptions(*Opt_CL_, Saver, AppendedOpts);

     // Insert at the end of the argument list to append.
     Args.append(AppendedOpts.begin(), AppendedOpts.end());
@@ -112,30 +112,30 @@ public:
   // Getters functions that assert if the required values are not present.
   bool isEmbedded() const {
     assert(IsEmbedded.has_value() && "IsEmbedded is not set");
-    return IsEmbedded.value();
+    return *IsEmbedded;
   }

   bool isTargetCodegen() const {
     assert(IsTargetCodegen.has_value() && "IsTargetCodegen is not set");
-    return IsTargetCodegen.value();
+    return *IsTargetCodegen;
   }

   bool hasRequiresUnifiedSharedMemory() const {
     assert(HasRequiresUnifiedSharedMemory.has_value() &&
            "HasUnifiedSharedMemory is not set");
-    return HasRequiresUnifiedSharedMemory.value();
+    return *HasRequiresUnifiedSharedMemory;
   }

   bool openMPOffloadMandatory() const {
     assert(OpenMPOffloadMandatory.has_value() &&
            "OpenMPOffloadMandatory is not set");
-    return OpenMPOffloadMandatory.value();
+    return *OpenMPOffloadMandatory;
   }
   // Returns the FirstSeparator if set, otherwise use the default
   // separator depending on isTargetCodegen
   StringRef firstSeparator() const {
     if (FirstSeparator.has_value())
-      return FirstSeparator.value();
+      return *FirstSeparator;
     if (isTargetCodegen())
       return "_";
     return ".";
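The getters above keep their assert and then dereference unchecked, a pattern this change uses wherever the precondition is already established. A hedged sketch of the idiom (class and member names are illustrative, not from the commit):

    #include <cassert>
    #include <optional>

    class ConfigSketch {
      std::optional<bool> IsEmbedded; // unset until explicitly configured
    public:
      void setIsEmbedded(bool V) { IsEmbedded = V; }
      bool isEmbedded() const {
        assert(IsEmbedded.has_value() && "IsEmbedded is not set");
        return *IsEmbedded; // assert documents the precondition; no throw path
      }
    };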
@@ -145,7 +145,7 @@ public:
   // separator depending on isTargetCodegen
   StringRef separator() const {
     if (Separator.has_value())
-      return Separator.value();
+      return *Separator;
     if (isTargetCodegen())
       return "$";
     return ".";
@@ -827,8 +827,8 @@ bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
     } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
       std::optional<uint32_t> Id =
           AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction());
-      if (Id.has_value()) {
-        MIRBuilder.buildConstant(InputReg, Id.value());
+      if (Id) {
+        MIRBuilder.buildConstant(InputReg, *Id);
       } else {
         MIRBuilder.buildUndef(InputReg);
       }
@@ -1387,7 +1387,7 @@ bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);

   if (Arg) {
-    const int64_t Value = Arg.value().Value.getSExtValue();
+    const int64_t Value = Arg->Value.getSExtValue();
     if (Value == 0) {
       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
@@ -4246,7 +4246,7 @@ AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
       },
       [=](MachineInstrBuilder &MIB) { // vaddr
         if (FI)
-          MIB.addFrameIndex(FI.value());
+          MIB.addFrameIndex(*FI);
         else
           MIB.addReg(VAddr);
       },
@@ -4189,7 +4189,7 @@ bool AMDGPULegalizerInfo::getLDSKernelId(Register DstReg,
   std::optional<uint32_t> KnownSize =
       AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
   if (KnownSize.has_value())
-    B.buildConstant(DstReg, KnownSize.value());
+    B.buildConstant(DstReg, *KnownSize);
   return false;
 }

@@ -133,7 +133,7 @@ public:
     bool IsAOneAddressSpace = isOneAddressSpace(A);
     bool IsBOneAddressSpace = isOneAddressSpace(B);

-    return AIO.value() >= BIO.value() &&
+    return *AIO >= *BIO &&
            (IsAOneAddressSpace == IsBOneAddressSpace || !IsAOneAddressSpace);
   }
 };
@@ -1716,7 +1716,7 @@ SDValue SITargetLowering::getLDSKernelId(SelectionDAG &DAG,
   std::optional<uint32_t> KnownSize =
       AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
   if (KnownSize.has_value())
-    return DAG.getConstant(KnownSize.value(), SL, MVT::i32);
+    return DAG.getConstant(*KnownSize, SL, MVT::i32);
   return SDValue();
 }

@@ -2870,7 +2870,7 @@ void SITargetLowering::passSpecialInputs(
     std::optional<uint32_t> Id =
         AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
     if (Id.has_value()) {
-      InputReg = DAG.getConstant(Id.value(), DL, ArgVT);
+      InputReg = DAG.getConstant(*Id, DL, ArgVT);
     } else {
       InputReg = DAG.getUNDEF(ArgVT);
     }
@@ -2318,13 +2318,13 @@ bool SIMemoryLegalizer::runOnMachineFunction(MachineFunction &MF) {
         continue;

       if (const auto &MOI = MOA.getLoadInfo(MI))
-        Changed |= expandLoad(MOI.value(), MI);
+        Changed |= expandLoad(*MOI, MI);
       else if (const auto &MOI = MOA.getStoreInfo(MI))
-        Changed |= expandStore(MOI.value(), MI);
+        Changed |= expandStore(*MOI, MI);
       else if (const auto &MOI = MOA.getAtomicFenceInfo(MI))
-        Changed |= expandAtomicFence(MOI.value(), MI);
+        Changed |= expandAtomicFence(*MOI, MI);
       else if (const auto &MOI = MOA.getAtomicCmpxchgOrRmwInfo(MI))
-        Changed |= expandAtomicCmpxchgOrRmw(MOI.value(), MI);
+        Changed |= expandAtomicCmpxchgOrRmw(*MOI, MI);
     }
   }

@@ -351,13 +351,13 @@ std::optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) {
     if (!Op0 || !Op1)
       return std::optional<int64_t>{};
     if (I->getOpcode() == Instruction::Add)
-      return std::optional<int64_t>{Op0.value() + Op1.value()};
+      return std::optional<int64_t>{*Op0 + *Op1};
     if (I->getOpcode() == Instruction::Mul)
-      return std::optional<int64_t>{Op0.value() * Op1.value()};
+      return std::optional<int64_t>{*Op0 * *Op1};
     if (I->getOpcode() == Instruction::Shl)
-      return std::optional<int64_t>{Op0.value() << Op1.value()};
+      return std::optional<int64_t>{*Op0 << *Op1};
     if (I->getOpcode() == Instruction::Or)
-      return std::optional<int64_t>{Op0.value() | Op1.value()};
+      return std::optional<int64_t>{*Op0 | *Op1};
   }
   return std::optional<int64_t>{};
 }
@@ -1020,11 +1020,9 @@ findCFILocation(MachineBasicBlock &B) {
 }

 void HexagonFrameLowering::insertCFIInstructions(MachineFunction &MF) const {
-  for (auto &B : MF) {
-    auto At = findCFILocation(B);
-    if (At)
-      insertCFIInstructionsAt(B, At.value());
-  }
+  for (auto &B : MF)
+    if (auto At = findCFILocation(B))
+      insertCFIInstructionsAt(B, *At);
 }

 void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
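The Hexagon hunk above also folds the lookup into the if condition, so the optional is tested and bound in one statement. A standalone sketch of that shape (findSlot and consume are illustrative names, not from the commit):

    #include <optional>

    std::optional<int> findSlot(int Key); // illustrative
    void consume(int Slot);               // illustrative

    void demo(int Key) {
      if (auto At = findSlot(Key)) // test and bind in one step
        consume(*At);              // *At is only reached when engaged
    }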
@@ -400,10 +400,10 @@ private:
     static const char *SgnNames[] = {"Positive", "Signed", "Unsigned"};
     OS << Instruction::getOpcodeName(Op.Opcode) << '.' << Op.Frac;
     if (Op.RoundAt.has_value()) {
-      if (Op.Frac != 0 && Op.RoundAt.value() == Op.Frac - 1) {
+      if (Op.Frac != 0 && *Op.RoundAt == Op.Frac - 1) {
         OS << ":rnd";
       } else {
-        OS << " + 1<<" << Op.RoundAt.value();
+        OS << " + 1<<" << *Op.RoundAt;
       }
     }
     OS << "\n X:(" << SgnNames[Op.X.Sgn] << ") " << *Op.X.Val << "\n"
@@ -706,14 +706,14 @@ LanaiAsmParser::parseRegister(bool RestoreOnFailure) {
     RegNum = MatchRegisterName(Lexer.getTok().getIdentifier());
     if (RegNum == 0) {
       if (PercentTok && RestoreOnFailure)
-        Lexer.UnLex(PercentTok.value());
+        Lexer.UnLex(*PercentTok);
       return nullptr;
     }
     Parser.Lex(); // Eat identifier token
     return LanaiOperand::createReg(RegNum, Start, End);
   }
   if (PercentTok && RestoreOnFailure)
-    Lexer.UnLex(PercentTok.value());
+    Lexer.UnLex(*PercentTok);
   return nullptr;
 }

@@ -1997,7 +1997,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       InFlag = Ret.getValue(2);

       if (ProxyRegTruncates[i]) {
-        Ret = DAG.getNode(ISD::TRUNCATE, dl, ProxyRegTruncates[i].value(), Ret);
+        Ret = DAG.getNode(ISD::TRUNCATE, dl, *ProxyRegTruncates[i], Ret);
       }

       InVals.push_back(Ret);
@@ -41,7 +41,7 @@ SDValue VETargetLowering::lowerToVVP(SDValue Op, SelectionDAG &DAG) const {
   auto VVPOpcodeOpt = getVVPOpcode(Opcode);
   if (!VVPOpcodeOpt)
     return SDValue();
-  unsigned VVPOpcode = VVPOpcodeOpt.value();
+  unsigned VVPOpcode = *VVPOpcodeOpt;
   const bool FromVP = ISD::isVPOpcode(Opcode);

   // The representative and legalized vector type of this operation.
@@ -87,14 +87,14 @@ bool WebAssemblyAsmTypeCheck::popType(SMLoc ErrorLoc,
   if (Stack.empty()) {
     return typeError(ErrorLoc,
                      EVT ? StringRef("empty stack while popping ") +
-                               WebAssembly::typeToString(EVT.value())
+                               WebAssembly::typeToString(*EVT)
                          : StringRef("empty stack while popping value"));
   }
   auto PVT = Stack.pop_back_val();
-  if (EVT && EVT.value() != PVT) {
-    return typeError(
-        ErrorLoc, StringRef("popped ") + WebAssembly::typeToString(PVT) +
-                      ", expected " + WebAssembly::typeToString(EVT.value()));
+  if (EVT && *EVT != PVT) {
+    return typeError(ErrorLoc,
+                     StringRef("popped ") + WebAssembly::typeToString(PVT) +
+                         ", expected " + WebAssembly::typeToString(*EVT));
   }
   return false;
 }
@@ -551,7 +551,7 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallBase *CI) {
     auto [SizeArg, NEltArg] = *Args;
     SizeArg += 1;
     if (NEltArg)
-      NEltArg = NEltArg.value() + 1;
+      NEltArg = *NEltArg + 1;
     FnAttrs.addAllocSizeAttr(SizeArg, NEltArg);
   }
   // In case the callee has 'noreturn' attribute, We need to remove it, because
@@ -359,7 +359,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(AVX512BWUniformConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX512UniformConstCostTable[] = {
     { ISD::SHL, MVT::v64i8, { 2, 12, 5, 6 } }, // psllw + pand.
@@ -395,7 +395,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(AVX512UniformConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX2UniformConstCostTable[] = {
     { ISD::SHL, MVT::v16i8, { 1, 8, 2, 3 } }, // psllw + pand.
@@ -436,7 +436,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVXUniformConstCostTable[] = {
     { ISD::SHL, MVT::v16i8, { 2, 7, 2, 3 } }, // psllw + pand.
@@ -479,7 +479,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(AVXUniformConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry SSE2UniformConstCostTable[] = {
     { ISD::SHL, MVT::v16i8, { 1, 7, 2, 3 } }, // psllw + pand.
@@ -510,7 +510,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX512BWConstCostTable[] = {
     { ISD::SDIV, MVT::v64i8, { 14 } }, // 2*ext+2*pmulhw sequence
@@ -528,7 +528,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX512ConstCostTable[] = {
     { ISD::SDIV, MVT::v64i8, { 28 } }, // 4*ext+4*pmulhw sequence
@@ -551,7 +551,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX2ConstCostTable[] = {
     { ISD::SDIV, MVT::v32i8, { 14 } }, // 2*ext+2*pmulhw sequence
@@ -573,7 +573,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (Op2Info.isConstant() && ST->hasAVX2())
     if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVXConstCostTable[] = {
     { ISD::SDIV, MVT::v32i8, { 30 } }, // 4*ext+4*pmulhw sequence + split.
@@ -595,7 +595,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (Op2Info.isConstant() && ST->hasAVX())
     if (const auto *Entry = CostTableLookup(AVXConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry SSE41ConstCostTable[] = {
     { ISD::SDIV, MVT::v4i32, { 15 } }, // vpmuludq sequence
@@ -606,7 +606,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(SSE41ConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry SSE2ConstCostTable[] = {
     { ISD::SDIV, MVT::v16i8, { 14 } }, // 2*ext+2*pmulhw sequence
@@ -628,7 +628,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (Op2Info.isConstant() && ST->hasSSE2())
     if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX512BWUniformCostTable[] = {
     { ISD::SHL, MVT::v16i8, { 3, 5, 5, 7 } }, // psllw + pand.
@@ -650,7 +650,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(AVX512BWUniformCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX512UniformCostTable[] = {
     { ISD::SHL, MVT::v32i16, { 5,10, 5, 7 } }, // psllw + split.
@@ -674,7 +674,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(AVX512UniformCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX2UniformCostTable[] = {
     // Uniform splats are cheaper for the following instructions.
@@ -711,7 +711,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVXUniformCostTable[] = {
     { ISD::SHL, MVT::v16i8, { 4, 4, 6, 8 } }, // psllw + pand.
@@ -749,7 +749,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(AVXUniformCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry SSE2UniformCostTable[] = {
     // Uniform splats are cheaper for the following instructions.
@@ -775,7 +775,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX512DQCostTable[] = {
     { ISD::MUL, MVT::v2i64, { 2, 15, 1, 3 } }, // pmullq
@@ -787,7 +787,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->hasDQI())
     if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX512BWCostTable[] = {
     { ISD::SHL, MVT::v16i8, { 4, 8, 4, 5 } }, // extend/vpsllvw/pack sequence.
@@ -833,7 +833,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->hasBWI())
     if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX512CostTable[] = {
     { ISD::SHL, MVT::v64i8, { 15, 19,27,33 } }, // vpblendv+split sequence.
@@ -925,7 +925,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->hasAVX512())
     if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX2ShiftCostTable[] = {
     // Shifts on vXi64/vXi32 on AVX2 is legal even though we declare to
@@ -961,7 +961,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(

     if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;
   }

   static const CostKindTblEntry XOPShiftCostTable[] = {
@@ -1003,7 +1003,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     if (const auto *Entry =
             CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;
   }

   if (ISD == ISD::SHL && !Op2Info.isUniform() && Op2Info.isConstant()) {
@@ -1025,7 +1025,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->useGLMDivSqrtCosts())
     if (const auto *Entry = CostTableLookup(GLMCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry SLMCostTable[] = {
     { ISD::MUL, MVT::v4i32, { 11, 11, 1, 7 } }, // pmulld
@@ -1054,7 +1054,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->useSLMArithCosts())
     if (const auto *Entry = CostTableLookup(SLMCostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX2CostTable[] = {
     { ISD::SHL, MVT::v16i8, { 6, 21,11,16 } }, // vpblendvb sequence.
@@ -1125,7 +1125,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->hasAVX2())
     if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry AVX1CostTable[] = {
     // We don't have to scalarize unsupported ops. We can issue two half-sized
@@ -1224,7 +1224,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->hasAVX())
     if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry SSE42CostTable[] = {
     { ISD::FADD, MVT::f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
@@ -1253,8 +1253,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->hasSSE42())
     if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
-
+        return LT.first * *KindCost;

   static const CostKindTblEntry SSE41CostTable[] = {
     { ISD::SHL, MVT::v16i8, { 15, 24,17,22 } }, // pblendvb sequence.
@@ -1277,7 +1276,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->hasSSE41())
     if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry SSE2CostTable[] = {
     // We don't correctly identify costs of casts because they are marked as
@@ -1344,7 +1343,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->hasSSE2())
     if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry SSE1CostTable[] = {
     { ISD::FDIV, MVT::f32, { 17, 18, 1, 1 } }, // Pentium III from http://www.agner.org/
@@ -1366,7 +1365,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->hasSSE1())
     if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry X64CostTbl[] = { // 64-bit targets
     { ISD::ADD, MVT::i64, { 1 } }, // Core (Merom) from http://www.agner.org/
@@ -1377,7 +1376,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
   if (ST->is64Bit())
     if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostKindTblEntry X86CostTbl[] = { // 32 or 64-bit targets
     { ISD::ADD, MVT::i8, { 1 } }, // Pentium III from http://www.agner.org/
@@ -1397,7 +1396,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(

   if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
     if (auto KindCost = Entry->Cost[CostKind])
-      return LT.first * KindCost.value();
+      return LT.first * *KindCost;

   // It is not a good idea to vectorize division. We have to scalarize it and
   // in the process we will often end up having to spilling regular
@@ -1785,7 +1784,7 @@ InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
   if (ST->hasAVX512())
     if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * KindCost.value();
+        return LT.first * *KindCost;

   static const CostTblEntry AVX2ShuffleTbl[] = {
     {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
@@ -3263,52 +3262,52 @@ InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
   if (ST->useSLMArithCosts())
     if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * (ExtraCost + KindCost.value());
+        return LT.first * (ExtraCost + *KindCost);

   if (ST->hasBWI())
     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * (ExtraCost + KindCost.value());
+        return LT.first * (ExtraCost + *KindCost);

   if (ST->hasAVX512())
     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * (ExtraCost + KindCost.value());
+        return LT.first * (ExtraCost + *KindCost);

   if (ST->hasAVX2())
     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * (ExtraCost + KindCost.value());
+        return LT.first * (ExtraCost + *KindCost);

   if (ST->hasXOP())
     if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * (ExtraCost + KindCost.value());
+        return LT.first * (ExtraCost + *KindCost);

   if (ST->hasAVX())
     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * (ExtraCost + KindCost.value());
+        return LT.first * (ExtraCost + *KindCost);

   if (ST->hasSSE42())
     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * (ExtraCost + KindCost.value());
+        return LT.first * (ExtraCost + *KindCost);

   if (ST->hasSSE41())
     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * (ExtraCost + KindCost.value());
+        return LT.first * (ExtraCost + *KindCost);

   if (ST->hasSSE2())
     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * (ExtraCost + KindCost.value());
+        return LT.first * (ExtraCost + *KindCost);

   if (ST->hasSSE1())
     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return LT.first * (ExtraCost + KindCost.value());
+        return LT.first * (ExtraCost + *KindCost);

   // Assume a 3cy latency for fp select ops.
   if (CostKind == TTI::TCK_Latency && Opcode == Instruction::Select)
@@ -4100,109 +4099,109 @@ X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
     if (ST->useGLMDivSqrtCosts())
       if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->useSLMArithCosts())
       if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasVBMI2())
       if (const auto *Entry = CostTableLookup(AVX512VBMI2CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasBITALG())
       if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasVPOPCNTDQ())
       if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasCDI())
       if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasBWI())
       if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasAVX512())
       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasXOP())
       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasAVX2())
       if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasAVX())
       if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasSSE42())
       if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasSSE41())
       if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasSSSE3())
       if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasSSE2())
       if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasSSE1())
       if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (ST->hasBMI()) {
       if (ST->is64Bit())
         if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
           if (auto KindCost = Entry->Cost[CostKind])
-            return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+            return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                    ICA.getFlags());

       if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());
     }

@@ -4210,12 +4209,12 @@ X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
       if (ST->is64Bit())
         if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
           if (auto KindCost = Entry->Cost[CostKind])
-            return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+            return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                    ICA.getFlags());

       if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());
     }

@@ -4223,12 +4222,12 @@ X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
       if (ST->is64Bit())
         if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
           if (auto KindCost = Entry->Cost[CostKind])
-            return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+            return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                    ICA.getFlags());

       if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
         if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());
     }

@@ -4246,13 +4245,12 @@ X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
     if (ST->is64Bit())
       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
-          return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
+          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                  ICA.getFlags());

     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
       if (auto KindCost = Entry->Cost[CostKind])
-        return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
-                               ICA.getFlags());
+        return adjustTableCost(Entry->ISD, *KindCost, LT.first, ICA.getFlags());
   }

   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
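Throughout the X86 cost-model hunks, the optional-like cost is tested by the inner if and only then dereferenced. A minimal sketch of that lookup shape (lookupCost is an illustrative stand-in, not the LLVM API):

    #include <optional>

    std::optional<int> lookupCost(int Kind); // illustrative stand-in

    int costOrDefault(int Kind, int Default) {
      if (auto C = lookupCost(Kind)) // engaged?
        return *C;                   // unchecked dereference, guarded by the if
      return Default;
    }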
@@ -818,8 +818,8 @@ Argument *IRPosition::getAssociatedArgument() const {
   }

   // If we found a unique callback candidate argument, return it.
-  if (CBCandidateArg && CBCandidateArg.value())
-    return CBCandidateArg.value();
+  if (CBCandidateArg && *CBCandidateArg)
+    return *CBCandidateArg;

   // If no callbacks were found, or none used the underlying call site operand
   // exclusively, use the direct callee argument if available.
@@ -1186,7 +1186,7 @@ bool Attributor::getAssumedSimplifiedValues(
     std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
     if (!CBResult.has_value())
       continue;
-    Value *V = CBResult.value();
+    Value *V = *CBResult;
     if (!V)
       return false;
     if ((S & AA::ValueScope::Interprocedural) ||
@@ -2843,8 +2843,8 @@ void InformationCache::initializeInformationCache(const Function &CF,
         std::optional<short> &NumUses = AssumeUsesMap[I];
         if (!NumUses)
           NumUses = I->getNumUses();
-        NumUses = NumUses.value() - /* this assume */ 1;
-        if (NumUses.value() != 0)
+        NumUses = *NumUses - /* this assume */ 1;
+        if (*NumUses != 0)
           continue;
         AssumeOnlyValues.insert(I);
         for (const Value *Op : I->operands())
@@ -2080,14 +2080,14 @@ ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
   // Check if we have an assumed unique return value that we could manifest.
   std::optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

-  if (!UniqueRV || !UniqueRV.value())
+  if (!UniqueRV || !*UniqueRV)
     return Changed;

   // Bookkeeping.
   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                   "Number of function with unique return");
   // If the assumed unique return value is an argument, annotate it.
-  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.value())) {
+  if (auto *UniqueRVArg = dyn_cast<Argument>(*UniqueRV)) {
     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
             getAssociatedFunction()->getReturnType())) {
       getIRPosition() = IRPosition::argument(*UniqueRVArg);
@@ -2865,9 +2865,9 @@ struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
       // or we got back a simplified value to continue.
       std::optional<Value *> SimplifiedPtrOp =
           stopOnUndefOrAssumed(A, PtrOp, &I);
-      if (!SimplifiedPtrOp || !SimplifiedPtrOp.value())
+      if (!SimplifiedPtrOp || !*SimplifiedPtrOp)
        return true;
-      const Value *PtrOpVal = SimplifiedPtrOp.value();
+      const Value *PtrOpVal = *SimplifiedPtrOp;

       // A memory access through a pointer is considered UB
       // only if the pointer has constant null value.
@@ -2957,14 +2957,14 @@ struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
             UsedAssumedInformation, AA::Interprocedural);
         if (UsedAssumedInformation)
           continue;
-        if (SimplifiedVal && !SimplifiedVal.value())
+        if (SimplifiedVal && !*SimplifiedVal)
           return true;
-        if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.value())) {
+        if (!SimplifiedVal || isa<UndefValue>(**SimplifiedVal)) {
           KnownUBInsts.insert(&I);
           continue;
         }
         if (!ArgVal->getType()->isPointerTy() ||
-            !isa<ConstantPointerNull>(*SimplifiedVal.value()))
+            !isa<ConstantPointerNull>(**SimplifiedVal))
           continue;
         auto &NonNullAA =
             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
@@ -4556,11 +4556,11 @@ identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
   bool UsedAssumedInformation = false;
   std::optional<Constant *> C =
       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
-  if (!C || isa_and_nonnull<UndefValue>(C.value())) {
+  if (!C || isa_and_nonnull<UndefValue>(*C)) {
     // No value yet, assume all edges are dead.
-  } else if (isa_and_nonnull<ConstantInt>(C.value())) {
+  } else if (isa_and_nonnull<ConstantInt>(*C)) {
     for (const auto &CaseIt : SI.cases()) {
-      if (CaseIt.getCaseValue() == C.value()) {
+      if (CaseIt.getCaseValue() == *C) {
         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
         return UsedAssumedInformation;
       }
@@ -5995,8 +5995,8 @@ struct AAValueSimplifyImpl : AAValueSimplify {
     if (!SimpleV.has_value())
       return PoisonValue::get(&Ty);
     Value *EffectiveV = &V;
-    if (SimpleV.value())
-      EffectiveV = SimpleV.value();
+    if (*SimpleV)
+      EffectiveV = *SimpleV;
     if (auto *C = dyn_cast<Constant>(EffectiveV))
       return C;
     if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
@@ -6012,7 +6012,7 @@ struct AAValueSimplifyImpl : AAValueSimplify {
   /// nullptr if we don't have one that makes sense.
   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
     Value *NewV = SimplifiedAssociatedValue
-                      ? SimplifiedAssociatedValue.value()
+                      ? *SimplifiedAssociatedValue
                       : UndefValue::get(getAssociatedType());
     if (NewV && NewV != &getAssociatedValue()) {
       ValueToValueMapTy VMap;
@@ -6143,7 +6143,7 @@ struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
          A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
      if (!SimpleArgOp)
        return true;
-      if (!SimpleArgOp.value())
+      if (!*SimpleArgOp)
        return false;
      if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
        return false;
@@ -6572,10 +6572,10 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
        Alignment = std::max(Alignment, *RetAlign);
      if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
        std::optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
-        assert(AlignmentAPI && AlignmentAPI.value().getZExtValue() > 0 &&
+        assert(AlignmentAPI && AlignmentAPI->getZExtValue() > 0 &&
               "Expected an alignment during manifest!");
-        Alignment = std::max(
-            Alignment, assumeAligned(AlignmentAPI.value().getZExtValue()));
+        Alignment =
+            std::max(Alignment, assumeAligned(AlignmentAPI->getZExtValue()));
      }

      // TODO: Hoist the alloca towards the function entry.
@@ -6624,7 +6624,7 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
          A.getAssumedConstant(V, AA, UsedAssumedInformation);
      if (!SimpleV)
        return APInt(64, 0);
-      if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.value()))
+      if (auto *CI = dyn_cast_or_null<ConstantInt>(*SimpleV))
        return CI->getValue();
      return std::nullopt;
    }
@@ -6678,7 +6678,7 @@ ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
      return false;
    if (!MayContainIrreducibleControl.has_value())
      MayContainIrreducibleControl = mayContainIrreducibleControl(*F, LI);
-    if (MayContainIrreducibleControl.value())
+    if (*MayContainIrreducibleControl)
      return true;
    if (!LI)
      return true;
@@ -6907,7 +6907,7 @@ ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {

    std::optional<APInt> Size = getSize(A, *this, AI);
    if (MaxHeapToStackSize != -1) {
-      if (!Size || Size.value().ugt(MaxHeapToStackSize)) {
+      if (!Size || Size->ugt(MaxHeapToStackSize)) {
        LLVM_DEBUG({
          if (!Size)
            dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
@@ -7035,8 +7035,8 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
-        if (CSTy && CSTy.value())
-          CSTy.value()->print(dbgs());
+        if (CSTy && *CSTy)
+          (*CSTy)->print(dbgs());
        else if (CSTy)
          dbgs() << "<nullptr>";
        else
@@ -7047,8 +7047,8 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {

      LLVM_DEBUG({
        dbgs() << " : New Type: ";
-        if (Ty && Ty.value())
-          Ty.value()->print(dbgs());
+        if (Ty && *Ty)
+          (*Ty)->print(dbgs());
        else if (Ty)
          dbgs() << "<nullptr>";
        else
@@ -7056,7 +7056,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
        dbgs() << "\n";
      });

-      return !Ty || Ty.value();
+      return !Ty || *Ty;
    };

    if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
@@ -7070,7 +7070,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType)
      return ChangeStatus::UNCHANGED;
-    if (!PrivatizableType.value())
+    if (!*PrivatizableType)
      return indicatePessimisticFixpoint();

    // The dependence is optional so we don't give up once we give up on the
@@ -7158,7 +7158,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
        auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
        if (!CBArgPrivTy)
          continue;
-        if (CBArgPrivTy.value() == PrivatizableType)
+        if (*CBArgPrivTy == PrivatizableType)
          continue;
      }

@@ -7205,7 +7205,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
        auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
        if (!DCArgPrivTy)
          return true;
-        if (DCArgPrivTy.value() == PrivatizableType)
+        if (*DCArgPrivTy == PrivatizableType)
          return true;
      }
    }
@@ -7347,7 +7347,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
  ChangeStatus manifest(Attributor &A) override {
    if (!PrivatizableType)
      return ChangeStatus::UNCHANGED;
-    assert(PrivatizableType.value() && "Expected privatizable type!");
+    assert(*PrivatizableType && "Expected privatizable type!");

    // Collect all tail calls in the function as we cannot allow new allocas to
    // escape into tail recursion.
@@ -7380,9 +7380,9 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
      Instruction *IP = &*EntryBB.getFirstInsertionPt();
      const DataLayout &DL = IP->getModule()->getDataLayout();
      unsigned AS = DL.getAllocaAddrSpace();
-      Instruction *AI = new AllocaInst(PrivatizableType.value(), AS,
+      Instruction *AI = new AllocaInst(*PrivatizableType, AS,
                                       Arg->getName() + ".priv", IP);
-      createInitialization(PrivatizableType.value(), *AI, ReplacementFn,
+      createInitialization(*PrivatizableType, *AI, ReplacementFn,
                           ArgIt->getArgNo(), *IP);

      if (AI->getType() != Arg->getType())
@@ -7490,7 +7490,7 @@ struct AAPrivatizablePtrCallSiteArgument final
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType)
      return ChangeStatus::UNCHANGED;
-    if (!PrivatizableType.value())
+    if (!*PrivatizableType)
      return indicatePessimisticFixpoint();

    const IRPosition &IRP = getIRPosition();
@@ -9013,7 +9013,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedLHS.has_value())
      return true;
-    if (!SimplifiedLHS.value())
+    if (!*SimplifiedLHS)
      return false;
    LHS = *SimplifiedLHS;

@@ -9022,7 +9022,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedRHS.has_value())
      return true;
-    if (!SimplifiedRHS.value())
+    if (!*SimplifiedRHS)
      return false;
    RHS = *SimplifiedRHS;

@@ -9066,7 +9066,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedOpV.has_value())
      return true;
-    if (!SimplifiedOpV.value())
+    if (!*SimplifiedOpV)
      return false;
    OpV = *SimplifiedOpV;

@@ -9096,7 +9096,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedLHS.has_value())
      return true;
-    if (!SimplifiedLHS.value())
+    if (!*SimplifiedLHS)
      return false;
    LHS = *SimplifiedLHS;

@@ -9105,7 +9105,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedRHS.has_value())
      return true;
-    if (!SimplifiedRHS.value())
+    if (!*SimplifiedRHS)
      return false;
    RHS = *SimplifiedRHS;

@@ -9171,7 +9171,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedOpV.has_value())
      return true;
-    if (!SimplifiedOpV.value())
+    if (!*SimplifiedOpV)
      return false;
    Value *VPtr = *SimplifiedOpV;

@ -10241,7 +10241,7 @@ askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA,
A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
return std::nullopt;
}
if (auto *C = COpt.value()) {
if (auto *C = *COpt) {
A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
return C;
}
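A side note on the std::optional<T *> pattern in this hunk and several below: with a pointer payload, "no answer yet" (std::nullopt) and "answer: null" (a contained nullptr) are distinct states, which is why the diff tests both. A minimal sketch (illustrative only; isResolvedNonNull is an invented name):

#include <optional>

bool isResolvedNonNull(std::optional<int *> V) {
  if (!V.has_value())
    return false;       // still unknown
  return *V != nullptr; // resolved; *V is safe after the has_value() check
}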
@ -10255,12 +10255,12 @@ Value *AAPotentialValues::getSingleValue(
std::optional<Value *> V;
for (auto &It : Values) {
V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty);
if (V.has_value() && !V.value())
if (V.has_value() && !*V)
break;
}
if (!V.has_value())
return UndefValue::get(&Ty);
return V.value();
return *V;
}

namespace {
@ -10303,7 +10303,7 @@ struct AAPotentialValuesImpl : AAPotentialValues {
std::optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty);
if (!C)
return std::nullopt;
if (C.value())
if (*C)
if (auto *CC = AA::getWithType(**C, Ty))
return CC;
return nullptr;
@ -10328,7 +10328,7 @@ struct AAPotentialValuesImpl : AAPotentialValues {
Type &Ty = *getAssociatedType();
std::optional<Value *> SimpleV =
askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty);
if (SimpleV.has_value() && !SimpleV.value()) {
if (SimpleV.has_value() && !*SimpleV) {
auto &PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
*this, ValIRP, DepClassTy::OPTIONAL);
if (PotentialConstantsAA.isValidState()) {
@ -10342,8 +10342,8 @@ struct AAPotentialValuesImpl : AAPotentialValues {
if (!SimpleV.has_value())
return;

if (SimpleV.value())
VPtr = SimpleV.value();
if (*SimpleV)
VPtr = *SimpleV;
}

if (isa<ConstantInt>(VPtr))
@ -10491,7 +10491,7 @@ struct AAPotentialValuesFloating : AAPotentialValuesImpl {
UsedAssumedInformation, AA::Intraprocedural);
if (!SimplifiedLHS.has_value())
return true;
if (!SimplifiedLHS.value())
if (!*SimplifiedLHS)
return false;
LHS = *SimplifiedLHS;

@ -10500,7 +10500,7 @@ struct AAPotentialValuesFloating : AAPotentialValuesImpl {
UsedAssumedInformation, AA::Intraprocedural);
if (!SimplifiedRHS.has_value())
return true;
if (!SimplifiedRHS.value())
if (!*SimplifiedRHS)
return false;
RHS = *SimplifiedRHS;

@ -10703,8 +10703,8 @@ struct AAPotentialValuesFloating : AAPotentialValuesImpl {
if (!SimplifiedOp.has_value())
return true;

if (SimplifiedOp.value())
NewOps[Idx] = SimplifiedOp.value();
if (*SimplifiedOp)
NewOps[Idx] = *SimplifiedOp;
else
NewOps[Idx] = Op;

@ -11004,10 +11004,10 @@ struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl {
// Nothing to do as long as no value was determined.
continue;
}
V = CallerV.value() ? CallerV.value() : V;
V = *CallerV ? *CallerV : V;
if (AA::isDynamicallyUnique(A, *this, *V) &&
AA::isValidInScope(*V, Caller)) {
if (CallerV.value()) {
if (*CallerV) {
SmallVector<AA::ValueAndContext> ArgValues;
IRPosition IRP = IRPosition::value(*V);
if (auto *Arg = dyn_cast<Argument>(V))
|
@ -565,7 +565,7 @@ collectRegionsConstants(OutlinableRegion &Region,
for (Value *V : ID.OperVals) {
std::optional<unsigned> GVNOpt = C.getGVN(V);
assert(GVNOpt && "Expected a GVN for operand?");
unsigned GVN = GVNOpt.value();
unsigned GVN = *GVNOpt;

// Check if this global value has been found to not be the same already.
if (NotSame.contains(GVN)) {
@ -581,7 +581,7 @@ collectRegionsConstants(OutlinableRegion &Region,
std::optional<bool> ConstantMatches =
constantMatches(V, GVN, GVNToConstant);
if (ConstantMatches) {
if (ConstantMatches.value())
if (*ConstantMatches)
continue;
else
ConstantsTheSame = false;
@ -662,7 +662,7 @@ Function *IROutliner::createFunction(Module &M, OutlinableGroup &Group,

// Transfer the swifterr attribute to the correct function parameter.
if (Group.SwiftErrorArgument)
Group.OutlinedFunction->addParamAttr(Group.SwiftErrorArgument.value(),
Group.OutlinedFunction->addParamAttr(*Group.SwiftErrorArgument,
Attribute::SwiftError);

Group.OutlinedFunction->addFnAttr(Attribute::OptimizeForSize);
@ -821,7 +821,7 @@ static void mapInputsToGVNs(IRSimilarityCandidate &C,
if (OutputMappings.find(Input) != OutputMappings.end())
Input = OutputMappings.find(Input)->second;
assert(C.getGVN(Input) && "Could not find a numbering for the given input");
EndInputNumbers.push_back(C.getGVN(Input).value());
EndInputNumbers.push_back(*C.getGVN(Input));
}
}

@ -960,11 +960,11 @@ findExtractedInputToOverallInputMapping(OutlinableRegion &Region,
for (unsigned InputVal : InputGVNs) {
std::optional<unsigned> CanonicalNumberOpt = C.getCanonicalNum(InputVal);
assert(CanonicalNumberOpt && "Canonical number not found?");
unsigned CanonicalNumber = CanonicalNumberOpt.value();
unsigned CanonicalNumber = *CanonicalNumberOpt;

std::optional<Value *> InputOpt = C.fromGVN(InputVal);
assert(InputOpt && "Global value number not found?");
Value *Input = InputOpt.value();
Value *Input = *InputOpt;

DenseMap<unsigned, unsigned>::iterator AggArgIt =
Group.CanonicalNumberToAggArg.find(CanonicalNumber);
@ -1248,13 +1248,13 @@ static std::optional<unsigned> getGVNForPHINode(OutlinableRegion &Region,
std::optional<unsigned> BBGVN = Cand.getGVN(PHIBB);
assert(BBGVN && "Could not find GVN for the incoming block!");

BBGVN = Cand.getCanonicalNum(BBGVN.value());
BBGVN = Cand.getCanonicalNum(*BBGVN);
assert(BBGVN && "Could not find canonical number for the incoming block!");
// Create a pair of the exit block canonical value, and the aggregate
// argument location, connected to the canonical numbers stored in the
// PHINode.
PHINodeData TemporaryPair =
std::make_pair(std::make_pair(BBGVN.value(), AggArgIdx), PHIGVNs);
std::make_pair(std::make_pair(*BBGVN, AggArgIdx), PHIGVNs);
hash_code PHINodeDataHash = encodePHINodeData(TemporaryPair);

// Look for and create a new entry in our connection between canonical
@ -1528,7 +1528,7 @@ CallInst *replaceCalledFunction(Module &M, OutlinableRegion &Region) {
// Make sure that the argument in the new function has the SwiftError
// argument.
if (Group.SwiftErrorArgument)
Call->addParamAttr(Group.SwiftErrorArgument.value(), Attribute::SwiftError);
Call->addParamAttr(*Group.SwiftErrorArgument, Attribute::SwiftError);

return Call;
}
@ -2092,10 +2092,9 @@ static void alignOutputBlockWithAggFunc(
// we add it to our list of sets of output blocks.
if (MatchingBB) {
LLVM_DEBUG(dbgs() << "Set output block for region in function"
<< Region.ExtractedFunction << " to "
<< MatchingBB.value());
<< Region.ExtractedFunction << " to " << *MatchingBB);

Region.OutputBlockNum = MatchingBB.value();
Region.OutputBlockNum = *MatchingBB;
for (std::pair<Value *, BasicBlock *> &VtoBB : OutputBBs)
VtoBB.second->eraseFromParent();
return;
@ -2691,14 +2690,14 @@ void IROutliner::updateOutputMapping(OutlinableRegion &Region,
if (!OutputIdx)
return;

if (OutputMappings.find(Outputs[OutputIdx.value()]) == OutputMappings.end()) {
if (OutputMappings.find(Outputs[*OutputIdx]) == OutputMappings.end()) {
LLVM_DEBUG(dbgs() << "Mapping extracted output " << *LI << " to "
<< *Outputs[OutputIdx.value()] << "\n");
OutputMappings.insert(std::make_pair(LI, Outputs[OutputIdx.value()]));
<< *Outputs[*OutputIdx] << "\n");
OutputMappings.insert(std::make_pair(LI, Outputs[*OutputIdx]));
} else {
Value *Orig = OutputMappings.find(Outputs[OutputIdx.value()])->second;
Value *Orig = OutputMappings.find(Outputs[*OutputIdx])->second;
LLVM_DEBUG(dbgs() << "Mapping extracted output " << *Orig << " to "
<< *Outputs[OutputIdx.value()] << "\n");
<< *Outputs[*OutputIdx] << "\n");
OutputMappings.insert(std::make_pair(LI, Orig));
}
}
|
@ -4476,10 +4476,10 @@ struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall {
if (!SimplifiedValue)
return Str + std::string("none");

if (!SimplifiedValue.value())
if (!*SimplifiedValue)
return Str + std::string("nullptr");

if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.value()))
if (ConstantInt *CI = dyn_cast<ConstantInt>(*SimplifiedValue))
return Str + std::to_string(CI->getSExtValue());

return Str + std::string("unknown");
@ -4504,7 +4504,7 @@ struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall {
[&](const IRPosition &IRP, const AbstractAttribute *AA,
bool &UsedAssumedInformation) -> std::optional<Value *> {
assert((isValidState() ||
(SimplifiedValue && SimplifiedValue.value() == nullptr)) &&
(SimplifiedValue && *SimplifiedValue == nullptr)) &&
"Unexpected invalid state!");

if (!isAtFixpoint()) {
|
@ -132,7 +132,7 @@ void ContextTrieNode::addFunctionSize(uint32_t FSize) {
if (!FuncSize)
FuncSize = 0;

FuncSize = FuncSize.value() + FSize;
FuncSize = *FuncSize + FSize;
}

LineLocation ContextTrieNode::getCallSiteLoc() const { return CallSiteLoc; }
|
@ -1340,14 +1340,14 @@ SampleProfileLoader::getExternalInlineAdvisorCost(CallBase &CB) {

bool SampleProfileLoader::getExternalInlineAdvisorShouldInline(CallBase &CB) {
std::optional<InlineCost> Cost = getExternalInlineAdvisorCost(CB);
return Cost ? !!Cost.value() : false;
return Cost ? !!*Cost : false;
}

InlineCost
SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
if (std::optional<InlineCost> ReplayCost =
getExternalInlineAdvisorCost(*Candidate.CallInstr))
return ReplayCost.value();
return *ReplayCost;
// Adjust threshold based on call site hotness, only do this for callsite
// prioritized inliner because otherwise cost-benefit check is done earlier.
int SampleThreshold = SampleColdCallSiteThreshold;
|
@ -319,7 +319,7 @@ bool SanitizerBinaryMetadata::runOn(Instruction &I, MetadataInfoSet &MIS,

if (Options.Atomics && I.mayReadOrWriteMemory()) {
auto SSID = getAtomicSyncScopeID(&I);
if (SSID.has_value() && SSID.value() != SyncScope::SingleThread) {
if (SSID.has_value() && *SSID != SyncScope::SingleThread) {
NumMetadataAtomics++;
InstMetadata.push_back(&MetadataInfo::Atomics);
}
|
@ -485,7 +485,7 @@ static bool isTsanAtomic(const Instruction *I) {
if (!SSID)
return false;
if (isa<LoadInst>(I) || isa<StoreInst>(I))
return SSID.value() != SyncScope::SingleThread;
return *SSID != SyncScope::SingleThread;
return true;
}

|
@ -4918,8 +4918,8 @@ LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
MaxVScale =
TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
MaxScalableVF = ElementCount::getScalable(
MaxVScale ? (MaxSafeElements / MaxVScale.value()) : 0);
MaxScalableVF =
ElementCount::getScalable(MaxVScale ? (MaxSafeElements / *MaxVScale) : 0);
if (!MaxScalableVF)
reportVectorizationInfo(
"Max legal vector width too small, scalable vectorization "
@ -5319,9 +5319,9 @@ bool LoopVectorizationCostModel::isMoreProfitable(
unsigned EstimatedWidthB = B.Width.getKnownMinValue();
if (std::optional<unsigned> VScale = getVScaleForTuning()) {
if (A.Width.isScalable())
EstimatedWidthA *= VScale.value();
EstimatedWidthA *= *VScale;
if (B.Width.isScalable())
EstimatedWidthB *= VScale.value();
EstimatedWidthB *= *VScale;
}

// Assume vscale may be larger than 1 (or the value being tuned for),
@ -7692,7 +7692,7 @@ void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
BestVPlan.getVectorLoopRegion()->getEntryBasicBlock();
Loop *L = LI->getLoopFor(State.CFG.VPBB2IRBB[HeaderVPBB]);
if (VectorizedLoopID)
L->setLoopID(VectorizedLoopID.value());
L->setLoopID(*VectorizedLoopID);
else {
// Keep all loop hints from the original loop on the vector loop (we'll
// replace the vectorizer-specific hints below).
@ -10546,7 +10546,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
LLVMLoopVectorizeFollowupEpilogue});
if (RemainderLoopID) {
L->setLoopID(RemainderLoopID.value());
L->setLoopID(*RemainderLoopID);
} else {
if (DisableRuntimeUnroll)
AddRuntimeUnrollDisableMetaData(L);
|
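The guarded-ternary shape in the MaxScalableVF and vscale-tuning hunks above is the same guard-then-dereference idiom in one expression. A minimal sketch (illustrative only; estimatedWidth is an invented name):

#include <optional>

unsigned estimatedWidth(unsigned KnownMinWidth, std::optional<unsigned> VScale) {
  // The boolean test on the optional guards the *VScale dereference in the
  // same expression, so no empty-access path (and no exception) is reachable.
  return VScale ? KnownMinWidth * *VScale : KnownMinWidth;
}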
@ -2824,7 +2824,7 @@ private:
AliasCacheKey key = std::make_pair(Inst1, Inst2);
std::optional<bool> &result = AliasCache[key];
if (result) {
return result.value();
return *result;
}
bool aliased = true;
if (Loc1.Ptr && isSimple(Inst1))
|
@ -101,7 +101,7 @@ BinarySizeContextTracker::getFuncSizeForContext(const ContextTrieNode *Node) {
PrevNode = CurrNode;
CurrNode = CurrNode->getChildContext(CallSiteLoc, Node->getFuncName());
if (CurrNode && CurrNode->getFunctionSize())
Size = CurrNode->getFunctionSize().value();
Size = *CurrNode->getFunctionSize();
CallSiteLoc = Node->getCallSiteLoc();
Node = Node->getParentContext();
}
@ -115,12 +115,12 @@ BinarySizeContextTracker::getFuncSizeForContext(const ContextTrieNode *Node) {
while (!Size && CurrNode && !CurrNode->getAllChildContext().empty()) {
CurrNode = &CurrNode->getAllChildContext().begin()->second;
if (CurrNode->getFunctionSize())
Size = CurrNode->getFunctionSize().value();
Size = *CurrNode->getFunctionSize();
}
}

assert(Size && "We should at least find one context size.");
return Size.value();
return *Size;
}

void BinarySizeContextTracker::trackInlineesOptimizedAway(