[llvm] LLVM_FALLTHROUGH => [[fallthrough]]. NFC

With C++17, using [[fallthrough]] directly triggers neither a Clang pedantic warning nor MSVC warning C5051, so the LLVM_FALLTHROUGH macro is no longer needed.
Author: Fangrui Song
Date:   2022-08-08 11:24:15 -07:00
parent 09db7f5331
commit de9d80c1c5
208 changed files with 686 additions and 686 deletions
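
The change is mechanical: every use of the LLVM_FALLTHROUGH macro (defined in llvm/include/llvm/Support/Compiler.h) becomes the standard C++17 attribute it already expanded to in C++17 mode. A minimal sketch of the pattern, using a toy switch rather than code from this diff:

// Toy example, not from the LLVM tree: [[fallthrough]] marks an
// intentional fall-through between switch cases, silencing
// -Wimplicit-fallthrough and similar diagnostics.
void classify(char c, int &direction, bool &isSign) {
  switch (c) {
  case '-':
    direction = -1;
    [[fallthrough]]; // previously: LLVM_FALLTHROUGH;
  case '+': // reached for both '-' and '+'
    isSign = true;
    break;
  default:
    break;
  }
}

Since LLVM now requires C++17 to build, the attribute is always available and the macro indirection no longer buys anything.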
Changed paths:
llvm/
  examples/BrainF
  include/llvm
  lib/
    Analysis
    BinaryFormat
    Bitcode/Reader
    CodeGen
    DWARFLinker
    DebugInfo/PDB/Native
    ExecutionEngine/RuntimeDyld
    Frontend/OpenMP
    IR
    MC
    Object
    ObjectYAML
    Option
    ProfileData
    Support
    TableGen
    Target

@@ -335,7 +335,7 @@ void BrainF::readloop(PHINode *phi, BasicBlock *oldbb, BasicBlock *testbb,
switch(c) {
case '-':
direction = -1;
LLVM_FALLTHROUGH;
[[fallthrough]];
case '+':
if (cursym == SYM_CHANGE) {
@@ -356,7 +356,7 @@ void BrainF::readloop(PHINode *phi, BasicBlock *oldbb, BasicBlock *testbb,
case '<':
direction = -1;
LLVM_FALLTHROUGH;
[[fallthrough]];
case '>':
if (cursym == SYM_MOVE) {

@@ -449,7 +449,7 @@ public:
getCacheSize(TargetTransformInfo::CacheLevel Level) const {
switch (Level) {
case TargetTransformInfo::CacheLevel::L1D:
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetTransformInfo::CacheLevel::L2D:
return llvm::Optional<unsigned>();
}
@@ -460,7 +460,7 @@ public:
getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
switch (Level) {
case TargetTransformInfo::CacheLevel::L1D:
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetTransformInfo::CacheLevel::L2D:
return llvm::Optional<unsigned>();
}

@@ -957,7 +957,7 @@ public:
// Check for NOOP conversions.
if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
return 0;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::BitCast:
// Bitcast between types that are legalized to the same type are free and
// assume int to/from ptr of the same size is also free.
@@ -972,7 +972,7 @@ public:
case Instruction::ZExt:
if (TLI->isZExtFree(SrcLT.second, DstLT.second))
return 0;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::SExt:
if (I && getTLI()->isExtFree(I))
return 0;

@@ -685,7 +685,7 @@ void CodeGenPassBuilder<Derived>::addPassesToHandleExceptions(
// pad is shared by multiple invokes and is also a target of a normal
// edge from elsewhere.
addPass(SjLjEHPreparePass());
LLVM_FALLTHROUGH;
[[fallthrough]];
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
case ExceptionHandling::AIX:

@@ -816,7 +816,7 @@ public:
static bool classof(const Entry *E) {
switch (E->getKind()) {
case EK_DirectoryRemap:
LLVM_FALLTHROUGH;
[[fallthrough]];
case EK_File:
return true;
case EK_Directory:

@@ -27,7 +27,7 @@ inline Optional<CodeModel::Model> unwrap(LLVMCodeModel Model, bool &JIT) {
switch (Model) {
case LLVMCodeModelJITDefault:
JIT = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case LLVMCodeModelDefault:
return None;
case LLVMCodeModelTiny:

@@ -387,7 +387,7 @@ static LinearExpression GetLinearExpression(
BOp, DT))
return Val;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::Add: {
E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
Depth + 1, AC, DT);

@@ -2641,7 +2641,7 @@ static Constant *ConstantFoldScalarCall2(StringRef Name,
// undef - X -> { 0, false }
if (!C0 || !C1)
return Constant::getNullValue(Ty);
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
// X + undef -> { -1, false }
@@ -2652,7 +2652,7 @@ static Constant *ConstantFoldScalarCall2(StringRef Name,
{Constant::getAllOnesValue(Ty->getStructElementType(0)),
Constant::getNullValue(Ty->getStructElementType(1))});
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow: {
// undef * X -> { 0, false }
@@ -2944,7 +2944,7 @@ static Constant *ConstantFoldScalarCall3(StringRef Name,
// wrong result if C3 was -0.0.
return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
}
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case Intrinsic::fma:
case Intrinsic::fmuladd: {

@@ -789,7 +789,7 @@ RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
case Instruction::Select:
if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul)
return isConditionalRdxPattern(Kind, I);
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::FCmp:
case Instruction::ICmp:
case Instruction::Call:

@@ -3027,7 +3027,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
if (!Known.isNonNegative())
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_UGT:
@@ -3038,7 +3038,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
if (!Known.isNonNegative())
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_ULT:
@@ -5869,7 +5869,7 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
// sat(X + MAX) -> MAX
if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
return Constant::getAllOnesValue(ReturnType);
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::sadd_sat:
// sat(X + undef) -> -1
// sat(undef + X) -> -1
@@ -5889,7 +5889,7 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
// sat(0 - X) -> 0, sat(X - MAX) -> 0
if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
return Constant::getNullValue(ReturnType);
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::ssub_sat:
// X - X -> 0, X - undef -> 0, undef - X -> 0
if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))

@@ -621,7 +621,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// load query, we can safely ignore it (scan past it).
if (isLoad)
continue;
LLVM_FALLTHROUGH;
[[fallthrough]];
default:
// Otherwise, there is a potential dependence. Return a clobber.
return MemDepResult::getClobber(Inst);
@@ -993,7 +993,7 @@ SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
Cache.insert(Entry, Val);
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case 1:
// One new entry, Just insert the new value at the appropriate position.

@@ -6013,7 +6013,7 @@ const SCEV *ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
case ICmpInst::ICMP_UGT:
@@ -6066,7 +6066,7 @@ const SCEV *ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
case ICmpInst::ICMP_NE:
// x != 0 ? x+y : C+y -> x == 0 ? C+y : x+y
std::swap(TrueVal, FalseVal);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_EQ:
// x == 0 ? C+y : x+y -> umax(x, C)+y iff C u<= 1
if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
@@ -10958,7 +10958,7 @@ bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
case ICmpInst::ICMP_SGE:
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_SLE:
// (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2.
if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2))
@@ -10968,7 +10968,7 @@ bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
case ICmpInst::ICMP_SGT:
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_SLT:
// (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2.
if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2))
@@ -10978,7 +10978,7 @@ bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
case ICmpInst::ICMP_UGE:
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_ULE:
// (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2.
if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2))
@@ -10988,7 +10988,7 @@ bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
case ICmpInst::ICMP_UGT:
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_ULT:
// (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2.
if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2))
@@ -11515,7 +11515,7 @@ bool ScalarEvolution::isImpliedCondBalancedTypes(
if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
CtxI))
return true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_UGT:
@@ -11538,7 +11538,7 @@ bool ScalarEvolution::isImpliedCondBalancedTypes(
if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
LHS, V, getConstant(SharperMin), CtxI))
return true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_ULT:
@@ -11994,7 +11994,7 @@ static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
case ICmpInst::ICMP_SGE:
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_SLE:
return
// min(A, ...) <= A
@@ -12004,7 +12004,7 @@ static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
case ICmpInst::ICMP_UGE:
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_ULE:
return
// min(A, ...) <= A
@@ -12191,7 +12191,7 @@ static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
switch (Pred) {
case ICmpInst::ICMP_SGE:
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_SLE: {
// If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
@@ -12202,7 +12202,7 @@ static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
}
case ICmpInst::ICMP_UGE:
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ICmpInst::ICMP_ULE: {
// If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
@@ -13503,7 +13503,7 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
return DoesNotDominateBlock;
// Fall through into SCEVNAryExpr handling.
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case scAddExpr:
case scMulExpr:

@@ -514,7 +514,7 @@ static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T,
// on Linux.
//
// Fall through to disable all of them.
LLVM_FALLTHROUGH;
[[fallthrough]];
default:
TLI.setUnavailable(LibFunc_exp10);
TLI.setUnavailable(LibFunc_exp10f);
@@ -945,7 +945,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case LibFunc_strlen:
return NumParams == 1 && FTy.getParamType(0)->isPointerTy() &&
FTy.getReturnType()->isIntegerTy(SizeTBits);
@@ -970,7 +970,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case LibFunc_strcat:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0) == FTy.getReturnType() &&
@@ -980,7 +980,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case LibFunc_strncat:
return (NumParams == 3 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0) == FTy.getReturnType() &&
@@ -992,7 +992,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case LibFunc_strcpy:
case LibFunc_stpcpy:
return (NumParams == 2 && FTy.getReturnType() == FTy.getParamType(0) &&
@@ -1004,7 +1004,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case LibFunc_strlcat:
case LibFunc_strlcpy:
return NumParams == 3 && FTy.getReturnType()->isIntegerTy(SizeTBits) &&
@@ -1017,7 +1017,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case LibFunc_strncpy:
case LibFunc_stpncpy:
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
@@ -1138,7 +1138,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case LibFunc_memcpy:
case LibFunc_mempcpy:
case LibFunc_memmove:
@@ -1151,7 +1151,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case LibFunc_memset:
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getParamType(0)->isPointerTy() &&
@@ -1162,7 +1162,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case LibFunc_memccpy:
return (NumParams >= 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_memalign:

@@ -1200,7 +1200,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
case Instruction::PtrToInt:
case Instruction::IntToPtr:
// Fall through and handle them the same as zext/trunc.
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::ZExt:
case Instruction::Trunc: {
Type *SrcTy = I->getOperand(0)->getType();
@@ -2073,7 +2073,7 @@ static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
// power of two is not sufficient, and it has to be a constant.
if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::UDiv:
// Divisor must be a power of two.
// If OrZero is false, cannot guarantee induction variable is non-zero after
@@ -2085,7 +2085,7 @@ static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
case Instruction::AShr:
if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::LShr:
return OrZero || Q.IIQ.isExact(BO);
default:
@@ -3601,7 +3601,7 @@ static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
(!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
return true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::FAdd:
case Instruction::FRem:
return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
@@ -5137,7 +5137,7 @@ static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
return false;
}
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::CallBr:
case Instruction::Invoke: {
const auto *CB = cast<CallBase>(Op);

@@ -192,13 +192,13 @@ file_magic llvm::identify_magic(StringRef Magic) {
case 0x50: // mc68K
if (startswith(Magic, "\x50\xed\x55\xba"))
return file_magic::cuda_fatbinary;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 0x4c: // 80386 Windows
case 0xc4: // ARMNT Windows
if (Magic[1] == 0x01)
return file_magic::coff_object;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 0x90: // PA-RISC Windows
case 0x68: // mc68K Windows

@@ -3445,7 +3445,7 @@ Error BitcodeReader::parseUseLists() {
break;
case bitc::USELIST_CODE_BB:
IsBB = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case bitc::USELIST_CODE_DEFAULT: {
unsigned RecordLength = Record.size();
if (RecordLength < 3)

@@ -552,7 +552,7 @@ class MetadataLoader::MetadataLoaderImpl {
case 0:
if (N >= 3 && Expr[N - 3] == dwarf::DW_OP_bit_piece)
Expr[N - 3] = dwarf::DW_OP_LLVM_fragment;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 1:
// Move DW_OP_deref to the end.
if (N && Expr[0] == dwarf::DW_OP_deref) {
@@ -564,7 +564,7 @@ class MetadataLoader::MetadataLoaderImpl {
*std::prev(End) = dwarf::DW_OP_deref;
}
NeedDeclareExpressionUpgrade = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 2: {
// Change DW_OP_plus to DW_OP_plus_uconst.
// Change DW_OP_minus to DW_OP_uconst, DW_OP_minus
@@ -613,7 +613,7 @@ class MetadataLoader::MetadataLoaderImpl {
SubExpr = SubExpr.slice(HistoricSize);
}
Expr = MutableArrayRef<uint64_t>(Buffer);
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case 3:
// Up-to-date!
@@ -1285,7 +1285,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
}
case bitc::METADATA_DISTINCT_NODE:
IsDistinct = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case bitc::METADATA_NODE: {
SmallVector<Metadata *, 8> Elts;
Elts.reserve(Record.size());

@@ -529,7 +529,7 @@ bool AsmPrinter::doInitialization(Module &M) {
switch (MAI->getExceptionHandlingType()) {
case ExceptionHandling::None:
// We may want to emit CFI for debug.
LLVM_FALLTHROUGH;
[[fallthrough]];
case ExceptionHandling::SjLj:
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
@@ -553,7 +553,7 @@ bool AsmPrinter::doInitialization(Module &M) {
case ExceptionHandling::None:
if (!needsCFIForDebug())
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ExceptionHandling::SjLj:
case ExceptionHandling::DwarfCFI:
ES = new DwarfCFIException(this);
@@ -2761,7 +2761,7 @@ const MCExpr *AsmPrinter::lowerConstant(const Constant *CV) {
// expression properly. This is important for differences between
// blockaddress labels. Since the two labels are in the same function, it
// is reasonable to treat their delta as a 32-bit value.
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::BitCast:
return lowerConstant(CE->getOperand(0));

@@ -480,7 +480,7 @@ bool AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
PrintAsmMemoryOperand(MI, OpNo, nullptr, O);
return false;
}
LLVM_FALLTHROUGH; // GCC allows '%a' to behave like '%c' with immediates.
[[fallthrough]]; // GCC allows '%a' to behave like '%c' with immediates.
case 'c': // Substitute immediate value without immediate syntax
if (MO.isImm()) {
O << MO.getImm();

@@ -1620,7 +1620,7 @@ TypeIndex CodeViewDebug::lowerType(const DIType *Ty, const DIType *ClassTy) {
case dwarf::DW_TAG_pointer_type:
if (cast<DIDerivedType>(Ty)->getName() == "__vtbl_ptr_type")
return lowerTypeVFTableShape(cast<DIDerivedType>(Ty));
LLVM_FALLTHROUGH;
[[fallthrough]];
case dwarf::DW_TAG_reference_type:
case dwarf::DW_TAG_rvalue_reference_type:
return lowerTypePointer(cast<DIDerivedType>(Ty));

@@ -293,7 +293,7 @@ static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
APInt::getSignedMinValue(EltBits));
case Intrinsic::vp_reduce_fmax:
Negative = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::vp_reduce_fmin: {
FastMathFlags Flags = VPI.getFastMathFlags();
const fltSemantics &Semantics = EltTy->getFltSemantics();

@@ -286,7 +286,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
LLT Ty = MRI.getType(MI.getOperand(1).getReg());
if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case TargetOpcode::G_ADD: {
computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
@@ -447,7 +447,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
if (DstTy.isVector())
break;
// Fall through and handle them the same as zext/trunc.
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetOpcode::G_ASSERT_ZEXT:
case TargetOpcode::G_ZEXT:
case TargetOpcode::G_TRUNC: {

@@ -264,7 +264,7 @@ LegacyLegalizerInfo::findAction(const SizeAndActionsVec &Vec, const uint32_t Siz
// Special case for scalarization:
if (Vec == SizeAndActionsVec({{1, FewerElements}}))
return {1, FewerElements};
LLVM_FALLTHROUGH;
[[fallthrough]];
case NarrowScalar: {
// The following needs to be a loop, as for now, we do allow needing to
// go over "Unsupported" bit sizes before finding a legalizable bit size.

@@ -126,7 +126,7 @@ static bool mutationIsSane(const LegalizeRule &Rule,
case FewerElements:
if (!OldTy.isVector())
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case MoreElements: {
// MoreElements can go from scalar to vector.
const ElementCount OldElts = OldTy.isVector() ?

@@ -321,7 +321,7 @@ Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
case TargetOpcode::G_ANYEXT:
if (!LookThroughAnyExt)
return None;
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetOpcode::G_TRUNC:
case TargetOpcode::G_SEXT:
case TargetOpcode::G_ZEXT:

@@ -2848,7 +2848,7 @@ bool MIParser::parseMachineOperand(const unsigned OpCode, const unsigned OpIdx,
if (const auto *Formatter = TII->getMIRFormatter()) {
return parseTargetImmMnemonic(OpCode, OpIdx, Dest, *Formatter);
}
LLVM_FALLTHROUGH;
[[fallthrough]];
}
default:
// FIXME: Parse the MCSymbol machine operand.

@@ -880,7 +880,7 @@ void MIPrinter::print(const MachineInstr &MI, unsigned OpIdx,
MachineOperand::printSubRegIdx(OS, Op.getImm(), TRI);
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case MachineOperand::MO_Register:
case MachineOperand::MO_CImmediate:
case MachineOperand::MO_FPImmediate:

@@ -2956,7 +2956,7 @@ void JoinVals::computeAssignment(unsigned ValNo, JoinVals &Other) {
}
OtherV.Pruned = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
}
default:
// This value number needs to go in the final joined live range.
@@ -3399,7 +3399,7 @@ void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
if (LI != nullptr)
dbgs() << "\t\t LHS = " << *LI << '\n';
});
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case CR_Erase: {

@@ -147,7 +147,7 @@ ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
freeUnits &= ~ReservedScoreboard[StageCycle];
LLVM_FALLTHROUGH;
[[fallthrough]];
case InstrStage::Reserved:
// Reserved FUs can conflict only with required ones.
freeUnits &= ~RequiredScoreboard[StageCycle];
@@ -198,7 +198,7 @@ void ScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
freeUnits &= ~ReservedScoreboard[cycle + i];
LLVM_FALLTHROUGH;
[[fallthrough]];
case InstrStage::Reserved:
// Reserved FUs can conflict only with required ones.
freeUnits &= ~RequiredScoreboard[cycle + i];

@@ -1962,7 +1962,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
Changed = true;
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
default:
// Only add if it isn't already in the list.
@@ -14989,7 +14989,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
case ISD::SETLT:
case ISD::SETLE:
std::swap(TrueOpnd, FalseOpnd);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETOGT:
case ISD::SETUGT:
case ISD::SETOGE:

@@ -850,7 +850,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Custom:
isCustom = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetLowering::Legal:
Value = SDValue(Node, 0);
Chain = SDValue(Node, 1);
@@ -1317,11 +1317,11 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
return;
}
LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetLowering::Expand:
if (ExpandNode(Node))
return;
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetLowering::LibCall:
ConvertNodeToLibcall(Node);
return;
@@ -2961,7 +2961,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Results.push_back(Tmp2);
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SINT_TO_FP:
case ISD::STRICT_SINT_TO_FP:
if ((Tmp1 = ExpandLegalINT_TO_FP(Node, Tmp2))) {

@@ -2973,7 +2973,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
switch (BoolType) {
case TargetLoweringBase::UndefinedBooleanContent:
OVF = DAG.getNode(ISD::AND, dl, OvfVT, DAG.getConstant(1, dl, OvfVT), OVF);
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetLoweringBase::ZeroOrOneBooleanContent:
OVF = DAG.getZExtOrTrunc(OVF, dl, NVT);
Hi = DAG.getNode(N->getOpcode(), dl, NVT, Hi, OVF);

@@ -492,7 +492,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
if (LowerOperationWrapper(Node, ResultVals))
break;
LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetLowering::Expand:
LLVM_DEBUG(dbgs() << "Expanding\n");
Expand(Node, ResultVals);

@@ -3755,7 +3755,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
// If the target has custom/legal support for the scalar FP intrinsic ops
// (they are probably not destined to become libcalls), then widen those
// like any other binary ops.
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::FADD:
case ISD::FMUL:
@@ -3858,7 +3858,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
// If the target has custom/legal support for the scalar FP intrinsic ops
// (they are probably not destined to become libcalls), then widen those
// like any other unary ops.
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::ABS:
case ISD::BITREVERSE:

@@ -2381,34 +2381,34 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
default: break;
case ISD::SETEQ: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
OpVT);
case ISD::SETNE: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
R==APFloat::cmpLessThan, dl, VT,
OpVT);
case ISD::SETLT: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
OpVT);
case ISD::SETGT: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
VT, OpVT);
case ISD::SETLE: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
R==APFloat::cmpEqual, dl, VT,
OpVT);
case ISD::SETGE: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
R==APFloat::cmpEqual, dl, VT, OpVT);
case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
@@ -3495,7 +3495,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known.Zero.setBitsFrom(1);
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SUB:
case ISD::SUBC: {
assert(Op.getResNo() == 0 &&
@@ -3523,7 +3523,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known.Zero.setBitsFrom(1);
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::ADD:
case ISD::ADDC:
case ISD::ADDE: {
@@ -3738,7 +3738,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known.Zero.setBitsFrom(1);
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::ATOMIC_CMP_SWAP:
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_LOAD_ADD:
@@ -3771,7 +3771,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
default:
if (Opcode < ISD::BUILTIN_OP_END)
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_VOID:
@@ -4983,7 +4983,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
case ISD::TRUNCATE:
if (C->isOpaque())
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::ZERO_EXTEND:
return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
C->isTargetOpcode(), C->isOpaque());
@@ -5842,7 +5842,7 @@ SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, /*AllowUndefs*/ true))
if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())
return getUNDEF(VT);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::FADD:
case ISD::FMUL:
@@ -6053,12 +6053,12 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
const APInt &ShiftImm = N2C->getAPIntValue();
return getVScale(DL, VT, MulImm << ShiftImm);
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SRA:
case ISD::SRL:
if (SDValue V = simplifyShift(N1, N2))
return V;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::ROTL:
case ISD::ROTR:
assert(VT == N1.getValueType() &&
@@ -6348,7 +6348,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
// Handle undef ^ undef -> 0 special case. This is a common
// idiom (misuse).
return getConstant(0, DL, VT);
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::ADD:
case ISD::SUB:
case ISD::UDIV:

@@ -3346,7 +3346,7 @@ void SelectionDAGBuilder::visitSelect(const User &I) {
break;
case SPF_NABS:
Negate = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case SPF_ABS:
IsUnaryAbs = true;
Opc = ISD::ABS;
@@ -7297,7 +7297,7 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
// The only reason why ebIgnore nodes still need to be chained is that
// they might depend on the current rounding mode, and therefore must
// not be moved across instruction that may change that mode.
LLVM_FALLTHROUGH;
[[fallthrough]];
case fp::ExceptionBehavior::ebMayTrap:
// These must not be moved across calls or instructions that may change
// floating-point exception masks.

@@ -351,7 +351,7 @@ void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
break;
case ISD::SETO:
ShouldInvertCC = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETUO:
LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
(VT == MVT::f64) ? RTLIB::UO_F64 :
@@ -360,7 +360,7 @@ void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
case ISD::SETONE:
// SETONE = O && UNE
ShouldInvertCC = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETUEQ:
LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
(VT == MVT::f64) ? RTLIB::UO_F64 :
@@ -2583,7 +2583,7 @@ bool TargetLowering::SimplifyDemandedBits(
SDValue And1 = TLO.DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), One);
return TLO.CombineTo(Op, And1);
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::ADD:
case ISD::SUB: {
// Add, Sub, and Mul don't demand any bits in positions beyond that
@@ -2686,7 +2686,7 @@ bool TargetLowering::SimplifyDemandedBits(
}
}
LLVM_FALLTHROUGH;
[[fallthrough]];
}
default:
if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
@@ -3321,7 +3321,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
Depth + 1, /*AssumeSingleUse*/ true))
return true;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case ISD::OR:
case ISD::XOR:
@@ -9834,7 +9834,7 @@ bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT,
assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
"If SETUE is expanded, SETOEQ or SETUNE must be legal!");
NeedInvert = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETO:
assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
"If SETO is expanded, SETOEQ must be legal!");
@@ -9858,7 +9858,7 @@ bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT,
NeedInvert = ((unsigned)CCCode & 0x8U);
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETOEQ:
case ISD::SETOGT:
case ISD::SETOGE:
@@ -9879,7 +9879,7 @@ bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT,
break;
}
// Fallthrough if we are unsigned integer.
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETLE:
case ISD::SETGT:
case ISD::SETGE:

@@ -1424,7 +1424,7 @@ void TargetLoweringBase::computeRegisterProperties(
}
if (IsLegalWiderType)
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case TypeWidenVector:
@@ -1458,7 +1458,7 @@ void TargetLoweringBase::computeRegisterProperties(
break;
}
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case TypeSplitVector:
case TypeScalarizeVector: {

@@ -127,7 +127,7 @@ void TargetLoweringObjectFileELF::Initialize(MCContext &Ctx,
if (Ctx.getAsmInfo()->getExceptionHandlingType() == ExceptionHandling::ARM)
break;
// Fallthrough if not using EHABI
LLVM_FALLTHROUGH;
[[fallthrough]];
case Triple::ppc:
case Triple::ppcle:
case Triple::x86:

@@ -960,7 +960,7 @@ void TargetPassConfig::addPassesToHandleExceptions() {
// pad is shared by multiple invokes and is also a target of a normal
// edge from elsewhere.
addPass(createSjLjEHPreparePass(TM));
LLVM_FALLTHROUGH;
[[fallthrough]];
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
case ExceptionHandling::AIX:

@@ -64,7 +64,7 @@ DeclContextTree::getChildDeclContext(DeclContext &Context, const DWARFDie &DIE,
Context.getTag() == dwarf::DW_TAG_compile_unit) &&
!dwarf::toUnsigned(DIE.find(dwarf::DW_AT_external), 0))
return PointerIntPair<DeclContext *, 1>(nullptr);
LLVM_FALLTHROUGH;
[[fallthrough]];
case dwarf::DW_TAG_member:
case dwarf::DW_TAG_namespace:
case dwarf::DW_TAG_structure_type:

@@ -63,7 +63,7 @@ Error InfoStream::reload() {
case uint32_t(PdbRaw_FeatureSig::VC110):
// No other flags for VC110 PDB.
Stop = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case uint32_t(PdbRaw_FeatureSig::VC140):
Features |= PdbFeatureContainsIdStream;
break;

@@ -2027,7 +2027,7 @@ void RuntimeDyldELF::processX86_64TLSRelocation(
case ELF::R_X86_64_REX_GOTPCRELX:
case ELF::R_X86_64_GOTPCRELX:
IsGOTPCRel = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ELF::R_X86_64_PLT32:
IsSmallCodeModel = true;
break;

@@ -118,7 +118,7 @@ public:
(void)p;
assert((*p & 0x3B000000) == 0x39000000 &&
"Only expected load / store instructions.");
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case MachO::ARM64_RELOC_PAGEOFF12: {
// Verify that the relocation points to one of the expected load / store
@@ -222,7 +222,7 @@ public:
assert((*p & 0x3B000000) == 0x39000000 &&
"Only expected load / store instructions.");
(void)p;
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case MachO::ARM64_RELOC_PAGEOFF12: {
// Verify that the relocation points to one of the expected load / store

@@ -2293,7 +2293,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyWorkshareLoop(
case OMPScheduleType::BaseRuntimeSimd:
assert(!ChunkSize &&
"schedule type does not support user-defined chunk sizes");
LLVM_FALLTHROUGH;
[[fallthrough]];
case OMPScheduleType::BaseDynamicChunked:
case OMPScheduleType::BaseGuidedChunked:
case OMPScheduleType::BaseGuidedIterativeChunked:

@@ -903,7 +903,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
// Handle undef ^ undef -> 0 special case. This is a common
// idiom (misuse).
return Constant::getNullValue(C1->getType());
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::Add:
case Instruction::Sub:
return UndefValue::get(C1->getType());
@@ -979,7 +979,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
// -0.0 - undef --> undef (consistent with "fneg undef")
if (match(C1, m_NegZeroFP()) && isa<UndefValue>(C2))
return C2;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::FAdd:
case Instruction::FMul:
case Instruction::FDiv:
@@ -1513,7 +1513,7 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0))
if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2))
return areGlobalsPotentiallyEqual(GV, GV2);
LLVM_FALLTHROUGH;
[[fallthrough]];
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::ZExt:

@@ -1186,13 +1186,13 @@ static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
case IIT_EMPTYSTRUCT:
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
return;
case IIT_STRUCT9: ++StructElts; LLVM_FALLTHROUGH;
case IIT_STRUCT8: ++StructElts; LLVM_FALLTHROUGH;
case IIT_STRUCT7: ++StructElts; LLVM_FALLTHROUGH;
case IIT_STRUCT6: ++StructElts; LLVM_FALLTHROUGH;
case IIT_STRUCT5: ++StructElts; LLVM_FALLTHROUGH;
case IIT_STRUCT4: ++StructElts; LLVM_FALLTHROUGH;
case IIT_STRUCT3: ++StructElts; LLVM_FALLTHROUGH;
case IIT_STRUCT9: ++StructElts; [[fallthrough]];
case IIT_STRUCT8: ++StructElts; [[fallthrough]];
case IIT_STRUCT7: ++StructElts; [[fallthrough]];
case IIT_STRUCT6: ++StructElts; [[fallthrough]];
case IIT_STRUCT5: ++StructElts; [[fallthrough]];
case IIT_STRUCT4: ++StructElts; [[fallthrough]];
case IIT_STRUCT3: ++StructElts; [[fallthrough]];
case IIT_STRUCT2: {
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct,StructElts));

@@ -282,7 +282,7 @@ Error InlineAsm::verify(FunctionType *Ty, StringRef ConstStr) {
break;
}
++NumIndirect;
LLVM_FALLTHROUGH; // We fall through for Indirect Outputs.
[[fallthrough]]; // We fall through for Indirect Outputs.
case InlineAsm::isInput:
if (NumClobbers)
return makeStringError("input constraint occurs after clobber "

@@ -636,7 +636,7 @@ static const Value *stripPointerCastsAndOffsets(
case PSK_InBoundsConstantIndices:
if (!GEP->hasAllConstantIndices())
return V;
LLVM_FALLTHROUGH;
[[fallthrough]];
case PSK_InBounds:
if (!GEP->isInBounds())
return V;

@@ -2494,7 +2494,7 @@ void Verifier::visitFunction(const Function &F) {
case CallingConv::SPIR_KERNEL:
Check(F.getReturnType()->isVoidTy(),
"Calling convention requires void return type", &F);
LLVM_FALLTHROUGH;
[[fallthrough]];
case CallingConv::AMDGPU_VS:
case CallingConv::AMDGPU_HS:
case CallingConv::AMDGPU_GS:
@@ -2523,7 +2523,7 @@ void Verifier::visitFunction(const Function &F) {
}
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case CallingConv::Fast:
case CallingConv::Cold:
case CallingConv::Intel_OCL_BI:

@@ -339,7 +339,7 @@ AsmToken AsmLexer::LexDigit() {
if (!FirstNonDecimal) {
FirstNonDecimal = CurPtr;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case '9':
case '8':
case '7':

@@ -2576,7 +2576,7 @@ bool MasmParser::parseStatement(ParseStatementInfo &Info,
// Size directive; part of an instruction.
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case DK_SBYTE:
case DK_DB:
Lex();
@@ -2587,7 +2587,7 @@ bool MasmParser::parseStatement(ParseStatementInfo &Info,
// Size directive; part of an instruction.
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case DK_SWORD:
case DK_DW:
Lex();
@@ -2598,7 +2598,7 @@ bool MasmParser::parseStatement(ParseStatementInfo &Info,
// Size directive; part of an instruction.
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case DK_SDWORD:
case DK_DD:
Lex();
@@ -2609,7 +2609,7 @@ bool MasmParser::parseStatement(ParseStatementInfo &Info,
// Size directive; part of an instruction.
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case DK_DF:
Lex();
return parseDirectiveNamedValue(nextVal, 6, IDVal, IDLoc);
@@ -2619,7 +2619,7 @@ bool MasmParser::parseStatement(ParseStatementInfo &Info,
// Size directive; part of an instruction.
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case DK_SQWORD:
case DK_DQ:
Lex();

@@ -1286,7 +1286,7 @@ static VersionTuple getMachoBuildVersionSupportedOS(const Triple &Target) {
// Mac Catalyst always uses the build version load command.
if (Target.isMacCatalystEnvironment())
return VersionTuple();
LLVM_FALLTHROUGH;
[[fallthrough]];
case Triple::TvOS:
return VersionTuple(12);
case Triple::WatchOS:

@@ -1779,7 +1779,7 @@ static bool tryARMPackedUnwind(MCStreamer &streamer, WinEH::FrameInfo *info,
Step = 2;
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case Win64EH::UOP_WideSaveRegMask:
if (Step != 1 && Step != 2)
return false;
@@ -2043,7 +2043,7 @@ static bool tryARMPackedUnwind(MCStreamer &streamer, WinEH::FrameInfo *info,
case Win64EH::UOP_WideEndNop:
GotReturn = true;
Ret = (Inst.Operation == Win64EH::UOP_EndNop) ? 1 : 2;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Win64EH::UOP_End:
if (Step != 6 && Step != 7 && Step != 8 && Step != 9 && Step != 10)
return false;

@@ -322,7 +322,7 @@ SubtargetFeatures ELFObjectFileBase::getRISCVFeatures() const {
break;
case 'd':
Features.AddFeature("f"); // D-ext will imply F-ext.
LLVM_FALLTHROUGH;
[[fallthrough]];
case 'e':
case 'm':
case 'a':

@@ -595,7 +595,7 @@ void ScalarBitSetTraits<ELFYAML::ELF_EF>::bitset(IO &IO,
switch (Object->Header.ABIVersion) {
default:
// ELFOSABI_AMDGPU_PAL, ELFOSABI_AMDGPU_MESA3D support *_V3 flags.
LLVM_FALLTHROUGH;
[[fallthrough]];
case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
BCase(EF_AMDGPU_FEATURE_XNACK_V3);
BCase(EF_AMDGPU_FEATURE_SRAMECC_V3);

@@ -565,7 +565,7 @@ static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) {
case Option::SeparateClass: case Option::JoinedOrSeparateClass:
case Option::RemainingArgsClass: case Option::RemainingArgsJoinedClass:
Name += ' ';
LLVM_FALLTHROUGH;
[[fallthrough]];
case Option::JoinedClass: case Option::CommaJoinedClass:
case Option::JoinedAndSeparateClass:
if (const char *MetaVarName = Opts.getOptionMetaVar(Id))

@@ -1349,7 +1349,7 @@ Expected<Header> Header::readFromBuffer(const unsigned char *Buffer) {
"if not add a case statement to fall through to the latest version.");
case 8ull:
H.MemProfOffset = read(Buffer, offsetOf(&Header::MemProfOffset));
LLVM_FALLTHROUGH;
[[fallthrough]];
default: // Version7 (when the backwards compatible header was introduced).
H.HashType = read(Buffer, offsetOf(&Header::HashType));
H.HashOffset = read(Buffer, offsetOf(&Header::HashOffset));

@@ -1485,7 +1485,7 @@ IEEEFloat::opStatus IEEEFloat::addOrSubtractSpecials(const IEEEFloat &rhs,
case PackCategoriesIntoKey(fcNormal, fcNaN):
case PackCategoriesIntoKey(fcInfinity, fcNaN):
assign(rhs);
LLVM_FALLTHROUGH;
[[fallthrough]];
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
case PackCategoriesIntoKey(fcNaN, fcInfinity):
@@ -1610,7 +1610,7 @@ IEEEFloat::opStatus IEEEFloat::multiplySpecials(const IEEEFloat &rhs) {
case PackCategoriesIntoKey(fcInfinity, fcNaN):
assign(rhs);
sign = false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
case PackCategoriesIntoKey(fcNaN, fcInfinity):
@@ -1654,7 +1654,7 @@ IEEEFloat::opStatus IEEEFloat::divideSpecials(const IEEEFloat &rhs) {
case PackCategoriesIntoKey(fcInfinity, fcNaN):
assign(rhs);
sign = false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
case PackCategoriesIntoKey(fcNaN, fcInfinity):
@@ -1699,7 +1699,7 @@ IEEEFloat::opStatus IEEEFloat::modSpecials(const IEEEFloat &rhs) {
case PackCategoriesIntoKey(fcNormal, fcNaN):
case PackCategoriesIntoKey(fcInfinity, fcNaN):
assign(rhs);
LLVM_FALLTHROUGH;
[[fallthrough]];
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
case PackCategoriesIntoKey(fcNaN, fcInfinity):
@@ -1737,7 +1737,7 @@ IEEEFloat::opStatus IEEEFloat::remainderSpecials(const IEEEFloat &rhs) {
case PackCategoriesIntoKey(fcNormal, fcNaN):
case PackCategoriesIntoKey(fcInfinity, fcNaN):
assign(rhs);
LLVM_FALLTHROUGH;
[[fallthrough]];
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
case PackCategoriesIntoKey(fcNaN, fcInfinity):

@@ -1679,7 +1679,7 @@ bool CommandLineParser::ParseCommandLineOptions(int argc,
switch (PositionalOpts[i]->getNumOccurrencesFlag()) {
case cl::Optional:
Done = true; // Optional arguments want _at most_ one value
LLVM_FALLTHROUGH;
[[fallthrough]];
case cl::ZeroOrMore: // Zero or more will take all they can get...
case cl::OneOrMore: // One or more will take all they can get...
ProvidePositionalOption(PositionalOpts[i],
@@ -1733,7 +1733,7 @@ bool CommandLineParser::ParseCommandLineOptions(int argc,
Opt.second->error("must be specified at least once!");
ErrorParsing = true;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
default:
break;
}

@@ -89,8 +89,8 @@ void FoldingSetNodeID::AddString(StringRef String) {
// Pos will have overshot size by 4 - #bytes left over.
// No need to take endianness into account here - this is always executed.
switch (Pos - Size) {
case 1: V = (V << 8) | (unsigned char)String[Size - 3]; LLVM_FALLTHROUGH;
case 2: V = (V << 8) | (unsigned char)String[Size - 2]; LLVM_FALLTHROUGH;
case 1: V = (V << 8) | (unsigned char)String[Size - 3]; [[fallthrough]];
case 2: V = (V << 8) | (unsigned char)String[Size - 2]; [[fallthrough]];
case 3: V = (V << 8) | (unsigned char)String[Size - 1]; break;
default: return; // Nothing left.
}

@@ -39,7 +39,7 @@ void formatted_raw_ostream::UpdatePosition(const char *Ptr, size_t Size) {
switch (CP[0]) {
case '\n':
Line += 1;
LLVM_FALLTHROUGH;
[[fallthrough]];
case '\r':
Column = 0;
break;

@@ -96,7 +96,7 @@ static Expected<BitVector> scan(StringRef &S, StringRef Original) {
// Eat this character and fall through below to treat it like a non-meta
// character.
S = S.substr(1);
LLVM_FALLTHROUGH;
[[fallthrough]];
default:
BitVector BV(256, false);
BV[(uint8_t)S[0]] = true;

@@ -74,7 +74,7 @@ std::string llvm::DOT::EscapeString(const std::string &Label) {
Str.erase(Str.begin()+i); continue;
default: break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case '{': case '}':
case '<': case '>':
case '|': case '"':

@@ -291,7 +291,7 @@ StringRef sys::detail::getHostCPUNameForARM(StringRef ProcCpuinfoContent) {
switch (Exynos) {
default:
// Default by falling through to Exynos M3.
LLVM_FALLTHROUGH;
[[fallthrough]];
case 0x1002:
return "exynos-m3";
case 0x1003:

@@ -1926,7 +1926,7 @@ VersionTuple Triple::getCanonicalVersionForOS(OSType OSKind,
// macOS 10.16 is canonicalized to macOS 11.
if (Version == VersionTuple(10, 16))
return VersionTuple(11, 0);
LLVM_FALLTHROUGH;
[[fallthrough]];
default:
return Version;
}

@@ -1246,7 +1246,7 @@ class llvm::vfs::RedirectingFSDirIterImpl
sys::fs::file_type Type = sys::fs::file_type::type_unknown;
switch ((*Current)->getKind()) {
case RedirectingFileSystem::EK_Directory:
LLVM_FALLTHROUGH;
[[fallthrough]];
case RedirectingFileSystem::EK_DirectoryRemap:
Type = sys::fs::file_type::directory_file;
break;

@@ -778,7 +778,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'O':
if (S[1] == 'N') // ON
return true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 'o':
if (S[1] == 'n') //[Oo]n
return true;
@@ -786,7 +786,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'N':
if (S[1] == 'O') // NO
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 'n':
if (S[1] == 'o') //[Nn]o
return false;
@@ -799,7 +799,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'O':
if (S.drop_front() == "FF") // OFF
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 'o':
if (S.drop_front() == "ff") //[Oo]ff
return false;
@@ -807,7 +807,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'Y':
if (S.drop_front() == "ES") // YES
return true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 'y':
if (S.drop_front() == "es") //[Yy]es
return true;
@@ -820,7 +820,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'T':
if (S.drop_front() == "RUE") // TRUE
return true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 't':
if (S.drop_front() == "rue") //[Tt]rue
return true;
@@ -833,7 +833,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'F':
if (S.drop_front() == "ALSE") // FALSE
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 'f':
if (S.drop_front() == "alse") //[Ff]alse
return false;
@@ -2285,7 +2285,7 @@ void MappingNode::increment() {
break;
default:
setError("Unexpected token. Expected Key or Block End", T);
LLVM_FALLTHROUGH;
[[fallthrough]];
case Token::TK_Error:
IsAtEnd = true;
CurrentEntry = nullptr;
@@ -2298,7 +2298,7 @@ void MappingNode::increment() {
return increment();
case Token::TK_FlowMappingEnd:
getNext();
LLVM_FALLTHROUGH;
[[fallthrough]];
case Token::TK_Error:
// Set this to end iterator.
IsAtEnd = true;
@@ -2341,7 +2341,7 @@ void SequenceNode::increment() {
default:
setError( "Unexpected token. Expected Block Entry or Block End."
, T);
LLVM_FALLTHROUGH;
[[fallthrough]];
case Token::TK_Error:
IsAtEnd = true;
CurrentEntry = nullptr;
@@ -2370,7 +2370,7 @@ void SequenceNode::increment() {
return increment();
case Token::TK_FlowSequenceEnd:
getNext();
LLVM_FALLTHROUGH;
[[fallthrough]];
case Token::TK_Error:
// Set this to end iterator.
IsAtEnd = true;

@@ -285,10 +285,10 @@ void raw_ostream::copy_to_buffer(const char *Ptr, size_t Size) {
// Handle short strings specially, memcpy isn't very good at very short
// strings.
switch (Size) {
case 4: OutBufCur[3] = Ptr[3]; LLVM_FALLTHROUGH;
case 3: OutBufCur[2] = Ptr[2]; LLVM_FALLTHROUGH;
case 2: OutBufCur[1] = Ptr[1]; LLVM_FALLTHROUGH;
case 1: OutBufCur[0] = Ptr[0]; LLVM_FALLTHROUGH;
case 4: OutBufCur[3] = Ptr[3]; [[fallthrough]];
case 3: OutBufCur[2] = Ptr[2]; [[fallthrough]];
case 2: OutBufCur[1] = Ptr[1]; [[fallthrough]];
case 1: OutBufCur[0] = Ptr[0]; [[fallthrough]];
case 0: break;
default:
memcpy(OutBufCur, Ptr, Size);

@@ -239,7 +239,7 @@ tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
case '0': case '1':
if (NextChar == 'b')
return LexNumber();
LLVM_FALLTHROUGH;
[[fallthrough]];
case '2': case '3': case '4': case '5':
case '6': case '7': case '8': case '9':
case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
@@ -306,7 +306,7 @@ tgtok::TokKind TGLexer::LexString() {
case '\0':
if (CurPtr == CurBuf.end())
return ReturnError(StrStart, "End of file in string literal");
LLVM_FALLTHROUGH;
[[fallthrough]];
default:
return ReturnError(CurPtr, "invalid escape in string literal");
}

@@ -162,7 +162,7 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI,
case AArch64::SUBWrs:
case AArch64::SUBWrx:
IsFlagSetting = false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::ADDSWri:
case AArch64::ADDSWrr:
case AArch64::ADDSWrs:
@@ -218,7 +218,7 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI,
case AArch64::SUBXrs:
case AArch64::SUBXrx:
IsFlagSetting = false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::ADDSXri:
case AArch64::ADDSXrr:
case AArch64::ADDSXrs:

@@ -333,7 +333,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
++NumImmRangeRejs;
return nullptr;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::SUBSWrr:
case AArch64::SUBSXrr:
case AArch64::ADDSWrr:

@@ -461,7 +461,7 @@ bool AArch64ExpandPseudo::expand_DestructiveOp(
UseRev = true;
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::DestructiveBinary:
case AArch64::DestructiveBinaryImm:
std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 2, 3);
@@ -1086,7 +1086,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
return true;
}
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::MOVaddr:
case AArch64::MOVaddrJT:
case AArch64::MOVaddrCP:

@@ -2098,7 +2098,7 @@ bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type.");
case MVT::i1: VTIsi1 = true; LLVM_FALLTHROUGH;
case MVT::i1: VTIsi1 = true; [[fallthrough]];
case MVT::i8: Opc = OpcTable[Idx][0]; break;
case MVT::i16: Opc = OpcTable[Idx][1]; break;
case MVT::i32: Opc = OpcTable[Idx][2]; break;

@@ -993,7 +993,7 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
llvm_unreachable("No SEH Opcode for this instruction");
case AArch64::LDPDpost:
Imm = -Imm;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::STPDpre: {
unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
@@ -1006,7 +1006,7 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
}
case AArch64::LDPXpost:
Imm = -Imm;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::STPXpre: {
Register Reg0 = MBBI->getOperand(1).getReg();
Register Reg1 = MBBI->getOperand(2).getReg();
@@ -1024,7 +1024,7 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
}
case AArch64::LDRDpost:
Imm = -Imm;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::STRDpre: {
unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
@@ -1035,7 +1035,7 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
}
case AArch64::LDRXpost:
Imm = -Imm;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::STRXpre: {
unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg_X))
@@ -1452,7 +1452,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
.addImm(Subtarget.isTargetILP32() ? 32 : 0);
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case SwiftAsyncFramePointerMode::Always:
// ORR x29, x29, #0x1000_0000_0000_0000
@@ -2025,7 +2025,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// Avoid the reload as it is GOT relative, and instead fall back to the
// hardcoded value below. This allows a mismatch between the OS and
// application without immediately terminating on the difference.
LLVM_FALLTHROUGH;
[[fallthrough]];
case SwiftAsyncFramePointerMode::Always:
// We need to reset FP to its untagged state on return. Bit 60 is
// currently used to show the presence of an extended frame.

@@ -3565,7 +3565,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
return;
if (tryBitfieldInsertInZeroOp(Node))
return;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::ROTR:
case ISD::SHL:
if (tryShiftAmountMod(Node))

@@ -2531,7 +2531,7 @@ MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
AArch64::LR, /*isDef*/ true,
/*isImp*/ true, /*isKill*/ false, /*isDead*/ true,
/*isUndef*/ false, /*isEarlyClobber*/ true));
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
return emitPatchPoint(MI, BB);
@@ -2821,7 +2821,7 @@ static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
break;
case ISD::SETUO:
Invert = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::SETO:
CondCode = AArch64CC::MI;
CondCode2 = AArch64CC::GE;
@@ -6247,9 +6247,9 @@ SDValue AArch64TargetLowering::LowerCallResult(
case CCValAssign::AExtUpper:
Val = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Val,
DAG.getConstant(32, DL, VA.getLocVT()));
LLVM_FALLTHROUGH;
[[fallthrough]];
case CCValAssign::AExt:
LLVM_FALLTHROUGH;
[[fallthrough]];
case CCValAssign::ZExt:
Val = DAG.getZExtOrTrunc(Val, DL, VA.getValVT());
break;
@@ -12073,7 +12073,7 @@ static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
if (!NoNans)
return SDValue();
// If we ignore NaNs then we can use to the LS implementation.
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64CC::LS:
if (IsZero)
return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS);
@@ -12082,7 +12082,7 @@ static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
if (!NoNans)
return SDValue();
// If we ignore NaNs then we can use to the MI implementation.
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64CC::MI:
if (IsZero)
return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
@@ -12727,7 +12727,7 @@ bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
// trunc(sext ty1 to ty2) to ty1.
if (Instr->getType() == Ext->getOperand(0)->getType())
continue;
LLVM_FALLTHROUGH;
[[fallthrough]];
default:
return false;
}
@@ -12832,14 +12832,14 @@ bool AArch64TargetLowering::shouldSinkOperands(
Ops.push_back(&II->getOperandUse(1));
return true;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::fma:
if (isa<VectorType>(I->getType()) &&
cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
!Subtarget->hasFullFP16())
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::aarch64_neon_sqdmull:
case Intrinsic::aarch64_neon_sqdmulh:
case Intrinsic::aarch64_neon_sqrdmulh:
@@ -21335,12 +21335,12 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
Val = DAG.getNode(ExtendOpc, DL, MVT::nxv8i16, Val);
if (VT.getVectorElementType() == MVT::i16)
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
case MVT::nxv8i16:
Val = DAG.getNode(ExtendOpc, DL, MVT::nxv4i32, Val);
if (VT.getVectorElementType() == MVT::i32)
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
case MVT::nxv4i32:
Val = DAG.getNode(ExtendOpc, DL, MVT::nxv2i64, Val);
assert(VT.getVectorElementType() == MVT::i64 && "Unexpected element type!");
@@ -21369,13 +21369,13 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv4i32, Val, Val);
if (VT.getVectorElementType() == MVT::i32)
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
case MVT::nxv4i32:
Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv8i16, Val);
Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv8i16, Val, Val);
if (VT.getVectorElementType() == MVT::i16)
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
case MVT::nxv8i16:
Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i8, Val);
Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv16i8, Val, Val);

@@ -549,7 +549,7 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
return 0;
// fall-through to ADDXri and ADDWri.
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::ADDXri:
case AArch64::ADDWri:
// add x, 1 -> csinc.
@@ -577,7 +577,7 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
return 0;
// fall-through to SUBXrr and SUBWrr.
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::SUBXrr:
case AArch64::SUBWrr: {
// neg x -> csneg, represented as sub dst, xzr, src.
@@ -1576,7 +1576,7 @@ static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
case AArch64CC::HI: // Z clear and C set
case AArch64CC::LS: // Z set or C clear
UsedFlags.Z = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64CC::HS: // C set
case AArch64CC::LO: // C clear
UsedFlags.C = true;
@@ -1595,7 +1595,7 @@ static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
case AArch64CC::GT: // Z clear, N and V the same
case AArch64CC::LE: // Z set, N and V differ
UsedFlags.Z = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64CC::GE: // N and V the same
case AArch64CC::LT: // N and V differ
UsedFlags.N = true;
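The two NZCV hunks above use the accumulate-and-fall-through idiom: each case records the extra flag it reads, then falls into the cases whose flags it shares. A standalone sketch of the shape, with an invented flag struct and condition codes rather than the LLVM types:

struct Flags { bool Z = false, C = false; };

Flags flagsFor(int cc) {
  Flags F;
  switch (cc) {
  case 0:            // conditions that read Z in addition to C
    F.Z = true;
    [[fallthrough]];
  case 1:            // conditions that read only C
    F.C = true;
    break;
  }
  return F;
}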
@ -8021,7 +8021,7 @@ Optional<RegImmPair> AArch64InstrInfo::isAddImmediate(const MachineInstr &MI,
case AArch64::SUBSWri:
case AArch64::SUBSXri:
Sign *= -1;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::ADDSWri:
case AArch64::ADDSXri:
case AArch64::ADDWri:

@ -24,7 +24,7 @@ static bool needReorderStoreMI(const MachineInstr *MI) {
case AArch64::STRQui:
if (!MI->getMF()->getSubtarget<AArch64Subtarget>().isStoreAddressAscend())
return false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::STPQi:
return AArch64InstrInfo::getLdStOffsetOp(*MI).isImm();
}
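The STRQui/STPQi hunk is the guarded variant of the same idiom: the first case returns early unless a subtarget precondition holds, and only then falls through to the shared check. A sketch under invented names:

bool isReorderable(unsigned Opc, bool FeatureOn, bool HasImm) {
  switch (Opc) {
  case 1:            // like STRQui: extra gate before sharing the tail
    if (!FeatureOn)
      return false;
    [[fallthrough]];
  case 2:            // like STPQi: the shared check
    return HasImm;
  default:
    return false;
  }
}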

@ -176,7 +176,7 @@ bool AArch64RedundantCopyElimination::knownRegValInBlock(
case AArch64::ADDSWri:
case AArch64::ADDSXri:
IsCMN = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
// CMP is an alias for SUBS with a dead destination register.
case AArch64::SUBSWri:
case AArch64::SUBSXri: {

@ -556,7 +556,7 @@ bool AArch64SpeculationHardening::expandSpeculationSafeValue(
break;
case AArch64::SpeculationSafeValueW:
Is64Bit = false;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::SpeculationSafeValueX:
// Just remove the SpeculationSafe pseudos if control flow
// mis-speculation isn't happening because we're already inserting barriers

@ -2017,7 +2017,7 @@ InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
TargetTransformInfo::OP_None);
return Cost;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::UDIV: {
if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
auto VT = TLI->getValueType(DL, Ty);

@ -2313,7 +2313,7 @@ void AArch64Operand::print(raw_ostream &OS) const {
OS << "<register " << getReg() << ">";
if (!getShiftExtendAmount() && !hasShiftExtendAmount())
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
case k_ShiftExtend:
OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
<< getShiftExtendAmount();
@ -4745,7 +4745,7 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
if (RI->isSubRegisterEq(Rn, Rt2))
return Error(Loc[1], "unpredictable LDP instruction, writeback base "
"is also a destination");
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case AArch64::LDPDi:
case AArch64::LDPQi:
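In the LDP hunk the attribute is the last statement of a braced case body; that is also valid, since the requirement is only that control next reach a case or default label of the innermost enclosing switch, braces notwithstanding. A sketch with invented names:

int check(int op, bool bad) {
  switch (op) {
  case 0: {
    if (bad)
      return -1;
    [[fallthrough]]; // next executed statement is the case 1 label
  }
  case 1:
    return 0;
  default:
    return 1;
  }
}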

@ -908,7 +908,7 @@ DecodeThreeAddrSRegInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
// if shift == '11' then ReservedValue()
if (shiftHi == 0x3)
return Fail;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::ANDWrs:
case AArch64::ANDSWrs:
case AArch64::BICWrs:
@ -932,7 +932,7 @@ DecodeThreeAddrSRegInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
// if shift == '11' then ReservedValue()
if (shiftHi == 0x3)
return Fail;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::ANDXrs:
case AArch64::ANDSXrs:
case AArch64::BICXrs:
@ -1260,7 +1260,7 @@ DecodeExclusiveLdStInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
case AArch64::STXRB:
case AArch64::STXRH:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::LDARW:
case AArch64::LDARB:
case AArch64::LDARH:
@ -1284,7 +1284,7 @@ DecodeExclusiveLdStInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
case AArch64::STLXRX:
case AArch64::STXRX:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::LDARX:
case AArch64::LDAXRX:
case AArch64::LDXRX:
@ -1296,7 +1296,7 @@ DecodeExclusiveLdStInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
case AArch64::STLXPW:
case AArch64::STXPW:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::LDAXPW:
case AArch64::LDXPW:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
@ -1305,7 +1305,7 @@ DecodeExclusiveLdStInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
case AArch64::STLXPX:
case AArch64::STXPX:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::LDAXPX:
case AArch64::LDXPX:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
@ -1385,7 +1385,7 @@ static DecodeStatus DecodePairLdStInstruction(MCInst &Inst, uint32_t insn,
case AArch64::STGPpre:
case AArch64::STGPpost:
NeedsDisjointWritebackTransfer = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::LDNPXi:
case AArch64::STNPXi:
case AArch64::LDPXi:
@ -1400,7 +1400,7 @@ static DecodeStatus DecodePairLdStInstruction(MCInst &Inst, uint32_t insn,
case AArch64::LDPWpre:
case AArch64::STPWpre:
NeedsDisjointWritebackTransfer = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AArch64::LDNPWi:
case AArch64::STNPWi:
case AArch64::LDPWi:

@ -161,7 +161,7 @@ void AArch64GISelUtils::changeVectorFCMPPredToAArch64CC(
break;
case CmpInst::FCMP_UNO:
Invert = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case CmpInst::FCMP_ORD:
CondCode = AArch64CC::MI;
CondCode2 = AArch64CC::GE;

@ -2569,7 +2569,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
// For s32, use a cp load if we have optsize/minsize.
if (!shouldOptForSize(&MF))
break;
LLVM_FALLTHROUGH;
[[fallthrough]];
case 16:
case 64:
case 128: {
@ -2972,7 +2972,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_ASHR:
if (MRI.getType(I.getOperand(0).getReg()).isVector())
return selectVectorAshrLshr(I, MRI);
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetOpcode::G_SHL:
if (Opcode == TargetOpcode::G_SHL &&
MRI.getType(I.getOperand(0).getReg()).isVector())
@ -2997,7 +2997,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
I.getOperand(2).setReg(Trunc.getReg(0));
}
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case TargetOpcode::G_OR: {
// Reject the various things we don't support yet.
if (unsupportedBinOp(I, RBI, MRI, TRI))

@ -648,7 +648,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
/*NumOperands*/ 1);
}
// Both registers are generic, use G_BITCAST.
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case TargetOpcode::G_BITCAST: {
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

@ -949,7 +949,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
static unsigned getRsrcReg(CallingConv::ID CallConv) {
switch (CallConv) {
default: LLVM_FALLTHROUGH;
default: [[fallthrough]];
case CallingConv::AMDGPU_CS: return R_00B848_COMPUTE_PGM_RSRC1;
case CallingConv::AMDGPU_LS: return R_00B528_SPI_SHADER_PGM_RSRC1_LS;
case CallingConv::AMDGPU_HS: return R_00B428_SPI_SHADER_PGM_RSRC1_HS;
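The `default: [[fallthrough]];` form above is well-formed: the attribute only requires that the next statement executed be labeled by a case or default of the same switch, and here default precedes the first case, so unrecognized calling conventions deliberately take the compute path. A sketch with placeholder return values:

unsigned rsrcRegFor(int cc) {
  switch (cc) {
  default: [[fallthrough]]; // unknown conventions share the first case
  case 0: return 100;       // placeholder register values
  case 1: return 200;
  }
}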

@ -471,7 +471,7 @@ AMDGPURegisterBankInfo::getInstrAlternativeMappings(
return addMappingFromTable<1>(MI, MRI, {{ 0 }}, Table);
}
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case TargetOpcode::G_FCONSTANT:
case TargetOpcode::G_FRAME_INDEX:
@ -2367,7 +2367,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
llvm_unreachable("lowerAbsToMaxNeg should have succeeded");
return;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case AMDGPU::G_ADD:
case AMDGPU::G_SUB:
@ -3717,7 +3717,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case AMDGPU::G_PTR_ADD:
case AMDGPU::G_PTRMASK:
@ -3743,7 +3743,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_UBFX:
if (isSALUMapping(MI))
return getDefaultMappingSOP(MI);
LLVM_FALLTHROUGH;
[[fallthrough]];
case AMDGPU::G_SADDSAT: // FIXME: Could lower sat ops for SALU
case AMDGPU::G_SSUBSAT:
@ -3906,7 +3906,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case AMDGPU::G_MERGE_VALUES:
case AMDGPU::G_CONCAT_VECTORS: {
@ -4353,7 +4353,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
unsigned IdxSize = MRI.getType(IdxReg).getSizeInBits();
unsigned IdxBank = getRegBankID(IdxReg, MRI, AMDGPU::SGPRRegBankID);
OpdsMapping[3] = AMDGPU::getValueMapping(IdxBank, IdxSize);
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case Intrinsic::amdgcn_readfirstlane: {
unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();

@ -460,21 +460,21 @@ bool AMDGPUSubtarget::makeLIDRangeMetadata(Instruction *I) const {
case Intrinsic::amdgcn_workitem_id_x:
case Intrinsic::r600_read_tidig_x:
IdQuery = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::r600_read_local_size_x:
Dim = 0;
break;
case Intrinsic::amdgcn_workitem_id_y:
case Intrinsic::r600_read_tidig_y:
IdQuery = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::r600_read_local_size_y:
Dim = 1;
break;
case Intrinsic::amdgcn_workitem_id_z:
case Intrinsic::r600_read_tidig_z:
IdQuery = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::r600_read_local_size_z:
Dim = 2;
break;

@ -588,7 +588,7 @@ InstructionCost GCNTTIImpl::getArithmeticInstrCost(
return TargetTransformInfo::TCC_Free;
}
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::FADD:
case ISD::FSUB:
if (ST->hasPackedFP32Ops() && SLT == MVT::f32)

@ -977,7 +977,7 @@ int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteVGPRReadlaneRead);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case AMDGPU::V_WRITELANE_B32: {
UseReg = AMDGPU::EXEC;
int WaitStatesNeededForDef =
@ -1913,7 +1913,7 @@ int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
break;
case 8: NeedWaitStates = MFMA16x16WritesAGPRAccVgprReadWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
case 16: [[fallthrough]];
default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprReadWaitStates;
break;
}
@ -1923,7 +1923,7 @@ int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
break;
case 8: NeedWaitStates = MFMA16x16WritesAGPRAccVgprWriteWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
case 16: [[fallthrough]];
default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprWriteWaitStates;
break;
}
@ -1984,7 +1984,7 @@ int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
break;
case 8: NeedWaitStates = MFMA16x16ReadSrcCAccVgprWriteWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
case 16: [[fallthrough]];
default: NeedWaitStates = MFMA32x32ReadSrcCAccVgprWriteWaitStates;
break;
}
@ -2140,7 +2140,7 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
? SMFMA16x16WritesVGPROverlappedDMFMASrcCWaitStates
: SMFMA16x16WritesVGPROverlappedSMFMASrcCWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
case 16: [[fallthrough]];
default:
NeedWaitStates = ST.hasGFX940Insts()
? isXDL(ST, *MI1)
@ -2186,7 +2186,7 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
: GFX940_SMFMA8PassWritesVGPROverlappedSrcABWaitStates
: SMFMA16x16WritesVGPROverlappedSrcABWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
case 16: [[fallthrough]];
default:
NeedWaitStates = ST.hasGFX940Insts()
? isXDL(ST, *MI1)
@ -2407,7 +2407,7 @@ int GCNHazardRecognizer::checkMAIVALUHazards(MachineInstr *MI) {
: GFX940_SMFMA8PassWriteVgprVALUMemExpReadWaitStates
: SMFMA16x16WriteVgprVALUMemExpReadWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
case 16: [[fallthrough]];
default:
NeedWaitStates =
isDGEMM(MFMA->getOpcode())
@ -2502,7 +2502,7 @@ int GCNHazardRecognizer::checkMAIVALUHazards(MachineInstr *MI) {
: GFX940_SMFMA8PassWriteVgprVALUWawWaitStates
: SMFMA16x16WriteVgprVALUWawWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
case 16: [[fallthrough]];
default:
NeedWaitStates = isDGEMM(MFMA->getOpcode())
? DMFMA16x16WriteVgprVALUWriteWaitStates
@ -2555,7 +2555,7 @@ int GCNHazardRecognizer::checkMAIVALUHazards(MachineInstr *MI) {
break;
case 8: NeedWaitStates = SMFMA16x16ReadVgprVALUWarWaitStates;
break;
case 16: LLVM_FALLTHROUGH;
case 16: [[fallthrough]];
default: NeedWaitStates = SMFMA32x32ReadVgprVALUWarWaitStates;
break;
}

@ -715,7 +715,7 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
printImmediate16(static_cast<uint16_t>(Op.getImm()), STI, O);
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
printImmediateInt16(static_cast<uint16_t>(Op.getImm()), STI, O);

@ -274,7 +274,7 @@ uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
LLVM_FALLTHROUGH;
[[fallthrough]];
}
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:

@ -71,7 +71,7 @@ void R600AsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
if (STM.getGeneration() >= AMDGPUSubtarget::EVERGREEN) {
// Evergreen / Northern Islands
switch (MF.getFunction().getCallingConv()) {
default: LLVM_FALLTHROUGH;
default: [[fallthrough]];
case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
case CallingConv::AMDGPU_PS: RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break;
@ -80,9 +80,9 @@ void R600AsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
} else {
// R600 / R700
switch (MF.getFunction().getCallingConv()) {
default: LLVM_FALLTHROUGH;
case CallingConv::AMDGPU_GS: LLVM_FALLTHROUGH;
case CallingConv::AMDGPU_CS: LLVM_FALLTHROUGH;
default: [[fallthrough]];
case CallingConv::AMDGPU_GS: [[fallthrough]];
case CallingConv::AMDGPU_CS: [[fallthrough]];
case CallingConv::AMDGPU_VS: RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break;
case CallingConv::AMDGPU_PS: RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break;
}
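Strictly, an attribute sitting alone between two labels, as in the GS/CS cases above, is redundant: adjacent labels with no intervening statements always fall through, and Clang's -Wimplicit-fallthrough does not diagnose empty cases. The mechanical rewrite keeps the annotations as written. Both forms in this sketch compile cleanly:

int bucket(int cc) {
  switch (cc) {
  case 0: [[fallthrough]]; // explicit annotation
  case 1:                  // implicit: an empty case never warns
  case 2:
    return 1;
  default:
    return 0;
  }
}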

@ -527,7 +527,7 @@ public:
CFStack.pushBranch(R600::CF_PUSH_EG);
} else
CFStack.pushBranch(R600::CF_ALU_PUSH_BEFORE);
LLVM_FALLTHROUGH;
[[fallthrough]];
case R600::CF_ALU:
I = MI;
AluClauses.push_back(MakeALUClause(MBB, I));

@ -589,7 +589,7 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N,
// Since we don't care about out of bounds values we can use FP_TO_SINT for
// uints too. The DAGLegalizer code for uint considers some extra cases
// which are not necessary here.
LLVM_FALLTHROUGH;
[[fallthrough]];
case ISD::FP_TO_SINT: {
if (N->getValueType(0) == MVT::i1) {
Results.push_back(lowerFP_TO_SINT(N->getOperand(0), DAG));

@ -4338,7 +4338,7 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::DS_GWS_SEMA_BR:
case AMDGPU::DS_GWS_BARRIER:
TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::data0);
LLVM_FALLTHROUGH;
[[fallthrough]];
case AMDGPU::DS_GWS_SEMA_V:
case AMDGPU::DS_GWS_SEMA_P:
case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
@ -7807,7 +7807,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
DAG.getContext()->diagnose(NoFpRet);
return SDValue();
}
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::amdgcn_global_atomic_fmin:
case Intrinsic::amdgcn_global_atomic_fmax:
case Intrinsic::amdgcn_flat_atomic_fadd:
@ -7824,7 +7824,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::amdgcn_global_atomic_fadd:
if (!Subtarget->hasAtomicFaddNoRtnInsts())
return makeV_ILLEGAL(Op, DAG);
LLVM_FALLTHROUGH;
[[fallthrough]];
case Intrinsic::amdgcn_flat_atomic_fadd: {
EVT VT = Op.getOperand(3).getValueType();
return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT,
@ -10168,7 +10168,7 @@ bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
break;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
}
default:
return denormalsEnabledForType(DAG, Op.getValueType()) &&
@ -11554,7 +11554,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
case ISD::LOAD: {
if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI))
return Widended;
LLVM_FALLTHROUGH;
[[fallthrough]];
}
default: {
if (!DCI.isBeforeLegalize()) {

@ -4082,7 +4082,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
ErrInfo = "Expected immediate, but got non-immediate";
return false;
}
LLVM_FALLTHROUGH;
[[fallthrough]];
default:
continue;
}

@ -362,14 +362,14 @@ static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
case AMDGPU::S_LOAD_DWORDX8_IMM:
return 8;
case AMDGPU::DS_READ_B32: LLVM_FALLTHROUGH;
case AMDGPU::DS_READ_B32_gfx9: LLVM_FALLTHROUGH;
case AMDGPU::DS_WRITE_B32: LLVM_FALLTHROUGH;
case AMDGPU::DS_READ_B32: [[fallthrough]];
case AMDGPU::DS_READ_B32_gfx9: [[fallthrough]];
case AMDGPU::DS_WRITE_B32: [[fallthrough]];
case AMDGPU::DS_WRITE_B32_gfx9:
return 1;
case AMDGPU::DS_READ_B64: LLVM_FALLTHROUGH;
case AMDGPU::DS_READ_B64_gfx9: LLVM_FALLTHROUGH;
case AMDGPU::DS_WRITE_B64: LLVM_FALLTHROUGH;
case AMDGPU::DS_READ_B64: [[fallthrough]];
case AMDGPU::DS_READ_B64_gfx9: [[fallthrough]];
case AMDGPU::DS_WRITE_B64: [[fallthrough]];
case AMDGPU::DS_WRITE_B64_gfx9:
return 2;
default:
@ -635,7 +635,7 @@ static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) {
case AMDGPU::GLOBAL_STORE_DWORDX3_SADDR:
case AMDGPU::GLOBAL_STORE_DWORDX4_SADDR:
Result.SAddr = true;
LLVM_FALLTHROUGH;
[[fallthrough]];
case AMDGPU::GLOBAL_LOAD_DWORD:
case AMDGPU::GLOBAL_LOAD_DWORDX2:
case AMDGPU::GLOBAL_LOAD_DWORDX3:
