Remove trailing space

sed -Ei 's/[[:space:]]+$//' include/**/*.{def,h,td} lib/**/*.{cpp,h}
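Note: the ** patterns rely on recursive globbing, which zsh enables by default but bash does not, and the suffix-less -i assumes GNU sed (BSD/macOS sed needs -i ''). A bash-compatible sketch of the same cleanup, run from the top of the source tree, would be:

shopt -s globstar   # bash only; zsh expands ** recursively out of the box
sed -Ei 's/[[:space:]]+$//' include/**/*.{def,h,td} lib/**/*.{cpp,h}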

llvm-svn: 338293
Fangrui Song 2018-07-30 19:41:25 +00:00
parent 9ab8ef3cc4
commit 121474a01b
255 changed files with 969 additions and 969 deletions


@ -17,7 +17,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <algorithm>
#include <cstddef>
#include <initializer_list>
#include <iterator>


@ -682,7 +682,7 @@ bool sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
SmallVectorImpl<unsigned> &SortedIndices);
/// Returns true if the memory operations \p A and \p B are consecutive.
/// This is a simple API that does not depend on the analysis pass.
/// This is a simple API that does not depend on the analysis pass.
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
ScalarEvolution &SE, bool CheckType = true);
@ -734,7 +734,7 @@ private:
/// accesses of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by
/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
/// getResult return a LoopAccessInfo object. See this class for the
/// specifics of what information is provided.
class LoopAccessAnalysis


@ -10,7 +10,7 @@
/// Contains a collection of routines for determining if a given instruction is
/// guaranteed to execute if a given point in control flow is reached. The most
/// common example is an instruction within a loop being provably executed if we
/// branch to the header of it's containing loop.
/// branch to the header of it's containing loop.
///
//===----------------------------------------------------------------------===//
@ -58,7 +58,7 @@ void computeLoopSafetyInfo(LoopSafetyInfo *, Loop *);
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT,
const Loop *CurLoop,
const LoopSafetyInfo *SafetyInfo);
}
#endif


@ -326,7 +326,7 @@ public:
bool haveFastSqrt(Type *Ty) { return false; }
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }
unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }
int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,


@ -464,7 +464,7 @@ class Value;
/// This is equivelent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
/// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
/// Return true if this function can prove that the instruction I


@ -104,12 +104,12 @@ public:
const std::string &getName() const { return Name; }
/// By default, write barriers are replaced with simple store
/// instructions. If true, you must provide a custom pass to lower
/// instructions. If true, you must provide a custom pass to lower
/// calls to \@llvm.gcwrite.
bool customWriteBarrier() const { return CustomWriteBarriers; }
/// By default, read barriers are replaced with simple load
/// instructions. If true, you must provide a custom pass to lower
/// instructions. If true, you must provide a custom pass to lower
/// calls to \@llvm.gcread.
bool customReadBarrier() const { return CustomReadBarriers; }
@ -146,7 +146,7 @@ public:
}
/// By default, roots are left for the code generator so it can generate a
/// stack map. If true, you must provide a custom pass to lower
/// stack map. If true, you must provide a custom pass to lower
/// calls to \@llvm.gcroot.
bool customRoots() const { return CustomRoots; }


@ -786,7 +786,7 @@ public:
/// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal);
/// setLegalizeScalarToDifferentSizeStrategy(
/// G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
/// will end up defining getAction({G_ADD, 0, T}) to return the following
/// will end up defining getAction({G_ADD, 0, T}) to return the following
/// actions for different scalar types T:
/// LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)}
/// LLT::scalar(32): {Legal, 0, LLT::scalar(32)}
@ -814,7 +814,7 @@ public:
VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
}
/// A SizeChangeStrategy for the common case where legalization for a
/// A SizeChangeStrategy for the common case where legalization for a
/// particular operation consists of only supporting a specific set of type
/// sizes. E.g.
/// setAction ({G_DIV, 0, LLT::scalar(32)}, Legal);


@ -27,15 +27,15 @@ namespace llvm {
uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
bool r_pcrel; // was relocated pc-relative already
uint8_t r_length; // length = 2 ^ r_length
bool r_extern; //
bool r_extern; //
uint8_t r_type; // if not 0, machine-specific relocation type.
bool r_scattered; // 1 = scattered, 0 = non-scattered
int32_t r_value; // the value the item to be relocated is referring
// to.
public:
public:
uint32_t getPackedFields() const {
if (r_scattered)
return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
else
return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
@ -45,8 +45,8 @@ namespace llvm {
uint32_t getRawAddress() const { return r_address; }
MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
bool ext, uint8_t type, bool scattered = false,
int32_t value = 0) :
bool ext, uint8_t type, bool scattered = false,
int32_t value = 0) :
r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
};


@ -105,7 +105,7 @@ class MachineModuleInfo : public ImmutablePass {
/// basic block's address of label.
MMIAddrLabelMap *AddrLabelSymbols;
// TODO: Ideally, what we'd like is to have a switch that allows emitting
// TODO: Ideally, what we'd like is to have a switch that allows emitting
// synchronous (precise at call-sites only) CFA into .eh_frame. However,
// even under this switch, we'd like .debug_frame to be precise when using
// -g. At this moment, there's no way to specify that some CFI directives


@ -252,7 +252,7 @@ class TargetRegisterInfo;
MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.
public:
SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
/// was cloned. (SD scheduling only)
const MCSchedClassDesc *SchedClass =


@ -156,7 +156,7 @@ class StatepointOpers {
// TODO:: we should change the STATEPOINT representation so that CC and
// Flags should be part of meta operands, with args and deopt operands, and
// gc operands all prefixed by their length and a type code. This would be
// much more consistent.
// much more consistent.
public:
// These values are aboolute offsets into the operands of the statepoint
// instruction.


@ -16,7 +16,7 @@
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include <cassert>
#include <cassert>
#include <string>
namespace llvm {


@ -456,7 +456,7 @@ public:
/// stack frame offset. The first register is closest to the incoming stack
/// pointer if stack grows down, and vice versa.
/// Notice: This function does not take into account disabled CSRs.
/// In most cases you will want to use instead the function
/// In most cases you will want to use instead the function
/// getCalleeSavedRegs that is implemented in MachineRegisterInfo.
virtual const MCPhysReg*
getCalleeSavedRegs(const MachineFunction *MF) const = 0;
@ -518,7 +518,7 @@ public:
/// guaranteed to be restored before any uses. This is useful for targets that
/// have call sequences where a GOT register may be updated by the caller
/// prior to a call and is guaranteed to be restored (also by the caller)
/// after the call.
/// after the call.
virtual bool isCallerPreservedPhysReg(unsigned PhysReg,
const MachineFunction &MF) const {
return false;


@ -547,7 +547,7 @@ public:
/// may have side effects cannot be removed without semantically changing the
/// generated program.
bool isSafeToRemove() const;
/// Return true if the instruction is a variety of EH-block.
bool isEHPad() const {
switch (getOpcode()) {


@ -4016,7 +4016,7 @@ public:
void setDoesNotThrow() {
addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
}
/// Return the function called, or null if this is an
/// indirect function invocation.
///


@ -541,7 +541,7 @@ let IntrProperties = [IntrInaccessibleMemOnly] in {
[ LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;
def int_experimental_constrained_exp : Intrinsic<[ llvm_anyfloat_ty ],
def int_experimental_constrained_exp : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;


@ -275,7 +275,7 @@ def int_arm_stc : GCCBuiltin<"__builtin_arm_stc">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
def int_arm_stcl : GCCBuiltin<"__builtin_arm_stcl">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
def int_arm_stc2 : GCCBuiltin<"__builtin_arm_stc2">,
def int_arm_stc2 : GCCBuiltin<"__builtin_arm_stc2">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
def int_arm_stc2l : GCCBuiltin<"__builtin_arm_stc2l">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;


@ -1,10 +1,10 @@
//===- IntrinsicsPowerPC.td - Defines PowerPC intrinsics ---*- tablegen -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the PowerPC-specific intrinsics.
@ -122,21 +122,21 @@ class PowerPC_Vec_FFF_Intrinsic<string GCCIntSuffix>
/// PowerPC_Vec_BBB_Intrinsic - A PowerPC intrinsic that takes two v16i8
/// vectors and returns one. These intrinsics have no side effects.
class PowerPC_Vec_BBB_Intrinsic<string GCCIntSuffix>
class PowerPC_Vec_BBB_Intrinsic<string GCCIntSuffix>
: PowerPC_Vec_Intrinsic<GCCIntSuffix,
[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem]>;
/// PowerPC_Vec_HHH_Intrinsic - A PowerPC intrinsic that takes two v8i16
/// vectors and returns one. These intrinsics have no side effects.
class PowerPC_Vec_HHH_Intrinsic<string GCCIntSuffix>
class PowerPC_Vec_HHH_Intrinsic<string GCCIntSuffix>
: PowerPC_Vec_Intrinsic<GCCIntSuffix,
[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem]>;
/// PowerPC_Vec_WWW_Intrinsic - A PowerPC intrinsic that takes two v4i32
/// vectors and returns one. These intrinsics have no side effects.
class PowerPC_Vec_WWW_Intrinsic<string GCCIntSuffix>
class PowerPC_Vec_WWW_Intrinsic<string GCCIntSuffix>
: PowerPC_Vec_Intrinsic<GCCIntSuffix,
[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
@ -267,7 +267,7 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vcmpgtud : GCCBuiltin<"__builtin_altivec_vcmpgtud">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
[IntrNoMem]>;
def int_ppc_altivec_vcmpequw : GCCBuiltin<"__builtin_altivec_vcmpequw">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
@ -283,7 +283,7 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vcmpnezw : GCCBuiltin<"__builtin_altivec_vcmpnezw">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
def int_ppc_altivec_vcmpequh : GCCBuiltin<"__builtin_altivec_vcmpequh">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem]>;
@ -355,7 +355,7 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vcmpnezw_p : GCCBuiltin<"__builtin_altivec_vcmpnezw_p">,
Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
[IntrNoMem]>;
def int_ppc_altivec_vcmpequh_p : GCCBuiltin<"__builtin_altivec_vcmpequh_p">,
Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
[IntrNoMem]>;
@ -474,10 +474,10 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_altivec_vmsumshs : GCCBuiltin<"__builtin_altivec_vmsumshs">,
Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_altivec_vmsumubm : GCCBuiltin<"__builtin_altivec_vmsumubm">,
Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_altivec_vmsumuhm : GCCBuiltin<"__builtin_altivec_vmsumuhm">,
Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
@ -544,7 +544,7 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
// Other multiplies.
def int_ppc_altivec_vmladduhm : GCCBuiltin<"__builtin_altivec_vmladduhm">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>;
// Packs.
@ -626,21 +626,21 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
// Add Extended Quadword
def int_ppc_altivec_vaddeuqm : GCCBuiltin<"__builtin_altivec_vaddeuqm">,
Intrinsic<[llvm_v1i128_ty],
Intrinsic<[llvm_v1i128_ty],
[llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
[IntrNoMem]>;
def int_ppc_altivec_vaddecuq : GCCBuiltin<"__builtin_altivec_vaddecuq">,
Intrinsic<[llvm_v1i128_ty],
Intrinsic<[llvm_v1i128_ty],
[llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
[IntrNoMem]>;
// Sub Extended Quadword
def int_ppc_altivec_vsubeuqm : GCCBuiltin<"__builtin_altivec_vsubeuqm">,
Intrinsic<[llvm_v1i128_ty],
Intrinsic<[llvm_v1i128_ty],
[llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
[IntrNoMem]>;
def int_ppc_altivec_vsubecuq : GCCBuiltin<"__builtin_altivec_vsubecuq">,
Intrinsic<[llvm_v1i128_ty],
Intrinsic<[llvm_v1i128_ty],
[llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
[IntrNoMem]>;
}
@ -657,7 +657,7 @@ def int_ppc_altivec_vslw : PowerPC_Vec_WWW_Intrinsic<"vslw">;
// Right Shifts.
def int_ppc_altivec_vsr : PowerPC_Vec_WWW_Intrinsic<"vsr">;
def int_ppc_altivec_vsro : PowerPC_Vec_WWW_Intrinsic<"vsro">;
def int_ppc_altivec_vsrb : PowerPC_Vec_BBB_Intrinsic<"vsrb">;
def int_ppc_altivec_vsrh : PowerPC_Vec_HHH_Intrinsic<"vsrh">;
def int_ppc_altivec_vsrw : PowerPC_Vec_WWW_Intrinsic<"vsrw">;
@ -679,10 +679,10 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrNoMem]>;
def int_ppc_altivec_vperm : GCCBuiltin<"__builtin_altivec_vperm_4si">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_ppc_altivec_vsel : GCCBuiltin<"__builtin_altivec_vsel_4si">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_altivec_vgbbd : GCCBuiltin<"__builtin_altivec_vgbbd">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;


@ -285,7 +285,7 @@ private:
SpecificBumpPtrAllocator<AUFoldingSetNode> AUFoldingSetNodeAllocator;
// Maps from a pass to it's associated entry in UniqueAnalysisUsages. Does
// not own the storage associated with either key or value..
// not own the storage associated with either key or value..
DenseMap<Pass *, AnalysisUsage*> AnUsageMap;
/// Collection of PassInfo objects found via analysis IDs and in this top


@ -325,7 +325,7 @@ public:
explicit Statepoint(CallSite CS) : Base(CS) {}
};
/// Common base class for representing values projected from a statepoint.
/// Common base class for representing values projected from a statepoint.
/// Currently, the only projections available are gc.result and gc.relocate.
class GCProjectionInst : public IntrinsicInst {
public:


@ -101,10 +101,10 @@ public:
void operator delete(void *Usr);
/// Placement delete - required by std, called if the ctor throws.
void operator delete(void *Usr, unsigned) {
// Note: If a subclass manipulates the information which is required to calculate the
// Usr memory pointer, e.g. NumUserOperands, the operator delete of that subclass has
// Note: If a subclass manipulates the information which is required to calculate the
// Usr memory pointer, e.g. NumUserOperands, the operator delete of that subclass has
// to restore the changed information to the original value, since the dtor of that class
// is not called if the ctor fails.
// is not called if the ctor fails.
User::operator delete(Usr);
#ifndef LLVM_ENABLE_EXCEPTIONS
@ -113,10 +113,10 @@ public:
}
/// Placement delete - required by std, called if the ctor throws.
void operator delete(void *Usr, unsigned, bool) {
// Note: If a subclass manipulates the information which is required to calculate the
// Usr memory pointer, e.g. NumUserOperands, the operator delete of that subclass has
// Note: If a subclass manipulates the information which is required to calculate the
// Usr memory pointer, e.g. NumUserOperands, the operator delete of that subclass has
// to restore the changed information to the original value, since the dtor of that class
// is not called if the ctor fails.
// is not called if the ctor fails.
User::operator delete(Usr);
#ifndef LLVM_ENABLE_EXCEPTIONS


@ -44,7 +44,7 @@ namespace {
llvm::LLVMContext Context;
(void)new llvm::Module("", Context);
(void)new llvm::UnreachableInst(Context);
(void) llvm::createVerifierPass();
(void) llvm::createVerifierPass();
}
} ForceVMCoreLinking;
}


@ -64,7 +64,7 @@ public:
/// Returns true if at least one of the register writes performed by
/// \param Inst implicitly clears the upper portion of all super-registers.
///
///
/// Example: on X86-64, a write to EAX implicitly clears the upper half of
/// RAX. Also (still on x86) an XMM write perfomed by an AVX 128-bit
/// instruction implicitly clears the upper portion of the correspondent


@ -15,7 +15,7 @@ namespace llvm {
/// AsmCond - Class to support conditional assembly
///
/// The conditional assembly feature (.if, .else, .elseif and .endif) is
/// implemented with AsmCond that tells us what we are in the middle of
/// implemented with AsmCond that tells us what we are in the middle of
/// processing. Ignore can be either true or false. When true we are ignoring
/// the block of code in the middle of a conditional.


@ -297,8 +297,8 @@ public:
/// If the comment includes embedded \n's, they will each get the comment
/// prefix as appropriate. The added comment should not end with a \n.
/// By default, each comment is terminated with an end of line, i.e. the
/// EOL param is set to true by default. If one prefers not to end the
/// comment with a new line then the EOL param should be passed
/// EOL param is set to true by default. If one prefers not to end the
/// comment with a new line then the EOL param should be passed
/// with a false value.
virtual void AddComment(const Twine &T, bool EOL = true) {}


@ -333,7 +333,7 @@ public:
relocation_iterator locrel_begin() const;
relocation_iterator locrel_end() const;
void moveRelocationNext(DataRefImpl &Rel) const override;
uint64_t getRelocationOffset(DataRefImpl Rel) const override;
symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;


@ -231,7 +231,7 @@ AnalysisType &Pass::getAnalysisID(AnalysisID PI) const {
// should be a small number, we just do a linear search over a (dense)
// vector.
Pass *ResultPass = Resolver->findImplPass(PI);
assert(ResultPass &&
assert(ResultPass &&
"getAnalysis*() called on an analysis that was not "
"'required' by pass!");


@ -9,7 +9,7 @@
//
// This file defines PassRegistry, a class that is used in the initialization
// and registration of passes. At application startup, passes are registered
// with the PassRegistry, which is later provided to the PassManager for
// with the PassRegistry, which is later provided to the PassManager for
// dependency resolution and similar tasks.
//
//===----------------------------------------------------------------------===//


@ -207,7 +207,7 @@ struct CounterMappingRegion {
/// A CodeRegion associates some code with a counter
CodeRegion,
/// An ExpansionRegion represents a file expansion region that associates
/// An ExpansionRegion represents a file expansion region that associates
/// a source range with the expansion of a virtual source file, such as
/// for a macro instantiation or #include file.
ExpansionRegion,


@ -15,7 +15,7 @@
namespace llvm {
/// An auxiliary type to facilitate extraction of 3-byte entities.
/// An auxiliary type to facilitate extraction of 3-byte entities.
struct Uint24 {
uint8_t Bytes[3];
Uint24(uint8_t U) {


@ -1,10 +1,10 @@
//===- TargetCallingConv.td - Target Calling Conventions ---*- tablegen -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces with which targets


@ -13,7 +13,7 @@
// an instruction. Each MCInstPredicate class has a well-known semantic, and it
// is used by a PredicateExpander to generate code for MachineInstr and/or
// MCInst.
//
//
// MCInstPredicate definitions can be used to construct MCSchedPredicate
// definitions. An MCSchedPredicate can be used in place of a SchedPredicate
// when defining SchedReadVariant and SchedWriteVariant used by a processor
@ -63,7 +63,7 @@
//
// New MCInstPredicate classes must be added to this file. For each new class
// XYZ, an "expandXYZ" method must be added to the PredicateExpander.
//
//
//===----------------------------------------------------------------------===//
// Forward declarations.


@ -82,7 +82,7 @@ private:
bool considerHoistingFromTo(BasicBlock &FromBlock, BasicBlock &ToBlock);
// If true, this pass is a nop unless the target architecture has branch
// divergence.
// divergence.
const bool OnlyIfDivergentTarget = false;
TargetTransformInfo *TTI = nullptr;


@ -74,7 +74,7 @@ class Value;
/// vararg functions can be extracted. This is safe, if all vararg handling
/// code is extracted, including vastart. If AllowAlloca is true, then
/// extraction of blocks containing alloca instructions would be possible,
/// however code extractor won't validate whether extraction is legal.
/// however code extractor won't validate whether extraction is legal.
CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr,
BranchProbabilityInfo *BPI = nullptr,


@ -18,7 +18,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/AtomicOrdering.h"


@ -134,7 +134,7 @@ public:
private:
void loadAndParseMapFiles();
SymbolRewriter::RewriteDescriptorList Descriptors;
SymbolRewriter::RewriteDescriptorList Descriptors;
};
} // end namespace llvm


@ -142,7 +142,7 @@ void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
Alias = SetMayAlias;
AST.TotalMayAliasSetSize += size();
} else {
// First entry of must alias must have maximum size!
// First entry of must alias must have maximum size!
P->updateSizeAndAAInfo(Size, AAInfo);
}
assert(Result != NoAlias && "Cannot be part of must set!");
@ -251,9 +251,9 @@ void AliasSetTracker::clear() {
for (PointerMapType::iterator I = PointerMap.begin(), E = PointerMap.end();
I != E; ++I)
I->second->eraseFromList();
PointerMap.clear();
// The alias sets should all be clear now.
AliasSets.clear();
}
@ -269,7 +269,7 @@ AliasSet *AliasSetTracker::mergeAliasSetsForPointer(const Value *Ptr,
for (iterator I = begin(), E = end(); I != E;) {
iterator Cur = I++;
if (Cur->Forward || !Cur->aliasesPointer(Ptr, Size, AAInfo, AA)) continue;
if (!FoundSet) { // If this is the first alias set ptr can go into.
FoundSet = &*Cur; // Remember it.
} else { // Otherwise, we must merge the sets.
@ -336,13 +336,13 @@ AliasSet &AliasSetTracker::getAliasSetForPointer(Value *Pointer,
// Return the set!
return *Entry.getAliasSet(*this)->getForwardedTarget(*this);
}
if (AliasSet *AS = mergeAliasSetsForPointer(Pointer, Size, AAInfo)) {
// Add it to the alias set it aliases.
AS->addPointer(*this, Entry, Size, AAInfo);
return *AS;
}
// Otherwise create a new alias set to hold the loaded pointer.
AliasSets.push_back(new AliasSet());
AliasSets.back().addPointer(*this, Entry, Size, AAInfo);
@ -526,10 +526,10 @@ void AliasSetTracker::deleteValue(Value *PtrVal) {
AS->SetSize--;
TotalMayAliasSetSize--;
}
// Stop using the alias set.
AS->dropRef(*this);
PointerMap.erase(I);
}


@ -124,7 +124,7 @@ namespace {
}
char CFGPrinterLegacyPass::ID = 0;
INITIALIZE_PASS(CFGPrinterLegacyPass, "dot-cfg", "Print CFG of function to 'dot' file",
INITIALIZE_PASS(CFGPrinterLegacyPass, "dot-cfg", "Print CFG of function to 'dot' file",
false, true)
PreservedAnalyses CFGPrinterPass::run(Function &F,


@ -166,7 +166,7 @@ void CallGraphNode::print(raw_ostream &OS) const {
OS << "Call graph node for function: '" << F->getName() << "'";
else
OS << "Call graph node <<null function>>";
OS << "<<" << this << ">> #uses=" << getNumReferences() << '\n';
for (const auto &I : *this) {


@ -41,7 +41,7 @@ using namespace llvm;
#define DEBUG_TYPE "cgscc-passmgr"
static cl::opt<unsigned>
static cl::opt<unsigned>
MaxIterations("max-cg-scc-iterations", cl::ReallyHidden, cl::init(4));
STATISTIC(MaxSCCIterations, "Maximum CGSCCPassMgr iterations on one SCC");
@ -97,13 +97,13 @@ public:
}
PassManagerType getPassManagerType() const override {
return PMT_CallGraphPassManager;
return PMT_CallGraphPassManager;
}
private:
bool RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG,
bool &DevirtualizedCall);
bool RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC,
CallGraph &CG, bool &CallGraphUpToDate,
bool &DevirtualizedCall);
@ -142,21 +142,21 @@ bool CGPassManager::RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC,
if (EmitICRemark)
emitInstrCountChangedRemark(P, M, InstrCount);
}
// After the CGSCCPass is done, when assertions are enabled, use
// RefreshCallGraph to verify that the callgraph was correctly updated.
#ifndef NDEBUG
if (Changed)
RefreshCallGraph(CurSCC, CG, true);
#endif
return Changed;
}
assert(PM->getPassManagerType() == PMT_FunctionPassManager &&
"Invalid CGPassManager member");
FPPassManager *FPP = (FPPassManager*)P;
// Run pass P on all functions in the current SCC.
for (CallGraphNode *CGN : CurSCC) {
if (Function *F = CGN->getFunction()) {
@ -168,7 +168,7 @@ bool CGPassManager::RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC,
F->getContext().yield();
}
}
// The function pass(es) modified the IR, they may have clobbered the
// callgraph.
if (Changed && CallGraphUpToDate) {
@ -199,7 +199,7 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
bool MadeChange = false;
bool DevirtualizedCall = false;
// Scan all functions in the SCC.
unsigned FunctionNo = 0;
for (CallGraphSCC::iterator SCCIdx = CurSCC.begin(), E = CurSCC.end();
@ -207,14 +207,14 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
CallGraphNode *CGN = *SCCIdx;
Function *F = CGN->getFunction();
if (!F || F->isDeclaration()) continue;
// Walk the function body looking for call sites. Sync up the call sites in
// CGN with those actually in the function.
// Keep track of the number of direct and indirect calls that were
// invalidated and removed.
unsigned NumDirectRemoved = 0, NumIndirectRemoved = 0;
// Get the set of call sites currently in the function.
for (CallGraphNode::iterator I = CGN->begin(), E = CGN->end(); I != E; ) {
// If this call site is null, then the function pass deleted the call
@ -226,7 +226,7 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
CallSites.count(I->first) ||
// If the call edge is not from a call or invoke, or it is a
// instrinsic call, then the function pass RAUW'd a call with
// instrinsic call, then the function pass RAUW'd a call with
// another value. This can happen when constant folding happens
// of well known functions etc.
!CallSite(I->first) ||
@ -236,18 +236,18 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
CallSite(I->first).getCalledFunction()->getIntrinsicID()))) {
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
// If this was an indirect call site, count it.
if (!I->second->getFunction())
++NumIndirectRemoved;
else
else
++NumDirectRemoved;
// Just remove the edge from the set of callees, keep track of whether
// I points to the last element of the vector.
bool WasLast = I + 1 == E;
CGN->removeCallEdge(I);
// If I pointed to the last element of the vector, we have to bail out:
// iterator checking rejects comparisons of the resultant pointer with
// end.
@ -256,10 +256,10 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
E = CGN->end();
continue;
}
assert(!CallSites.count(I->first) &&
"Call site occurs in node multiple times");
CallSite CS(I->first);
if (CS) {
Function *Callee = CS.getCalledFunction();
@ -269,7 +269,7 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
}
++I;
}
// Loop over all of the instructions in the function, getting the callsites.
// Keep track of the number of direct/indirect calls added.
unsigned NumDirectAdded = 0, NumIndirectAdded = 0;
@ -280,7 +280,7 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
if (!CS) continue;
Function *Callee = CS.getCalledFunction();
if (Callee && Callee->isIntrinsic()) continue;
// If this call site already existed in the callgraph, just verify it
// matches up to expectations and remove it from CallSites.
DenseMap<Value*, CallGraphNode*>::iterator ExistingIt =
@ -290,11 +290,11 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
// Remove from CallSites since we have now seen it.
CallSites.erase(ExistingIt);
// Verify that the callee is right.
if (ExistingNode->getFunction() == CS.getCalledFunction())
continue;
// If we are in checking mode, we are not allowed to actually mutate
// the callgraph. If this is a case where we can infer that the
// callgraph is less precise than it could be (e.g. an indirect call
@ -303,10 +303,10 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
if (CheckingMode && CS.getCalledFunction() &&
ExistingNode->getFunction() == nullptr)
continue;
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
// If not, we either went from a direct call to indirect, indirect to
// direct, or direct to different direct.
CallGraphNode *CalleeNode;
@ -328,7 +328,7 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
MadeChange = true;
continue;
}
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
@ -341,11 +341,11 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
CalleeNode = CG.getCallsExternalNode();
++NumIndirectAdded;
}
CGN->addCalledFunction(CS, CalleeNode);
MadeChange = true;
}
// We scanned the old callgraph node, removing invalidated call sites and
// then added back newly found call sites. One thing that can happen is
// that an old indirect call site was deleted and replaced with a new direct
@ -359,13 +359,13 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
if (NumIndirectRemoved > NumIndirectAdded &&
NumDirectRemoved < NumDirectAdded)
DevirtualizedCall = true;
// After scanning this function, if we still have entries in callsites, then
// they are dangling pointers. WeakTrackingVH should save us for this, so
// abort if
// this happens.
assert(CallSites.empty() && "Dangling pointers found in call sites map");
// Periodically do an explicit clear to remove tombstones when processing
// large scc's.
if ((FunctionNo & 15) == 15)
@ -392,7 +392,7 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
bool CGPassManager::RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG,
bool &DevirtualizedCall) {
bool Changed = false;
// Keep track of whether the callgraph is known to be up-to-date or not.
// The CGSSC pass manager runs two types of passes:
// CallGraphSCC Passes and other random function passes. Because other
@ -406,7 +406,7 @@ bool CGPassManager::RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG,
for (unsigned PassNo = 0, e = getNumContainedPasses();
PassNo != e; ++PassNo) {
Pass *P = getContainedPass(PassNo);
// If we're in -debug-pass=Executions mode, construct the SCC node list,
// otherwise avoid constructing this string as it is expensive.
if (isPassDebuggingExecutionsOrMore()) {
@ -423,23 +423,23 @@ bool CGPassManager::RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG,
dumpPassInfo(P, EXECUTION_MSG, ON_CG_MSG, Functions);
}
dumpRequiredSet(P);
initializeAnalysisImpl(P);
// Actually run this pass on the current SCC.
Changed |= RunPassOnSCC(P, CurSCC, CG,
CallGraphUpToDate, DevirtualizedCall);
if (Changed)
dumpPassInfo(P, MODIFICATION_MSG, ON_CG_MSG, "");
dumpPreservedSet(P);
verifyPreservedAnalysis(P);
verifyPreservedAnalysis(P);
removeNotPreservedAnalysis(P);
recordAvailableAnalysis(P);
removeDeadPasses(P, "", ON_CG_MSG);
}
// If the callgraph was left out of date (because the last pass run was a
// functionpass), refresh it before we move on to the next SCC.
if (!CallGraphUpToDate)
@ -452,7 +452,7 @@ bool CGPassManager::RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG,
bool CGPassManager::runOnModule(Module &M) {
CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
bool Changed = doInitialization(CG);
// Walk the callgraph in bottom-up SCC order.
scc_iterator<CallGraph*> CGI = scc_begin(&CG);
@ -485,7 +485,7 @@ bool CGPassManager::runOnModule(Module &M) {
DevirtualizedCall = false;
Changed |= RunAllPassesOnSCC(CurSCC, CG, DevirtualizedCall);
} while (Iteration++ < MaxIterations && DevirtualizedCall);
if (DevirtualizedCall)
LLVM_DEBUG(dbgs() << " CGSCCPASSMGR: Stopped iteration after "
<< Iteration
@ -500,7 +500,7 @@ bool CGPassManager::runOnModule(Module &M) {
/// Initialize CG
bool CGPassManager::doInitialization(CallGraph &CG) {
bool Changed = false;
for (unsigned i = 0, e = getNumContainedPasses(); i != e; ++i) {
for (unsigned i = 0, e = getNumContainedPasses(); i != e; ++i) {
if (PMDataManager *PM = getContainedPass(i)->getAsPMDataManager()) {
assert(PM->getPassManagerType() == PMT_FunctionPassManager &&
"Invalid CGPassManager member");
@ -515,7 +515,7 @@ bool CGPassManager::doInitialization(CallGraph &CG) {
/// Finalize CG
bool CGPassManager::doFinalization(CallGraph &CG) {
bool Changed = false;
for (unsigned i = 0, e = getNumContainedPasses(); i != e; ++i) {
for (unsigned i = 0, e = getNumContainedPasses(); i != e; ++i) {
if (PMDataManager *PM = getContainedPass(i)->getAsPMDataManager()) {
assert(PM->getPassManagerType() == PMT_FunctionPassManager &&
"Invalid CGPassManager member");
@ -541,7 +541,7 @@ void CallGraphSCC::ReplaceNode(CallGraphNode *Old, CallGraphNode *New) {
Nodes[i] = New;
break;
}
// Update the active scc_iterator so that it doesn't contain dangling
// pointers to the old CallGraphNode.
scc_iterator<CallGraph*> *CGI = (scc_iterator<CallGraph*>*)Context;
@ -555,18 +555,18 @@ void CallGraphSCC::ReplaceNode(CallGraphNode *Old, CallGraphNode *New) {
/// Assign pass manager to manage this pass.
void CallGraphSCCPass::assignPassManager(PMStack &PMS,
PassManagerType PreferredType) {
// Find CGPassManager
// Find CGPassManager
while (!PMS.empty() &&
PMS.top()->getPassManagerType() > PMT_CallGraphPassManager)
PMS.pop();
assert(!PMS.empty() && "Unable to handle Call Graph Pass");
CGPassManager *CGP;
if (PMS.top()->getPassManagerType() == PMT_CallGraphPassManager)
CGP = (CGPassManager*)PMS.top();
else {
// Create new Call Graph SCC Pass Manager if it does not exist.
// Create new Call Graph SCC Pass Manager if it does not exist.
assert(!PMS.empty() && "Unable to create Call Graph Pass Manager");
PMDataManager *PMD = PMS.top();
@ -608,7 +608,7 @@ namespace {
class PrintCallGraphPass : public CallGraphSCCPass {
std::string Banner;
raw_ostream &OS; // raw_ostream to print on.
public:
static char ID;
@ -640,10 +640,10 @@ namespace {
}
return false;
}
StringRef getPassName() const override { return "Print CallGraph IR"; }
};
} // end anonymous namespace.
char PrintCallGraphPass::ID = 0;


@ -272,7 +272,7 @@ void DemandedBits::performAnalysis() {
// Analysis already completed for this function.
return;
Analyzed = true;
Visited.clear();
AliveBits.clear();
@ -367,7 +367,7 @@ void DemandedBits::performAnalysis() {
APInt DemandedBits::getDemandedBits(Instruction *I) {
performAnalysis();
const DataLayout &DL = I->getModule()->getDataLayout();
auto Found = AliveBits.find(I);
if (Found != AliveBits.end())


@ -409,7 +409,7 @@ bool GlobalsAAResult::AnalyzeIndirectGlobalMemory(GlobalVariable *GV) {
if (Constant *C = GV->getInitializer())
if (!C->isNullValue())
return false;
// Walk the user list of the global. If we find anything other than a direct
// load or store, bail out.
for (User *U : GV->users()) {
@ -464,7 +464,7 @@ bool GlobalsAAResult::AnalyzeIndirectGlobalMemory(GlobalVariable *GV) {
return true;
}
void GlobalsAAResult::CollectSCCMembership(CallGraph &CG) {
void GlobalsAAResult::CollectSCCMembership(CallGraph &CG) {
// We do a bottom-up SCC traversal of the call graph. In other words, we
// visit all callees before callers (leaf-first).
unsigned SCCID = 0;
@ -633,7 +633,7 @@ static bool isNonEscapingGlobalNoAliasWithLoad(const GlobalValue *GV,
Inputs.push_back(V);
do {
const Value *Input = Inputs.pop_back_val();
if (isa<GlobalValue>(Input) || isa<Argument>(Input) || isa<CallInst>(Input) ||
isa<InvokeInst>(Input))
// Arguments to functions or returns from functions are inherently
@ -654,7 +654,7 @@ static bool isNonEscapingGlobalNoAliasWithLoad(const GlobalValue *GV,
if (auto *LI = dyn_cast<LoadInst>(Input)) {
Inputs.push_back(GetUnderlyingObject(LI->getPointerOperand(), DL));
continue;
}
}
if (auto *SI = dyn_cast<SelectInst>(Input)) {
const Value *LHS = GetUnderlyingObject(SI->getTrueValue(), DL);
const Value *RHS = GetUnderlyingObject(SI->getFalseValue(), DL);
@ -672,7 +672,7 @@ static bool isNonEscapingGlobalNoAliasWithLoad(const GlobalValue *GV,
}
continue;
}
return false;
} while (!Inputs.empty());
@ -754,7 +754,7 @@ bool GlobalsAAResult::isNonEscapingGlobalNoAlias(const GlobalValue *GV,
// non-addr-taken globals.
continue;
}
// Recurse through a limited number of selects, loads and PHIs. This is an
// arbitrary depth of 4, lower numbers could be used to fix compile time
// issues if needed, but this is generally expected to be only be important


@ -725,7 +725,7 @@ bool LazyValueInfoImpl::solveBlockValueNonLocal(ValueLatticeElement &BBLV,
// frequently arranged such that dominating ones come first and we quickly
// find a path to function entry. TODO: We should consider explicitly
// canonicalizing to make this true rather than relying on this happy
// accident.
// accident.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
ValueLatticeElement EdgeResult;
if (!getEdgeValue(Val, *PI, BB, EdgeResult))

View File

@ -176,8 +176,8 @@ const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on N-th loop
/// iteration. Then B is calculated as:
/// B = A + Step*N .
/// iteration. Then B is calculated as:
/// B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
/// N = (TripCount > 0) ? RoundDown(TripCount -1 , VF) : 0
@ -1317,7 +1317,7 @@ bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
return false;
}
/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// memory accesses, that have the same stride whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose takenCount is \p BackedgeTakenCount, check if it is
@ -1336,19 +1336,19 @@ static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
// If we can prove that
// (**) |Dist| > BackedgeTakenCount * Step
// where Step is the absolute stride of the memory accesses in bytes,
// where Step is the absolute stride of the memory accesses in bytes,
// then there is no dependence.
//
// Ratioanle:
// We basically want to check if the absolute distance (|Dist/Step|)
// is >= the loop iteration count (or > BackedgeTakenCount).
// This is equivalent to the Strong SIV Test (Practical Dependence Testing,
// Section 4.2.1); Note, that for vectorization it is sufficient to prove
// Ratioanle:
// We basically want to check if the absolute distance (|Dist/Step|)
// is >= the loop iteration count (or > BackedgeTakenCount).
// This is equivalent to the Strong SIV Test (Practical Dependence Testing,
// Section 4.2.1); Note, that for vectorization it is sufficient to prove
// that the dependence distance is >= VF; This is checked elsewhere.
// But in some cases we can prune unknown dependence distances early, and
// even before selecting the VF, and without a runtime test, by comparing
// the distance against the loop iteration count. Since the vectorized code
// will be executed only if LoopCount >= VF, proving distance >= LoopCount
// But in some cases we can prune unknown dependence distances early, and
// even before selecting the VF, and without a runtime test, by comparing
// the distance against the loop iteration count. Since the vectorized code
// will be executed only if LoopCount >= VF, proving distance >= LoopCount
// also guarantees that distance >= VF.
//
const uint64_t ByteStride = Stride * TypeByteSize;
@ -1360,8 +1360,8 @@ static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());
// The dependence distance can be positive/negative, so we sign extend Dist;
// The multiplication of the absolute stride in bytes and the
// The dependence distance can be positive/negative, so we sign extend Dist;
// The multiplication of the absolute stride in bytes and the
// backdgeTakenCount is non-negative, so we zero extend Product.
if (DistTypeSize > ProductTypeSize)
CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
@ -2212,24 +2212,24 @@ void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
"versioning:");
LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
// Avoid adding the "Stride == 1" predicate when we know that
// Avoid adding the "Stride == 1" predicate when we know that
// Stride >= Trip-Count. Such a predicate will effectively optimize a single
// or zero iteration loop, as Trip-Count <= Stride == 1.
//
//
// TODO: We are currently not making a very informed decision on when it is
// beneficial to apply stride versioning. It might make more sense that the
// users of this analysis (such as the vectorizer) will trigger it, based on
// their specific cost considerations; For example, in cases where stride
// users of this analysis (such as the vectorizer) will trigger it, based on
// their specific cost considerations; For example, in cases where stride
// versioning does not help resolving memory accesses/dependences, the
// vectorizer should evaluate the cost of the runtime test, and the benefit
// of various possible stride specializations, considering the alternatives
// of using gather/scatters (if available).
// vectorizer should evaluate the cost of the runtime test, and the benefit
// of various possible stride specializations, considering the alternatives
// of using gather/scatters (if available).
const SCEV *StrideExpr = PSE->getSCEV(Stride);
const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
// Match the types so we can compare the stride and the BETakenCount.
// The Stride can be positive/negative, so we sign extend Stride;
// The Stride can be positive/negative, so we sign extend Stride;
// The backdgeTakenCount is non-negative, so we zero extend BETakenCount.
const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
@ -2243,7 +2243,7 @@ void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
// Since TripCount == BackEdgeTakenCount + 1, checking:
// "Stride >= TripCount" is equivalent to checking:
// "Stride >= TripCount" is equivalent to checking:
// Stride - BETakenCount > 0
if (SE->isKnownPositive(StrideMinusBETaken)) {
LLVM_DEBUG(


@ -118,7 +118,7 @@ bool MemDepPrinter::runOnFunction(Function &F) {
} else {
SmallVector<NonLocalDepResult, 4> NLDI;
assert( (isa<LoadInst>(Inst) || isa<StoreInst>(Inst) ||
isa<VAArgInst>(Inst)) && "Unknown memory instruction!");
isa<VAArgInst>(Inst)) && "Unknown memory instruction!");
MDA.getNonLocalPointerDependency(Inst, NLDI);
DepSet &InstDeps = Deps[Inst];


@ -235,7 +235,7 @@ public:
}
void printInfoComment(const Value &V, formatted_raw_ostream &OS) override {
void printInfoComment(const Value &V, formatted_raw_ostream &OS) override {
if (!MustExec.count(&V))
return;
@ -245,7 +245,7 @@ public:
OS << " ; (mustexec in " << NumLoops << " loops: ";
else
OS << " ; (mustexec in: ";
bool first = true;
for (const Loop *L : Loops) {
if (!first)
@ -264,6 +264,6 @@ bool MustExecutePrinter::runOnFunction(Function &F) {
MustExecuteAnnotatedWriter Writer(F, DT, LI);
F.print(dbgs(), &Writer);
return false;
}


@ -4839,7 +4839,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
// Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
// for each of StartVal and Accum
auto getExtendedExpr = [&](const SCEV *Expr,
auto getExtendedExpr = [&](const SCEV *Expr,
bool CreateSignExtend) -> const SCEV * {
assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
@ -4935,11 +4935,11 @@ ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
return Rewrite;
}
// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {


@ -721,7 +721,7 @@ struct ReductionData {
static Optional<ReductionData> getReductionData(Instruction *I) {
Value *L, *R;
if (m_BinOp(m_Value(L), m_Value(R)).match(I))
return ReductionData(RK_Arithmetic, I->getOpcode(), L, R);
return ReductionData(RK_Arithmetic, I->getOpcode(), L, R);
if (auto *SI = dyn_cast<SelectInst>(I)) {
if (m_SMin(m_Value(L), m_Value(R)).match(SI) ||
m_SMax(m_Value(L), m_Value(R)).match(SI) ||
@ -730,8 +730,8 @@ static Optional<ReductionData> getReductionData(Instruction *I) {
m_UnordFMin(m_Value(L), m_Value(R)).match(SI) ||
m_UnordFMax(m_Value(L), m_Value(R)).match(SI)) {
auto *CI = cast<CmpInst>(SI->getCondition());
return ReductionData(RK_MinMax, CI->getOpcode(), L, R);
}
return ReductionData(RK_MinMax, CI->getOpcode(), L, R);
}
if (m_UMin(m_Value(L), m_Value(R)).match(SI) ||
m_UMax(m_Value(L), m_Value(R)).match(SI)) {
auto *CI = cast<CmpInst>(SI->getCondition());
@ -851,11 +851,11 @@ static ReductionKind matchPairwiseReduction(const ExtractElementInst *ReduxRoot,
// We look for a sequence of shuffle,shuffle,add triples like the following
// that builds a pairwise reduction tree.
//
//
// (X0, X1, X2, X3)
// (X0 + X1, X2 + X3, undef, undef)
// ((X0 + X1) + (X2 + X3), undef, undef, undef)
//
//
// %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
// %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
@ -916,7 +916,7 @@ matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
// We look for a sequence of shuffles and adds like the following matching one
// fadd, shuffle vector pair at a time.
//
//
// %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
// %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
@ -927,7 +927,7 @@ matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
unsigned MaskStart = 1;
Instruction *RdxOp = RdxStart;
SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
unsigned NumVecElemsRemain = NumVecElems;
while (NumVecElemsRemain - 1) {
// Check for the right reduction operation.
@ -1093,7 +1093,7 @@ int TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
case Instruction::InsertElement: {
const InsertElementInst * IE = cast<InsertElementInst>(I);
ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
unsigned Idx = -1;
unsigned Idx = -1;
if (CI)
Idx = CI->getZExtValue();
return getVectorInstrCost(I->getOpcode(),
@ -1104,7 +1104,7 @@ int TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
// TODO: Identify and add costs for insert/extract subvector, etc.
if (Shuffle->changesLength())
return -1;
if (Shuffle->isIdentity())
return 0;


@ -71,7 +71,7 @@
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
#include <utility>
using namespace llvm;
using namespace llvm::PatternMatch;
@ -3828,7 +3828,7 @@ static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
// If either of the values is known to be non-negative, adding them can only
// overflow if the second is also non-negative, so we can assume that.
// Two non-negative numbers will only overflow if there is a carry to the
// Two non-negative numbers will only overflow if there is a carry to the
// sign bit, so we can check if even when the values are as big as possible
// there is no overflow to the sign bit.
if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
@ -3855,7 +3855,7 @@ static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
}
// If we reached here it means that we know nothing about the sign bits.
// In this case we can't know if there will be an overflow, since by
// In this case we can't know if there will be an overflow, since by
// changing the sign bits any two values can be made to overflow.
return false;
}
@ -3905,7 +3905,7 @@ static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
// operands.
bool LHSOrRHSKnownNonNegative =
(LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
bool LHSOrRHSKnownNegative =
bool LHSOrRHSKnownNegative =
(LHSKnown.isNegative() || RHSKnown.isNegative());
if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
@ -4454,7 +4454,7 @@ static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
return SPR;
if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
return {SPF_UNKNOWN, SPNB_NA, false};
@ -4630,7 +4630,7 @@ static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
}
}
if (isKnownNegation(TrueVal, FalseVal)) {
// Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
// match against either LHS or sext(LHS).


@ -842,7 +842,7 @@ static void maybeSetDSOLocal(bool DSOLocal, GlobalValue &GV) {
}
/// parseIndirectSymbol:
/// ::= GlobalVar '=' OptionalLinkage OptionalPreemptionSpecifier
/// ::= GlobalVar '=' OptionalLinkage OptionalPreemptionSpecifier
/// OptionalVisibility OptionalDLLStorageClass
/// OptionalThreadLocal OptionalUnnamedAddr
// 'alias|ifunc' IndirectSymbol
@ -3935,7 +3935,7 @@ bool LLParser::ParseMDField(LocTy Loc, StringRef Name, EmissionKindField &Result
Lex.Lex();
return false;
}
template <>
bool LLParser::ParseMDField(LocTy Loc, StringRef Name,
DwarfAttEncodingField &Result) {


@ -3809,7 +3809,7 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
continue;
// The mapping from OriginalId to GUID may return a GUID
// that corresponds to a static variable. Filter it out here.
// This can happen when
// This can happen when
// 1) There is a call to a library function which does not have
// a CallValidId;
// 2) There is a static variable with the OriginalGUID identical


@ -46,7 +46,7 @@ public:
MachineBasicBlock::iterator End,
unsigned InsertPosIndex,
DbgValueVector &DbgValues) = 0;
/// Update liveness information to account for the current
/// instruction, which will not be scheduled.
virtual void Observe(MachineInstr &MI, unsigned Count,


@ -112,7 +112,7 @@ protected:
uint64_t OffsetInBits = 0;
unsigned DwarfVersion;
/// Sometimes we need to add a DW_OP_bit_piece to describe a subregister.
/// Sometimes we need to add a DW_OP_bit_piece to describe a subregister.
unsigned SubRegisterSizeInBits = 0;
unsigned SubRegisterOffsetInBits = 0;


@ -95,6 +95,6 @@ bool DwarfFile::addScopeVariable(LexicalScope *LS, DbgVariable *Var) {
}
} else {
ScopeVars.Locals.push_back(Var);
}
}
return true;
}


@ -1182,7 +1182,7 @@ DIE *DwarfUnit::getOrCreateModule(const DIModule *M) {
addString(MDie, dwarf::DW_AT_LLVM_include_path, M->getIncludePath());
if (!M->getISysRoot().empty())
addString(MDie, dwarf::DW_AT_LLVM_isysroot, M->getISysRoot());
return &MDie;
}
@ -1691,7 +1691,7 @@ void DwarfUnit::emitCommonHeader(bool UseOffsets, dwarf::UnitType UT) {
}
void DwarfTypeUnit::emitHeader(bool UseOffsets) {
DwarfUnit::emitCommonHeader(UseOffsets,
DwarfUnit::emitCommonHeader(UseOffsets,
DD->useSplitDwarf() ? dwarf::DW_UT_split_type
: dwarf::DW_UT_type);
Asm->OutStreamer->AddComment("Type Signature");


@ -362,19 +362,19 @@ IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
auto *M = LI->getModule();
Type *NewTy = getCorrespondingIntegerType(LI->getType(),
M->getDataLayout());
IRBuilder<> Builder(LI);
Value *Addr = LI->getPointerOperand();
Type *PT = PointerType::get(NewTy,
Addr->getType()->getPointerAddressSpace());
Value *NewAddr = Builder.CreateBitCast(Addr, PT);
auto *NewLI = Builder.CreateLoad(NewAddr);
NewLI->setAlignment(LI->getAlignment());
NewLI->setVolatile(LI->isVolatile());
@ -452,7 +452,7 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
M->getDataLayout());
Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);
Value *Addr = SI->getPointerOperand();
Type *PT = PointerType::get(NewTy,
Addr->getType()->getPointerAddressSpace());
@ -920,14 +920,14 @@ Value *AtomicExpand::insertRMWLLSCLoop(
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what use to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
/// one.
AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
auto *M = CI->getModule();
Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
M->getDataLayout());
IRBuilder<> Builder(CI);
Value *Addr = CI->getPointerOperand();
Type *PT = PointerType::get(NewTy,
Addr->getType()->getPointerAddressSpace());
@ -935,8 +935,8 @@ AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *
Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);
auto *NewCI = Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewNewVal,
CI->getSuccessOrdering(),
CI->getFailureOrdering(),
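
The AtomicExpand hunks above all follow one pattern: pick an integer type of the same width, bitcast the pointer operand, convert any pointer-valued operands with ptrtoint, and emit the equivalent integer atomic operation. A minimal sketch of that pattern for the cmpxchg case, assuming a caller-provided IRBuilder and DataLayout (the helper name is illustrative, not from this file):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: a pointer-typed cmpxchg rewritten as an integer cmpxchg, mirroring
// convertCmpXchgToIntegerType above.
static AtomicCmpXchgInst *cmpXchgToIntegerSketch(AtomicCmpXchgInst *CI,
                                                 const DataLayout &DL,
                                                 IRBuilder<> &Builder) {
  Type *IntTy = DL.getIntPtrType(CI->getCompareOperand()->getType());
  Value *Addr = CI->getPointerOperand();
  Type *IntPtrTy =
      PointerType::get(IntTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, IntPtrTy);
  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), IntTy);
  Value *NewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), IntTy);
  // Orderings are carried over unchanged; callers still have to inttoptr
  // the loaded result back to the original pointer type.
  return Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewVal,
                                     CI->getSuccessOrdering(),
                                     CI->getFailureOrdering());
}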

@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This file contains the boilerplate required to define our various built in
// gc lowering strategies.
//
//===----------------------------------------------------------------------===//

@ -530,7 +530,7 @@ BreakAntiDependencies(const std::vector<SUnit> &SUnits,
// Kill instructions can define registers but are really nops, and there
// might be a real definition earlier that needs to be paired with uses
// dominated by this kill.
// FIXME: It may be possible to remove the isKill() restriction once PR18663
// has been properly fixed. There can be value in processing kills as seen
// in the AggressiveAntiDepBreaker class.

@ -159,7 +159,7 @@ GCStrategy *GCModuleInfo::getGCStrategy(const StringRef Name) {
auto NMI = GCStrategyMap.find(Name);
if (NMI != GCStrategyMap.end())
return NMI->getValue();
for (auto& Entry : GCRegistry::entries()) {
if (Name == Entry.getName()) {
std::unique_ptr<GCStrategy> S = Entry.instantiate();
@ -171,11 +171,11 @@ GCStrategy *GCModuleInfo::getGCStrategy(const StringRef Name) {
}
if (GCRegistry::begin() == GCRegistry::end()) {
// In normal operation, the registry should not be empty. There should
// be the builtin GCs if nothing else. The most likely scenario here is
// that we got here without running the initializers used by the Registry
// itself and it's registration mechanism.
const std::string error = ("unsupported GC: " + Name).str() +
" (did you remember to link and initialize the CodeGen library?)";
report_fatal_error(error);
} else

@ -56,7 +56,7 @@
// - it makes linker optimizations less useful (order files, LOHs, ...)
// - it forces usage of indexed addressing (which isn't necessarily "free")
// - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf cl::opts).
//
// ===---------------------------------------------------------------------===//

@ -113,22 +113,22 @@ void IntrinsicLowering::AddPrototypes(Module &M) {
case Intrinsic::memcpy:
M.getOrInsertFunction("memcpy",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
DL.getIntPtrType(Context));
break;
case Intrinsic::memmove:
M.getOrInsertFunction("memmove",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
DL.getIntPtrType(Context));
break;
case Intrinsic::memset:
M.getOrInsertFunction("memset",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt32Ty(M.getContext()),
DL.getIntPtrType(Context));
break;
case Intrinsic::sqrt:
@ -210,13 +210,13 @@ static Value *LowerBSWAP(LLVMContext &Context, Value *V, Instruction *IP) {
"bswap.5");
Value* Tmp4 = Builder.CreateLShr(V, ConstantInt::get(V->getType(), 8),
"bswap.4");
Value* Tmp3 = Builder.CreateLShr(V,
ConstantInt::get(V->getType(), 24),
"bswap.3");
Value* Tmp2 = Builder.CreateLShr(V,
ConstantInt::get(V->getType(), 40),
"bswap.2");
Value* Tmp1 = Builder.CreateLShr(V,
ConstantInt::get(V->getType(), 56),
"bswap.1");
Tmp7 = Builder.CreateAnd(Tmp7,
@ -274,7 +274,7 @@ static Value *LowerCTPOP(LLVMContext &Context, Value *V, Instruction *IP) {
for (unsigned n = 0; n < WordSize; ++n) {
Value *PartValue = V;
for (unsigned i = 1, ct = 0; i < (BitSize>64 ? 64 : BitSize);
i <<= 1, ++ct) {
Value *MaskCst = ConstantInt::get(V->getType(), MaskValues[ct]);
Value *LHS = Builder.CreateAnd(PartValue, MaskCst, "cppop.and1");
@ -381,7 +381,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
case Intrinsic::siglongjmp: {
// Insert the call to abort
ReplaceCallWith("abort", CI, CS.arg_end(), CS.arg_end(),
Type::getVoidTy(Context));
break;
}
@ -392,7 +392,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
case Intrinsic::bswap:
CI->replaceAllUsesWith(LowerBSWAP(Context, CI->getArgOperand(0), CI));
break;
case Intrinsic::ctlz:
CI->replaceAllUsesWith(LowerCTLZ(Context, CI->getArgOperand(0), CI));
break;
@ -420,7 +420,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
break;
}
case Intrinsic::get_dynamic_area_offset:
errs() << "WARNING: this target does not support the custom llvm.get."
"dynamic.area.offset. It is being lowered to a constant 0\n";
@ -473,7 +473,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
case Intrinsic::assume:
case Intrinsic::var_annotation:
break; // Strip out these intrinsics
case Intrinsic::memcpy: {
Type *IntPtr = DL.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
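
LowerBSWAP above assembles the swapped value out of shift, mask and or nodes. The same dataflow written as ordinary C++ for the 32-bit case, for illustration only (the pass emits IR nodes rather than running this):

#include <cstdint>

// Each byte is shifted to its mirrored position and masked before the final
// or; e.g. bswap32(0x11223344) == 0x44332211.
static uint32_t bswap32(uint32_t V) {
  uint32_t B0 = (V >> 24) & 0x000000FFu; // old byte 3 -> new byte 0
  uint32_t B1 = (V >> 8)  & 0x0000FF00u; // old byte 2 -> new byte 1
  uint32_t B2 = (V << 8)  & 0x00FF0000u; // old byte 1 -> new byte 2
  uint32_t B3 = (V << 24) & 0xFF000000u; // old byte 0 -> new byte 3
  return B0 | B1 | B2 | B3;
}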

@ -340,7 +340,7 @@ void LiveDebugValues::printVarLocInMBB(const MachineFunction &MF,
/// address the spill location in a target independent way.
int LiveDebugValues::extractSpillBaseRegAndOffset(const MachineInstr &MI,
unsigned &Reg) {
assert(MI.hasOneMemOperand() &&
"Spill instruction does not have exactly one memory operand?");
auto MMOI = MI.memoperands_begin();
const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue();
@ -472,7 +472,7 @@ bool LiveDebugValues::isSpillInstruction(const MachineInstr &MI,
int FI;
const MachineMemOperand *MMO;
// TODO: Handle multiple stores folded into one.
if (!MI.hasOneMemOperand())
return false;

@ -314,10 +314,10 @@ public:
MMI.deleteMachineFunctionFor(F);
return true;
}
StringRef getPassName() const override {
return "Free MachineFunction";
}
};
} // end anonymous namespace

@ -945,7 +945,7 @@ unsigned MachineOutliner::findCandidates(
// AA (where each "A" is an instruction).
//
// We might have some portion of the module that looks like this:
// AAAAAA (6 A's)
//
// In this case, there are 5 different copies of "AA" in this range, but
// at most 3 can be outlined. If only outlining 3 of these is going to

@ -383,7 +383,7 @@ void MachineRegisterInfo::replaceRegWith(unsigned FromReg, unsigned ToReg) {
assert(FromReg != ToReg && "Cannot replace a reg with itself");
const TargetRegisterInfo *TRI = getTargetRegisterInfo();
// TODO: This could be more efficient by bulk changing the operands.
for (reg_iterator I = reg_begin(FromReg), E = reg_end(); I != E; ) {
MachineOperand &O = *I;

@ -254,14 +254,14 @@ public:
private:
MachineInstr *PHI;
unsigned idx;
public:
explicit PHI_iterator(MachineInstr *P) // begin iterator
: PHI(P), idx(1) {}
PHI_iterator(MachineInstr *P, bool) // end iterator
: PHI(P), idx(PHI->getNumOperands()) {}
PHI_iterator &operator++() { idx += 2; return *this; }
bool operator==(const PHI_iterator& x) const { return idx == x.idx; }
bool operator!=(const PHI_iterator& x) const { return !operator==(x); }

@ -509,7 +509,7 @@ bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,
}
ToSplit.insert(std::make_pair(FromBB, ToBB));
return true;
}

@ -655,7 +655,7 @@ static bool getDataDeps(const MachineInstr &UseMI,
// Debug values should not be included in any calculations.
if (UseMI.isDebugInstr())
return false;
bool HasPhysRegs = false;
for (MachineInstr::const_mop_iterator I = UseMI.operands_begin(),
E = UseMI.operands_end(); I != E; ++I) {
@ -1167,7 +1167,7 @@ MachineTraceMetrics::Ensemble::getTrace(const MachineBasicBlock *MBB) {
computeInstrDepths(MBB);
if (!TBI.HasValidInstrHeights)
computeInstrHeights(MBB);
return Trace(*this, TBI);
}

@ -1077,8 +1077,8 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
auto VerifyStackMapConstant = [&](unsigned Offset) {
if (!MI->getOperand(Offset).isImm() ||
MI->getOperand(Offset).getImm() != StackMaps::ConstantOp ||
!MI->getOperand(Offset + 1).isImm())
report("stack map constant to STATEPOINT not well formed!", MI);
};
const unsigned VarStart = StatepointOpers(MI).getVarIdx();

@ -4203,8 +4203,8 @@ bool DAGCombiner::SearchForAndLoads(SDNode *N,
// Allow one node which will masked along with any loads found.
if (NodeToMask)
return false;
// Also ensure that the node to be masked only produces one data result.
NodeToMask = Op.getNode();
if (NodeToMask->getNumValues() > 1) {
bool HasValue = false;
@ -5479,7 +5479,7 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) {
return nullptr;
// At this point we've matched or extracted a shift op on each side.
if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
return nullptr; // Not shifting the same value.
@ -10392,7 +10392,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
N10.getOperand(0))),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N10.getOperand(1)),
N0, Flags);
}
}
@ -10455,7 +10455,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
N0.getOperand(2).getOperand(0),
N0.getOperand(2).getOperand(1),
DAG.getNode(ISD::FNEG, SL, VT,
N1), Flags), Flags);
}
// fold (fsub x, (fma y, z, (fmul u, v)))
@ -10470,7 +10470,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
N1.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, N20),
N21, N0, Flags), Flags);
}
@ -10490,7 +10490,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N020.getOperand(1)),
DAG.getNode(ISD::FNEG, SL, VT,
N1), Flags), Flags);
}
}
}
@ -10518,7 +10518,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N002.getOperand(1)),
DAG.getNode(ISD::FNEG, SL, VT,
N1), Flags), Flags);
}
}
}
@ -10541,7 +10541,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
VT, N1200)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N1201),
N0, Flags), Flags);
}
}
@ -10572,7 +10572,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
VT, N1020)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N1021),
N0, Flags), Flags);
}
}
}
@ -10628,7 +10628,7 @@ SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
Y, Flags);
if (XC1 && XC1->isExactlyValue(-1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
}
return SDValue();
};
@ -10652,7 +10652,7 @@ SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
if (XC0 && XC0->isExactlyValue(-1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
auto XC1 = isConstOrConstSplatFP(X.getOperand(1));
if (XC1 && XC1->isExactlyValue(+1.0))
@ -10957,12 +10957,12 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
if (Options.UnsafeFPMath ||
(Flags.hasNoNaNs() && Flags.hasNoSignedZeros())) {
// fold (fmul A, 0) -> 0
if (N1CFP && N1CFP->isZero())
return N1;
}
if (Options.UnsafeFPMath || Flags.hasAllowReassociation()) {
// fmul (fmul X, C1), C2 -> fmul X, C1 * C2
@ -11370,7 +11370,7 @@ SDValue DAGCombiner::visitFREM(SDNode *N) {
SDValue DAGCombiner::visitFSQRT(SDNode *N) {
SDNodeFlags Flags = N->getFlags();
if (!DAG.getTarget().Options.UnsafeFPMath &&
!Flags.hasApproximateFuncs())
return SDValue();

@ -1548,7 +1548,7 @@ void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
{
MachineInstr *CurLastLocalValue = getLastLocalValue();
if (CurLastLocalValue != SavedLastLocalValue) {
// Find the first local value instruction to be deleted.
// This is the instruction after SavedLastLocalValue if it is non-NULL.
// Otherwise it's the first instruction in the block.
MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
@ -1569,7 +1569,7 @@ bool FastISel::selectInstruction(const Instruction *I) {
if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
// PHI node handling may have generated local value instructions,
// even though it failed to handle all PHI nodes.
// We remove these instructions because SelectionDAGISel will generate
// them again.
removeDeadLocalValueCode(SavedLastLocalValue);
return false;
@ -1630,7 +1630,7 @@ bool FastISel::selectInstruction(const Instruction *I) {
DbgLoc = DebugLoc();
// Undo phi node updates, because they will be added again by SelectionDAG.
if (isa<TerminatorInst>(I)) {
// PHI node handling may have generated local value instructions.
// We remove them because SelectionDAGISel will generate them again.
removeDeadLocalValueCode(SavedLastLocalValue);
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);

@ -153,7 +153,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_ConstantFP(SDNode *N, unsigned ResNo) {
// of Endianness. LLVM's APFloat representation is not Endian sensitive,
// and so always converts into a 128-bit APInt in a non-Endian-sensitive
// way. However, APInt's are serialized in an Endian-sensitive fashion,
// so on big-Endian targets, the two doubles are output in the wrong
// order. Fix this by manually flipping the order of the high 64 bits
// and the low 64 bits here.
if (DAG.getDataLayout().isBigEndian() &&
@ -815,7 +815,7 @@ bool DAGTypeLegalizer::CanSkipSoftenFloatOperand(SDNode *N, unsigned OpNo) {
switch (N->getOpcode()) {
case ISD::ConstantFP: // Leaf node.
case ISD::CopyFromReg: // Operand is a register that we know to be left
// unchanged by SoftenFloatResult().
case ISD::Register: // Leaf node.
return true;
@ -838,7 +838,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_COPY_TO_REG(SDNode *N) {
if (N->getNumOperands() == 3)
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Op1, Op2), 0);
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Op1, Op2,
N->getOperand(3)),
0);
}

@ -510,7 +510,7 @@ private:
SDValue SoftenFloatRes_XINT_TO_FP(SDNode *N);
// Return true if we can skip softening the given operand or SDNode because
// either it was soften before by SoftenFloatResult and references to the
// operand were replaced by ReplaceValueWith or it's value type is legal in HW
// registers and the operand can be left unchanged.
bool CanSkipSoftenFloatOperand(SDNode *N, unsigned OpNo);

@ -131,7 +131,7 @@ class VectorLegalizer {
SDValue ExpandCTLZ(SDValue Op);
SDValue ExpandCTTZ_ZERO_UNDEF(SDValue Op);
SDValue ExpandStrictFPOp(SDValue Op);
/// Implements vector promotion.
///
/// This is essentially just bitcasting the operands to a different type and
@ -315,7 +315,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
// equivalent. For instance, if ISD::FSQRT is legal then ISD::STRICT_FSQRT
// is also legal, but if ISD::FSQRT requires expansion then so does
// ISD::STRICT_FSQRT.
Action = TLI.getStrictFPOperationAction(Node->getOpcode(),
Node->getValueType(0));
break;
case ISD::ADD:
@ -397,12 +397,12 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
case ISD::FP_ROUND_INREG:
Action = TLI.getOperationAction(Node->getOpcode(),
cast<VTSDNode>(Node->getOperand(1))->getVT());
break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
Action = TLI.getOperationAction(Node->getOpcode(),
Node->getOperand(0).getValueType());
break;
case ISD::MSCATTER:
@ -736,7 +736,7 @@ SDValue VectorLegalizer::Expand(SDValue Op) {
case ISD::CTTZ_ZERO_UNDEF:
return ExpandCTTZ_ZERO_UNDEF(Op);
case ISD::STRICT_FADD:
case ISD::STRICT_FSUB:
case ISD::STRICT_FMUL:
case ISD::STRICT_FDIV:
case ISD::STRICT_FSQRT:
@ -1153,24 +1153,24 @@ SDValue VectorLegalizer::ExpandStrictFPOp(SDValue Op) {
SmallVector<SDValue, 32> OpChains;
for (unsigned i = 0; i < NumElems; ++i) {
SmallVector<SDValue, 4> Opers;
SDValue Idx = DAG.getConstant(i, dl,
TLI.getVectorIdxTy(DAG.getDataLayout()));
// The Chain is the first operand.
Opers.push_back(Chain);
// Now process the remaining operands.
for (unsigned j = 1; j < NumOpers; ++j) {
SDValue Oper = Op.getOperand(j);
EVT OperVT = Oper.getValueType();
if (OperVT.isVector())
Oper = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
EltVT, Oper, Idx);
Opers.push_back(Oper);
}
SDValue ScalarOp = DAG.getNode(Op->getOpcode(), dl, ValueVTs, Opers);
OpValues.push_back(ScalarOp.getValue(0));

@ -1068,14 +1068,14 @@ void DAGTypeLegalizer::SplitVecRes_StrictFPOp(SDNode *N, SDValue &Lo,
OpsLo.push_back(Chain);
OpsHi.push_back(Chain);
// Now process the remaining operands.
for (unsigned i = 1; i < NumOps; ++i) {
SDValue Op = N->getOperand(i);
SDValue OpLo = Op;
SDValue OpHi = Op;
EVT InVT = Op.getValueType();
if (InVT.isVector()) {
// If the input also splits, handle it directly for a
// compile time speedup. Otherwise split it by hand.
if (getTypeAction(InVT) == TargetLowering::TypeSplitVector)
@ -1092,10 +1092,10 @@ void DAGTypeLegalizer::SplitVecRes_StrictFPOp(SDNode *N, SDValue &Lo,
EVT HiValueVTs[] = {HiVT, MVT::Other};
Lo = DAG.getNode(N->getOpcode(), dl, LoValueVTs, OpsLo);
Hi = DAG.getNode(N->getOpcode(), dl, HiValueVTs, OpsHi);
// Build a factor node to remember that this Op is independent of the
// other one.
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Lo.getValue(1), Hi.getValue(1));
// Legalize the chain result - switch anything that used the old chain to

@ -4987,7 +4987,7 @@ SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
unsigned DbgSDNodeOrder) {
if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
// Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
// stack slot locations.
//
// Consider "int x = 0; int *px = &x;". There are two kinds of interesting
// debug values here after optimization:

@ -419,10 +419,10 @@ static void lowerIncomingStatepointValue(SDValue Incoming, bool LiveInOnly,
Builder.getFrameIndexTy()));
} else if (LiveInOnly) {
// If this value is live in (not live-on-return, or live-through), we can
// treat it the same way patchpoint treats it's "live in" values. We'll
// end up folding some of these into stack references, but they'll be
// handled by the register allocator. Note that we do not have the notion
// of a late use so these values might be placed in registers which are
// clobbered by the call. This is fine for live-in.
Ops.push_back(Incoming);
} else {
@ -498,7 +498,7 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
auto isGCValue =[&](const Value *V) {
return is_contained(SI.Ptrs, V) || is_contained(SI.Bases, V);
};
// Before we actually start lowering (and allocating spill slots for values),
// reserve any stack slots which we judge to be profitable to reuse for a
// particular value. This is purely an optimization over the code below and

@ -175,7 +175,7 @@ bool ShadowStackGCLowering::doInitialization(Module &M) {
}
if (!Active)
return false;
// struct FrameMap {
// int32_t NumRoots; // Number of roots in stack frame.
// int32_t NumMeta; // Number of metadata descriptors. May be < NumRoots.
@ -286,7 +286,7 @@ bool ShadowStackGCLowering::runOnFunction(Function &F) {
if (!F.hasGC() ||
F.getGC() != std::string("shadow-stack"))
return false;
LLVMContext &Context = F.getContext();
// Find calls to llvm.gcroot.
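
The FrameMap comment above is the start of the in-memory contract between the lowered code and the collector's runtime. A rough sketch of the runtime-side structures, following that comment and LLVM's GC documentation (the names and the variable-length-array idiom here are illustrative, not taken from this commit):

#include <cstdint>

struct FrameMap {
  int32_t NumRoots;     // Number of roots in the stack frame.
  int32_t NumMeta;      // Number of metadata descriptors; may be < NumRoots.
  const void *Meta[1];  // Per-root metadata; variable length in practice.
};

struct StackEntry {
  StackEntry *Next;     // Caller's stack entry, forming a linked list.
  const FrameMap *Map;  // Static description of this frame's roots.
  void *Roots[1];       // The roots themselves; variable length in practice.
};

// Head of the shadow stack; the lowered code pushes and pops entries here.
extern "C" StackEntry *llvm_gc_root_chain;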

@ -233,7 +233,7 @@ public:
/// - Create a SplitEditor from a SplitAnalysis.
/// - Start a new live interval with openIntv.
/// - Mark the places where the new interval is entered using enterIntv*
/// - Mark the ranges where the new interval is used with useIntv*
/// - Mark the places where the interval is exited with exitIntv*.
/// - Finish the current interval with closeIntv and repeat from 2.
/// - Rewrite instructions with finish().

@ -632,7 +632,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::BITREVERSE, VT, Expand);
// These library functions default to expand.
setOperationAction(ISD::FROUND, VT, Expand);
setOperationAction(ISD::FPOWI, VT, Expand);
@ -924,7 +924,7 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
// STATEPOINT Deopt Spill - live-through, read only, indirect
// STATEPOINT Deopt Alloca - live-through, read only, direct
// (We're currently conservative and mark the deopt slots read/write in
// practice.)
// STATEPOINT GC Spill - live-through, read/write, indirect
// STATEPOINT GC Alloca - live-through, read/write, direct
// The live-in vs live-through is handled already (the live through ones are
@ -1411,7 +1411,7 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
*Fast = true;
return true;
}
// This is a misaligned access.
return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
}

@ -166,7 +166,7 @@ static cl::opt<CFLAAType> UseCFLAA(
"Enable unification-based CFL-AA"),
clEnumValN(CFLAAType::Andersen, "anders",
"Enable inclusion-based CFL-AA"),
clEnumValN(CFLAAType::Both, "both",
"Enable both variants of CFL-AA")));
/// Option names for limiting the codegen pipeline.

@ -54,7 +54,7 @@ static cl::opt<bool> DemoteCatchSwitchPHIOnlyOpt(
cl::desc("Demote catchswitch BBs only (for wasm EH)"), cl::init(false));
namespace {
class WinEHPrepare : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid.

@ -38,7 +38,7 @@ DWARFAbbreviationDeclaration::DWARFAbbreviationDeclaration() {
}
bool
DWARFAbbreviationDeclaration::extract(DataExtractor Data,
uint32_t* OffsetPtr) {
clear();
const uint32_t Offset = *OffsetPtr;

@ -474,7 +474,7 @@ void DWARFContext::dump(
while (rangesData.isValidOffset(offset)) {
if (Error E = rangeList.extract(rangesData, &offset)) {
WithColor::error() << toString(std::move(E)) << '\n';
break;
}
rangeList.dump(OS);
}

@ -155,7 +155,7 @@ std::error_code SymbolizableObjectFile::addSymbol(const SymbolRef &Symbol,
// of the function's code, not the descriptor.
uint64_t OpdOffset = SymbolAddress - OpdAddress;
uint32_t OpdOffset32 = OpdOffset;
if (OpdOffset == OpdOffset32 &&
OpdExtractor->isValidOffsetForAddress(OpdOffset32))
SymbolAddress = OpdExtractor->getAddress(&OpdOffset32);
}

@ -153,7 +153,7 @@ void LLVMInitializeMCJITCompilerOptions(LLVMMCJITCompilerOptions *PassedOptions,
LLVMMCJITCompilerOptions options;
memset(&options, 0, sizeof(options)); // Most fields are zero by default.
options.CodeModel = LLVMCodeModelJITDefault;
memcpy(PassedOptions, &options,
std::min(sizeof(options), SizeOfPassedOptions));
}
@ -171,14 +171,14 @@ LLVMBool LLVMCreateMCJITCompilerForModule(
"LLVM library mismatch.");
return 1;
}
// Defend against the user having an old version of the API by ensuring that
// any fields they didn't see are cleared. We must defend against fields being
// set to the bitwise equivalent of zero, and assume that this means "do the
// default" as if that option hadn't been available.
LLVMInitializeMCJITCompilerOptions(&options, sizeof(options));
memcpy(&options, PassedOptions, SizeOfPassedOptions);
TargetOptions targetOptions;
targetOptions.EnableFastISel = options.EnableFastISel;
std::unique_ptr<Module> Mod(unwrap(M));
@ -241,12 +241,12 @@ LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F,
unsigned NumArgs,
LLVMGenericValueRef *Args) {
unwrap(EE)->finalizeObject();
std::vector<GenericValue> ArgVec;
ArgVec.reserve(NumArgs);
for (unsigned I = 0; I != NumArgs; ++I)
ArgVec.push_back(*unwrap(Args[I]));
GenericValue *Result = new GenericValue();
*Result = unwrap(EE)->runFunction(unwrap<Function>(F), ArgVec);
return wrap(Result);
@ -297,7 +297,7 @@ void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global) {
unwrap(EE)->finalizeObject();
return unwrap(EE)->getPointerToGlobal(unwrap<GlobalValue>(Global));
}
@ -395,11 +395,11 @@ LLVMMCJITMemoryManagerRef LLVMCreateSimpleMCJITMemoryManager(
LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection,
LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory,
LLVMMemoryManagerDestroyCallback Destroy) {
if (!AllocateCodeSection || !AllocateDataSection || !FinalizeMemory ||
!Destroy)
return nullptr;
SimpleBindingMMFunctions functions;
functions.AllocateCodeSection = AllocateCodeSection;
functions.AllocateDataSection = AllocateDataSection;
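
The comment above describes the usual size-versioned options idiom: the caller passes sizeof of the struct it was compiled against, and the library copies only that many bytes, leaving any newer fields at their defaults. A standalone sketch of the idiom with hypothetical field names:

#include <algorithm>
#include <cstring>

struct JITOptions {
  unsigned OptLevel = 2;        // fields known to old callers come first
  bool EnableFastISel = false;
  bool NewerKnob = true;        // appended later; old callers never see it
};

// Fill the caller's (possibly smaller, older) struct with defaults.
static void initOptions(JITOptions *Passed, size_t SizeOfPassed) {
  JITOptions Defaults;
  std::memcpy(Passed, &Defaults, std::min(sizeof(Defaults), SizeOfPassed));
}

// Rebuild a full, current-layout struct from whatever the caller provided.
static JITOptions normalizeOptions(const JITOptions *Passed,
                                   size_t SizeOfPassed) {
  JITOptions Full;
  std::memcpy(&Full, Passed, std::min(sizeof(Full), SizeOfPassed));
  return Full;
}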

@ -7,7 +7,7 @@
*
*===----------------------------------------------------------------------===*
*
* This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
* Profiling API internal config.
*
* NOTE: This file comes in a style different from the rest of LLVM
@ -213,7 +213,7 @@ typedef pthread_mutex_t mutex_t;
#define __itt_thread_id() GetCurrentThreadId()
#define __itt_thread_yield() SwitchToThread()
#ifndef ITT_SIMPLE_INIT
ITT_INLINE long
__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
{
@ -273,7 +273,7 @@ ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend)
}
#endif /* ITT_ARCH==ITT_ARCH_IA64 */
#ifndef ITT_SIMPLE_INIT
ITT_INLINE long
__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
{

@ -7,7 +7,7 @@
*
*===----------------------------------------------------------------------===*
*
* This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
* Profiling API declaration.
*
* NOTE: This file comes in a style different from the rest of LLVM
@ -28,54 +28,54 @@ typedef enum iJIT_jvm_event
{
/* shutdown */
/*
* Program exiting EventSpecificData NA
*/
iJVM_EVENT_TYPE_SHUTDOWN = 2,
/* JIT profiling */
/*
* issued after method code jitted into memory but before code is executed
* EventSpecificData is an iJIT_Method_Load
*/
iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
/* issued before unload. Method code will no longer be executed, but code
* and info are still in memory. The VTune profiler may capture method
* code only at this point EventSpecificData is iJIT_Method_Id
*/
iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
/* Method Profiling */
/* method name, Id and stack is supplied
* issued when a method is about to be entered EventSpecificData is
* iJIT_Method_NIDS
*/
iJVM_EVENT_TYPE_ENTER_NIDS = 19,
/* method name, Id and stack is supplied
* issued when a method is about to be left EventSpecificData is
* iJIT_Method_NIDS
*/
iJVM_EVENT_TYPE_LEAVE_NIDS
} iJIT_JVM_EVENT;
typedef enum _iJIT_ModeFlags
{
/* No need to Notify VTune, since VTune is not running */
iJIT_NO_NOTIFICATIONS = 0x0000,
/* when turned on the jit must call
* iJIT_NotifyEvent
* (
* iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
* )
* for all the method already jitted
*/
iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
/* when turned on the jit must call
* iJIT_NotifyEvent
@ -83,19 +83,19 @@ typedef enum _iJIT_ModeFlags
* iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED,
* ) for all the method that are unloaded
*/
iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
/* when turned on the jit must instrument all
* the currently jited code with calls on
* method entries
*/
iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
/* when turned on the jit must instrument all
* the currently jited code with calls
* on method exit
*/
iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
} iJIT_ModeFlags;
@ -104,13 +104,13 @@ typedef enum _iJIT_ModeFlags
typedef enum _iJIT_IsProfilingActiveFlags
{
/* No profiler is running. Currently not used */
iJIT_NOTHING_RUNNING = 0x0000,
/* Sampling is running. This is the default value
* returned by iJIT_IsProfilingActive()
*/
iJIT_SAMPLING_ON = 0x0001,
/* Call Graph is running */
iJIT_CALLGRAPH_ON = 0x0002
@ -135,7 +135,7 @@ typedef struct _iJIT_Method_Id
/* Id of the method (same as the one passed in
* the iJIT_Method_Load struct
*/
unsigned int method_id;
} *piJIT_Method_Id, iJIT_Method_Id;
@ -149,13 +149,13 @@ typedef struct _iJIT_Method_Id
typedef struct _iJIT_Method_NIDS
{
/* unique method ID */
unsigned int method_id;
/* NOTE: no need to fill this field, it's filled by VTune */
unsigned int stack_id;
/* method name (just the method, without the class) */
char* method_name;
} *piJIT_Method_NIDS, iJIT_Method_NIDS;
/* structures for the events:
@ -168,51 +168,51 @@ typedef struct _LineNumberInfo
unsigned int Offset;
/* source line number from the beginning of the source file */
unsigned int LineNumber;
} *pLineNumberInfo, LineNumberInfo;
typedef struct _iJIT_Method_Load
{
/* unique method ID - can be any unique value, (except 0 - 999) */
unsigned int method_id;
/* method name (can be with or without the class and signature, in any case
* the class name will be added to it)
*/
char* method_name;
/* virtual address of that method - This determines the method range for the
* iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events
*/
void* method_load_address;
/* Size in memory - Must be exact */
unsigned int method_size;
/* Line Table size in number of entries - Zero if none */
unsigned int line_number_size;
/* Pointer to the beginning of the line numbers info array */
pLineNumberInfo line_number_table;
/* unique class ID */
unsigned int class_id;
/* class file name */
char* class_file_name;
/* source file name */
char* source_file_name;
/* bits supplied by the user for saving in the JIT file */
void* user_data;
/* the size of the user data buffer */
unsigned int user_data_size;
/* NOTE: no need to fill this field, it's filled by VTune */
iJDEnvironmentType env;
} *piJIT_Method_Load, iJIT_Method_Load;
@ -241,7 +241,7 @@ typedef void (*iJIT_ModeChangedEx)(void *UserData, iJIT_ModeFlags Flags);
int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
/* The new mode call back routine */
void JITAPI iJIT_RegisterCallbackEx(void *userdata,
iJIT_ModeChangedEx NewModeCallBackFuncEx);
iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
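
For context, the declarations above are the Intel JIT profiling API that the JIT event listener drives. A minimal usage sketch, assuming jitprofiling.h and its runtime are linked in; iJIT_GetNewMethodID is part of the same header but not shown in this hunk, and the field values are illustrative:

#include <cstring>
#include "jitprofiling.h"

void notifyMethodLoad(void *Code, unsigned SizeInBytes) {
  if (iJIT_IsProfilingActive() != iJIT_SAMPLING_ON)
    return;                                  // no profiler attached
  iJIT_Method_Load ML;
  std::memset(&ML, 0, sizeof(ML));           // unset fields stay zero
  ML.method_id = iJIT_GetNewMethodID();      // unique id (assumed helper)
  ML.method_name = const_cast<char *>("jitted_function");
  ML.method_load_address = Code;             // start of the emitted code
  ML.method_size = SizeInBytes;              // must be exact
  iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, &ML);
}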

@ -85,7 +85,7 @@ static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
}
}
static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(/, Float);
@ -96,7 +96,7 @@ static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
}
}
static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
case Type::FloatTyID:
@ -281,7 +281,7 @@ void Interpreter::visitICmpInst(ICmpInst &I) {
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
switch (I.getPredicate()) {
case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
@ -297,7 +297,7 @@ void Interpreter::visitICmpInst(ICmpInst &I) {
dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
llvm_unreachable(nullptr);
}
SetValue(&I, R, SF);
}
@ -552,10 +552,10 @@ static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
Src2.AggregateVal[_i].DoubleVal)));
}
} else if (Ty->isFloatTy())
Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
Src2.FloatVal == Src2.FloatVal));
else {
Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
Src2.DoubleVal == Src2.DoubleVal));
}
return Dest;
@ -583,10 +583,10 @@ static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
Src2.AggregateVal[_i].DoubleVal)));
}
} else if (Ty->isFloatTy())
Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
Src2.FloatVal != Src2.FloatVal));
else {
Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
Src2.DoubleVal != Src2.DoubleVal));
}
return Dest;
@ -613,15 +613,15 @@ void Interpreter::visitFCmpInst(FCmpInst &I) {
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
switch (I.getPredicate()) {
default:
dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
llvm_unreachable(nullptr);
break;
case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
break;
case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true);
break;
case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
@ -638,11 +638,11 @@ void Interpreter::visitFCmpInst(FCmpInst &I) {
case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
}
SetValue(&I, R, SF);
}
static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
GenericValue Src2, Type *Ty) {
GenericValue Result;
switch (predicate) {
@ -747,12 +747,12 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
case Instruction::FRem:
if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
R.AggregateVal[i].FloatVal =
fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
else {
if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
R.AggregateVal[i].DoubleVal =
fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
else {
dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
@ -965,7 +965,7 @@ void Interpreter::visitAllocaInst(AllocaInst &I) {
Type *Ty = I.getType()->getElementType(); // Type to be allocated
// Get the number of elements being allocated by the array...
unsigned NumElements =
getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);
@ -1011,7 +1011,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
int64_t Idx;
unsigned BitWidth =
cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
if (BitWidth == 32)
Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
@ -2037,13 +2037,13 @@ GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
case Instruction::And: Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
case Instruction::Or: Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
case Instruction::Shl:
Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
break;
case Instruction::LShr:
Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
break;
case Instruction::AShr:
Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
break;
default:
@ -2100,7 +2100,7 @@ void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
// Handle non-varargs arguments...
unsigned i = 0;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI, ++i)
SetValue(&*AI, ArgVals[i], StackFrame);
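
The executeFCMP_ORD/UNO hunks above rely on the IEEE rule that a value compares unequal to itself exactly when it is NaN. A standalone illustration of that check (compiled without fast-math, or the trick breaks down):

#include <cassert>
#include <limits>

// Ordered: neither operand is NaN. Unordered: at least one operand is NaN.
static bool isOrdered(double A, double B) { return A == A && B == B; }
static bool isUnordered(double A, double B) { return A != A || B != B; }

int main() {
  const double NaN = std::numeric_limits<double>::quiet_NaN();
  assert(isOrdered(1.0, 2.0));
  assert(!isOrdered(NaN, 2.0));
  assert(isUnordered(NaN, NaN));
  assert(!isUnordered(0.5, -0.5));
  return 0;
}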

@ -132,8 +132,8 @@ public:
void visitLoadInst(LoadInst &I);
void visitStoreInst(StoreInst &I);
void visitGetElementPtrInst(GetElementPtrInst &I);
void visitPHINode(PHINode &PN) {
llvm_unreachable("PHI nodes already handled!");
}
void visitTruncInst(TruncInst &I);
void visitZExtInst(ZExtInst &I);
@ -224,7 +224,7 @@ private: // Helper functions
ExecutionContext &SF);
GenericValue executeBitCastInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
Type *Ty, ExecutionContext &SF);
void popStackAndReturnValueToCaller(Type *RetTy, GenericValue Result);

@ -119,10 +119,10 @@ void RTDyldMemoryManager::deregisterEHFramesInProcess(uint8_t *Addr,
void RTDyldMemoryManager::registerEHFramesInProcess(uint8_t *Addr,
size_t Size) {
// On Linux __register_frame takes a single argument:
// a pointer to the start of the .eh_frame section.
// How can it find the end? Because crtendS.o is linked
// in and it has an .eh_frame section with four zero chars.
__register_frame(Addr);
}
@ -255,7 +255,7 @@ RTDyldMemoryManager::getSymbolAddressInProcess(const std::string &Name) {
return (uint64_t)&__morestack;
#endif
#endif // __linux__ && __GLIBC__
// See ARM_MATH_IMPORTS definition for explanation
#if defined(__BIONIC__) && defined(__arm__)
if (Name.compare(0, 8, "__aeabi_") == 0) {

@ -1430,7 +1430,7 @@ RuntimeDyldELF::processRelocationRef(
} else {
processSimpleRelocation(SectionID, Offset, RelType, Value);
}
} else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
if (RelType == ELF::R_PPC64_REL24) {
// Determine ABI variant in use for this object.

@ -93,7 +93,7 @@ void llvm::handleExecNameEncodedOptimizerOpts(StringRef ExecName) {
Args.push_back("-passes=gvn");
} else if (Opt == "sccp") {
Args.push_back("-passes=sccp");
} else if (Opt == "loop_predication") {
Args.push_back("-passes=loop-predication");
} else if (Opt == "guard_widening") {
@ -114,7 +114,7 @@ void llvm::handleExecNameEncodedOptimizerOpts(StringRef ExecName) {
Args.push_back("-passes=strength-reduce");
} else if (Opt == "irce") {
Args.push_back("-passes=irce");
} else if (Triple(Opt).getArch()) {
Args.push_back("-mtriple=" + Opt.str());
} else {
@ -204,6 +204,6 @@ std::unique_ptr<Module> llvm::parseAndVerify(const uint8_t *Data, size_t Size,
auto M = parseModule(Data, Size, Context);
if (!M || verifyModule(*M, &errs()))
return nullptr;
return M;
}

@ -94,7 +94,7 @@ static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
Name.startswith("avx512.kunpck") || //added in 6.0
Name.startswith("avx2.pabs.") || // Added in 6.0
Name.startswith("avx512.mask.pabs.") || // Added in 6.0
Name.startswith("avx512.broadcastm") || // Added in 6.0

@ -586,7 +586,7 @@ static std::string getMangledTypeStr(Type* Ty) {
if (FT->isVarArg())
Result += "vararg";
// Ensure nested function types are distinguishable.
Result += "f";
} else if (isa<VectorType>(Ty)) {
Result += "v" + utostr(Ty->getVectorNumElements()) +
getMangledTypeStr(Ty->getVectorElementType());

@ -57,7 +57,7 @@ void InlineAsm::destroyConstant() {
FunctionType *InlineAsm::getFunctionType() const {
return FTy;
}
/// Parse - Analyze the specified string (e.g. "==&{eax}") and fill in the
/// fields in this structure. If the constraint string is not understood,
/// return true, otherwise return false.
@ -80,7 +80,7 @@ bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
isCommutative = false;
isIndirect = false;
currentAlternativeIndex = 0;
// Parse prefixes.
if (*I == '~') {
Type = isClobber;
@ -100,7 +100,7 @@ bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
}
if (I == E) return true; // Just a prefix, like "==" or "~".
// Parse the modifiers.
bool DoneWithModifiers = false;
while (!DoneWithModifiers) {
@ -124,13 +124,13 @@ bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
case '*': // Register preferencing.
return true; // Not supported.
}
if (!DoneWithModifiers) {
++I;
if (I == E) return true; // Just prefixes and modifiers!
}
}
// Parse the various constraints.
while (I != E) {
if (*I == '{') { // Physical register reference.
@ -150,7 +150,7 @@ bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
if (N >= ConstraintsSoFar.size() || ConstraintsSoFar[N].Type != isOutput||
Type != isInput)
return true; // Invalid constraint number.
// If Operand N already has a matching input, reject this. An output
// can't be constrained to the same value as multiple inputs.
if (isMultipleAlternative) {
@ -207,7 +207,7 @@ void InlineAsm::ConstraintInfo::selectAlternative(unsigned index) {
InlineAsm::ConstraintInfoVector
InlineAsm::ParseConstraints(StringRef Constraints) {
ConstraintInfoVector Result;
// Scan the constraints string.
for (StringRef::iterator I = Constraints.begin(),
E = Constraints.end(); I != E; ) {
@ -223,7 +223,7 @@ InlineAsm::ParseConstraints(StringRef Constraints) {
}
Result.push_back(Info);
// ConstraintEnd may be either the next comma or the end of the string. In
// the former case, we skip the comma.
I = ConstraintEnd;
@ -235,7 +235,7 @@ InlineAsm::ParseConstraints(StringRef Constraints) {
} // don't allow "xyz,"
}
}
return Result;
}
@ -243,15 +243,15 @@ InlineAsm::ParseConstraints(StringRef Constraints) {
/// specified function type, and otherwise validate the constraint string.
bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) {
if (Ty->isVarArg()) return false;
ConstraintInfoVector Constraints = ParseConstraints(ConstStr);
// Error parsing constraints.
if (Constraints.empty() && !ConstStr.empty()) return false;
unsigned NumOutputs = 0, NumInputs = 0, NumClobbers = 0;
unsigned NumIndirect = 0;
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
switch (Constraints[i].Type) {
case InlineAsm::isOutput:
@ -272,7 +272,7 @@ bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) {
break;
}
}
switch (NumOutputs) {
case 0:
if (!Ty->getReturnType()->isVoidTy()) return false;
@ -285,8 +285,8 @@ bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) {
if (!STy || STy->getNumElements() != NumOutputs)
return false;
break;
}
if (Ty->getNumParams() != NumInputs) return false;
return true;
}
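
Verify above is essentially a walk over ParseConstraints' output. A small sketch of the same walk (the helper name is hypothetical); for example, the constraint string "=r,r,~{memory}" parses to one output, one input and one clobber:

#include "llvm/IR/InlineAsm.h"
using namespace llvm;

static void countConstraints(StringRef ConstStr, unsigned &NumOutputs,
                             unsigned &NumInputs, unsigned &NumClobbers) {
  NumOutputs = NumInputs = NumClobbers = 0;
  InlineAsm::ConstraintInfoVector CV = InlineAsm::ParseConstraints(ConstStr);
  for (const InlineAsm::ConstraintInfo &Info : CV) {
    switch (Info.Type) {
    case InlineAsm::isOutput:  ++NumOutputs;  break;
    case InlineAsm::isInput:   ++NumInputs;   break;
    case InlineAsm::isClobber: ++NumClobbers; break;
    }
  }
}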

@ -310,7 +310,7 @@ void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
"Calling a function with bad signature!");
for (unsigned i = 0; i != Args.size(); ++i)
assert((i >= FTy->getNumParams() ||
FTy->getParamType(i) == Args[i]->getType()) &&
"Calling a function with a bad signature!");
#endif
@ -409,7 +409,7 @@ static Instruction *createMalloc(Instruction *InsertBefore,
assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
"createMalloc needs either InsertBefore or InsertAtEnd");
// malloc(type) becomes:
// bitcast (i8* malloc(typeSize)) to type*
// malloc(type, arraySize) becomes:
// bitcast (i8* malloc(typeSize*arraySize)) to type*
@ -516,7 +516,7 @@ Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
/// responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
Type *IntPtrTy, Type *AllocTy,
Value *AllocSize, Value *ArraySize,
Function *MallocF, const Twine &Name) {
return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
ArraySize, None, MallocF, Name);
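
The CreateMalloc overloads above implement the "bitcast (i8* malloc(size)) to type*" pattern described in the comment. A usage sketch for the single-element case (the helper name is illustrative):

#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Emit "bitcast (i8* malloc(sizeof(T))) to T*" in front of IP.
static Instruction *emitTypedMalloc(Instruction *IP, Type *T,
                                    const DataLayout &DL) {
  Type *IntPtrTy = DL.getIntPtrType(T->getContext());
  Constant *AllocSize = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(T));
  return CallInst::CreateMalloc(IP, IntPtrTy, T, AllocSize,
                                /*ArraySize=*/nullptr,
                                /*MallocF=*/nullptr, "obj");
}
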
@ -612,7 +612,7 @@ void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
"Invoking a function with bad signature");
for (unsigned i = 0, e = Args.size(); i != e; i++)
assert((i >= FTy->getNumParams() ||
FTy->getParamType(i) == Args[i]->getType()) &&
"Invoking a function with a bad signature!");
#endif
@ -912,7 +912,7 @@ FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//
UnreachableInst::UnreachableInst(LLVMContext &Context,
Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
nullptr, 0, InsertBefore) {
@ -1072,7 +1072,7 @@ bool AllocaInst::isArrayAllocation() const {
bool AllocaInst::isStaticAlloca() const {
// Must be constant size.
if (!isa<ConstantInt>(getArraySize())) return false;
// Must be in the entry block.
const BasicBlock *Parent = getParent();
return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
@ -1125,7 +1125,7 @@ LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
setName(Name);
}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SyncScope::ID SSID,
BasicBlock *InsertAE)
@ -1380,7 +1380,7 @@ AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
// FenceInst Implementation
//===----------------------------------------------------------------------===//
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
@ -1388,7 +1388,7 @@ FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
setSyncScopeID(SSID);
}
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
@ -1575,14 +1575,14 @@ InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
setName(Name);
}
bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
const Value *Index) {
if (!Vec->getType()->isVectorTy())
return false; // First operand of insertelement must be vector type.
if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
return false;// Second operand of insertelement must be vector element type.
if (!Index->getType()->isIntegerTy())
return false; // Third operand of insertelement must be i32.
return true;
@ -1632,7 +1632,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
// V1 and V2 must be vectors of the same type.
if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
return false;
// Mask must be vector of i32.
auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
@ -1654,7 +1654,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
}
return true;
}
if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
@ -1662,7 +1662,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
return false;
return true;
}
// The bitcode reader can create a place holder for a forward reference
// used as the shuffle mask. When this occurs, the shuffle mask will
// fall into this case and fail. To avoid this error, do this bit of
@ -1687,12 +1687,12 @@ int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) {
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
SmallVectorImpl<int> &Result) {
unsigned NumElts = Mask->getType()->getVectorNumElements();
if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
for (unsigned i = 0; i != NumElts; ++i)
Result.push_back(CDS->getElementAsInteger(i));
return;
}
for (unsigned i = 0; i != NumElts; ++i) {
Constant *C = Mask->getAggregateElement(i);
Result.push_back(isa<UndefValue>(C) ? -1 :
@ -1806,7 +1806,7 @@ bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
// InsertValueInst Class
//===----------------------------------------------------------------------===//
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
const Twine &Name) {
assert(getNumOperands() == 2 && "NumOperands not initialized?");
@ -1903,7 +1903,7 @@ BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
AssertOK();
}
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
Type *Ty, const Twine &Name,
BasicBlock *InsertAtEnd)
: Instruction(Ty, iType,
@ -1938,8 +1938,8 @@ void BinaryOperator::AssertOK() {
"Tried to create a floating-point operation on a "
"non-floating-point type!");
break;
case UDiv:
case SDiv:
assert(getType() == LHS->getType() &&
"Arithmetic operation should return same type as operands!");
assert(getType()->isIntOrIntVectorTy() &&
@ -1951,8 +1951,8 @@ void BinaryOperator::AssertOK() {
assert(getType()->isFPOrFPVectorTy() &&
"Incorrect operand type (not floating point) for FDIV");
break;
case URem:
case SRem:
assert(getType() == LHS->getType() &&
"Arithmetic operation should return same type as operands!");
assert(getType()->isIntOrIntVectorTy() &&
@ -2185,7 +2185,7 @@ bool CastInst::isLosslessCast() const {
Type *DstTy = getType();
if (SrcTy == DstTy)
return true;
// Pointer to pointer is always lossless.
if (SrcTy->isPointerTy())
return DstTy->isPointerTy();
@ -2194,10 +2194,10 @@ bool CastInst::isLosslessCast() const {
/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
@ -2208,7 +2208,7 @@ bool CastInst::isNoopCast(Instruction::CastOps Opcode,
default: llvm_unreachable("Invalid CastOp");
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::UIToFP:
@ -2247,7 +2247,7 @@ unsigned CastInst::isEliminableCastPair(
Type *DstIntPtrTy) {
// Define the 144 possibilities for these two cast instructions. The values
// in this matrix determine what to do in a given situation and select the
// case in the switch below. The rows correspond to firstOp, the columns
// correspond to secondOp. In looking at the table below, keep in mind
// the following cast properties:
//
@ -2315,16 +2315,16 @@ unsigned CastInst::isEliminableCastPair(
int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
[secondOp-Instruction::CastOpsBegin];
switch (ElimCase) {
case 0:
// Categorically disallowed.
return 0;
case 1:
// Allowed, use first cast's opcode.
return firstOp;
case 2:
// Allowed, use second cast's opcode.
return secondOp;
case 3:
// No-op cast in second op implies firstOp as long as the DestTy
// is integer and we are not converting between a vector and a
// non-vector type.
@ -2337,7 +2337,7 @@ unsigned CastInst::isEliminableCastPair(
if (DstTy->isFloatingPointTy())
return firstOp;
return 0;
case 5:
// No-op cast in first op implies secondOp as long as the SrcTy
// is an integer.
if (SrcTy->isIntegerTy())
@ -2449,7 +2449,7 @@ unsigned CastInst::isEliminableCastPair(
case 17:
// (sitofp (zext x)) -> (uitofp x)
return Instruction::UIToFP;
case 99:
// Cast combination can't happen (error in input). This is for all cases
// where the MidTy is not the same for the two cast instructions.
llvm_unreachable("Invalid Cast Combination");
@ -2458,7 +2458,7 @@ unsigned CastInst::isEliminableCastPair(
}
}
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
const Twine &Name, Instruction *InsertBefore) {
assert(castIsValid(op, S, Ty) && "Invalid cast!");
// Construct and return the appropriate CastInst subclass
@ -2502,7 +2502,7 @@ CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
}
}
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
Instruction *InsertBefore) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@ -2510,7 +2510,7 @@ CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@ -2518,7 +2518,7 @@ CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
}
CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
Instruction *InsertBefore) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@ -2526,7 +2526,7 @@ CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}
CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@ -2543,7 +2543,7 @@ CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
}
CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
@ -2636,7 +2636,7 @@ CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
return Create(opcode, C, Ty, Name, InsertBefore);
}
CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
bool isSigned, const Twine &Name,
BasicBlock *InsertAtEnd) {
assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
@ -2650,8 +2650,8 @@ CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
return Create(opcode, C, Ty, Name, InsertAtEnd);
}
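A short usage sketch (the value V, Int64Ty, and the insertion point InsertPt are assumed): the helper compares bit widths and picks trunc, bitcast, or the requested extension on the caller's behalf.

// Widen V to i64, sign-extending because isSigned is true; an equal-width
// source would get a bitcast and a wider source would be truncated.
CastInst *Wide = CastInst::CreateIntegerCast(V, Int64Ty, /*isSigned=*/true,
                                             "wide", InsertPt);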
CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
const Twine &Name,
Instruction *InsertBefore) {
assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
"Invalid cast");
@ -2663,8 +2663,8 @@ CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
return Create(opcode, C, Ty, Name, InsertBefore);
}
CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
"Invalid cast");
@ -2707,7 +2707,7 @@ bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
return DestBits == SrcBits;
// Casting from something else
return SrcTy->isPointerTy();
}
if (DestTy->isFloatingPointTy()) { // Casting to floating pt
if (SrcTy->isIntegerTy()) // Casting from integral
return true;
@ -2724,7 +2724,7 @@ bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
if (SrcTy->isPointerTy()) // Casting from pointer
return true;
return SrcTy->isIntegerTy(); // Casting from integral
}
if (DestTy->isX86_MMXTy()) {
if (SrcTy->isVectorTy())
return DestBits == SrcBits; // 64-bit vector to MMX
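A small sketch of the rules above, with types taken from an assumed LLVMContext Ctx:

Type *I64 = Type::getInt64Ty(Ctx);
Type *F32 = Type::getFloatTy(Ctx);
Type *I8Ptr = Type::getInt8PtrTy(Ctx);
assert(CastInst::isCastable(I64, I8Ptr));   // inttoptr covers integer -> pointer
assert(!CastInst::isCastable(F32, I8Ptr));  // no single cast goes fp -> pointer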
@ -2834,10 +2834,10 @@ CastInst::getCastOpcode(
return BitCast; // Same size, No-op cast
}
} else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
if (DestIsSigned)
return FPToSI; // FP -> sint
else
return FPToUI; // FP -> uint
} else if (SrcTy->isVectorTy()) {
assert(DestBits == SrcBits &&
"Casting vector to integer of different width");
@ -2898,7 +2898,7 @@ CastInst::getCastOpcode(
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
// Check for type sanity on the arguments
Type *SrcTy = S->getType();
@ -2928,7 +2928,7 @@ CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
case Instruction::ZExt:
return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
SrcLength == DstLength && SrcBitSize < DstBitSize;
case Instruction::SExt:
return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
SrcLength == DstLength && SrcBitSize < DstBitSize;
case Instruction::FPTrunc:
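A quick sketch of the width rule (I32Val of type i32 and the integer types assumed): both integer extensions must strictly widen.

assert(!CastInst::castIsValid(Instruction::SExt, I32Val, Int32Ty)); // same width: invalid
assert(CastInst::castIsValid(Instruction::SExt, I32Val, Int64Ty));  // i32 -> i64: valid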
@ -3019,138 +3019,138 @@ TruncInst::TruncInst(
TruncInst::TruncInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}
ZExtInst::ZExtInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}
ZExtInst::ZExtInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}
SExtInst::SExtInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SExt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}
SExtInst::SExtInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}
FPTruncInst::FPTruncInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}
FPTruncInst::FPTruncInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}
FPExtInst::FPExtInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}
FPExtInst::FPExtInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}
UIToFPInst::UIToFPInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}
UIToFPInst::UIToFPInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}
SIToFPInst::SIToFPInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}
SIToFPInst::SIToFPInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}
FPToUIInst::FPToUIInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}
FPToUIInst::FPToUIInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}
FPToSIInst::FPToSIInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}
FPToSIInst::FPToSIInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}
PtrToIntInst::PtrToIntInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}
PtrToIntInst::PtrToIntInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}
IntToPtrInst::IntToPtrInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}
IntToPtrInst::IntToPtrInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}
BitCastInst::BitCastInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}
BitCastInst::BitCastInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}
@ -3205,7 +3205,7 @@ CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
return new ICmpInst(CmpInst::Predicate(predicate),
S1, S2, Name);
}
if (InsertBefore)
return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
S1, S2, Name);
@ -3312,8 +3312,8 @@ StringRef CmpInst::getPredicateName(Predicate Pred) {
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
switch (pred) {
default: llvm_unreachable("Unknown icmp predicate!");
case ICMP_EQ: case ICMP_NE:
case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
return pred;
case ICMP_UGT: return ICMP_SGT;
case ICMP_ULT: return ICMP_SLT;
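In short, the unsigned relational predicates map to their signed counterparts while eq/ne and the already-signed ones pass through; a minimal sketch:

assert(ICmpInst::getSignedPredicate(ICmpInst::ICMP_ULT) == ICmpInst::ICMP_SLT);
assert(ICmpInst::getSignedPredicate(ICmpInst::ICMP_EQ)  == ICmpInst::ICMP_EQ);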
@ -3325,8 +3325,8 @@ ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
switch (pred) {
default: llvm_unreachable("Unknown icmp predicate!");
case ICMP_EQ: case ICMP_NE:
case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
return pred;
case ICMP_SGT: return ICMP_UGT;
case ICMP_SLT: return ICMP_ULT;
@ -3371,7 +3371,7 @@ CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
case ICMP_ULT: return ICMP_UGT;
case ICMP_UGE: return ICMP_ULE;
case ICMP_ULE: return ICMP_UGE;
case FCMP_FALSE: case FCMP_TRUE:
case FCMP_OEQ: case FCMP_ONE:
case FCMP_UEQ: case FCMP_UNE:
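A small sketch of the swap: reversing the operands of "a u< b" gives "b u> a", while symmetric predicates such as oeq are unchanged.

assert(CmpInst::getSwappedPredicate(CmpInst::ICMP_ULT) == CmpInst::ICMP_UGT);
assert(CmpInst::getSwappedPredicate(CmpInst::FCMP_OEQ) == CmpInst::FCMP_OEQ);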
@ -3422,7 +3422,7 @@ CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
bool CmpInst::isUnsigned(Predicate predicate) {
switch (predicate) {
default: return false;
case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE: return true;
}
}
@ -3430,7 +3430,7 @@ bool CmpInst::isUnsigned(Predicate predicate) {
bool CmpInst::isSigned(Predicate predicate) {
switch (predicate) {
default: return false;
case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE: return true;
}
}
@ -3438,17 +3438,17 @@ bool CmpInst::isSigned(Predicate predicate) {
bool CmpInst::isOrdered(Predicate predicate) {
switch (predicate) {
default: return false;
case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
case FCmpInst::FCMP_ORD: return true;
}
}
bool CmpInst::isUnordered(Predicate predicate) {
switch (predicate) {
default: return false;
case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
case FCmpInst::FCMP_UNO: return true;
}
}
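For example, olt requires that neither operand be NaN, ult also succeeds when one is, and integer predicates are neither ordered nor unordered:

assert(CmpInst::isOrdered(CmpInst::FCMP_OLT));
assert(CmpInst::isUnordered(CmpInst::FCMP_ULT));
assert(!CmpInst::isOrdered(CmpInst::ICMP_SLT) &&
       !CmpInst::isUnordered(CmpInst::ICMP_SLT));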
@ -3619,7 +3619,7 @@ void IndirectBrInst::init(Value *Address, unsigned NumDests) {
void IndirectBrInst::growOperands() {
unsigned e = getNumOperands();
unsigned NumOps = e*2;
ReservedSpace = NumOps;
growHungoffUses(ReservedSpace);
}
@ -3665,13 +3665,13 @@ void IndirectBrInst::addDestination(BasicBlock *DestBB) {
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
assert(idx < getNumOperands()-1 && "Successor index out of range!");
unsigned NumOps = getNumOperands();
Use *OL = getOperandList();
// Replace this value with the last one.
OL[idx+1] = OL[NumOps-1];
// Nuke the last value.
OL[NumOps-1].set(nullptr);
setNumHungOffUseOperands(NumOps-1);
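For comparison, the same swap-with-last idiom on an ordinary container (a hypothetical std::vector<BasicBlock*> Dests is assumed): removal is O(1), and as with the operand list above the relative order of the remaining entries is not preserved.

Dests[idx] = Dests.back();  // overwrite the removed slot with the final element
Dests.pop_back();           // drop the now-duplicated tail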
@ -3725,7 +3725,7 @@ LoadInst *LoadInst::cloneImpl() const {
StoreInst *StoreInst::cloneImpl() const {
return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
getAlignment(), getOrdering(), getSyncScopeID());
}
AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {


@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
// This file declares LLVMContextImpl, the opaque implementation
// of LLVMContext.
//
//===----------------------------------------------------------------------===//
@ -1217,7 +1217,7 @@ public:
/// OwnedModules - The set of modules instantiated in this context, and which
/// will be automatically deleted if this context is deleted.
SmallPtrSet<Module*, 4> OwnedModules;
LLVMContext::InlineAsmDiagHandlerTy InlineAsmDiagHandler = nullptr;
void *InlineAsmDiagContext = nullptr;
@ -1265,10 +1265,10 @@ public:
using ArrayConstantsTy = ConstantUniqueMap<ConstantArray>;
ArrayConstantsTy ArrayConstants;
using StructConstantsTy = ConstantUniqueMap<ConstantStruct>;
StructConstantsTy StructConstants;
using VectorConstantsTy = ConstantUniqueMap<ConstantVector>;
VectorConstantsTy VectorConstants;
@ -1293,11 +1293,11 @@ public:
Type VoidTy, LabelTy, HalfTy, FloatTy, DoubleTy, MetadataTy, TokenTy;
Type X86_FP80Ty, FP128Ty, PPC_FP128Ty, X86_MMXTy;
IntegerType Int1Ty, Int8Ty, Int16Ty, Int32Ty, Int64Ty, Int128Ty;
/// TypeAllocator - All dynamically allocated types are allocated from this.
/// They live forever until the context is torn down.
BumpPtrAllocator TypeAllocator;
DenseMap<unsigned, IntegerType*> IntegerTypes;
using FunctionTypeSet = DenseSet<FunctionType *, FunctionTypeKeyInfo>;
@ -1306,7 +1306,7 @@ public:
StructTypeSet AnonStructTypes;
StringMap<StructType*> NamedStructTypes;
unsigned NamedStructTypesUniqueID = 0;
DenseMap<std::pair<Type *, uint64_t>, ArrayType*> ArrayTypes;
DenseMap<std::pair<Type *, unsigned>, VectorType*> VectorTypes;
DenseMap<Type*, PointerType*> PointerTypes; // Pointers in AddrSpace = 0
@ -1317,7 +1317,7 @@ public:
/// whether or not a value has an entry in this map.
using ValueHandlesTy = DenseMap<Value *, ValueHandleBase *>;
ValueHandlesTy ValueHandles;
/// CustomMDKindNames - Map to hold the metadata string to ID mapping.
StringMap<unsigned> CustomMDKindNames;


@ -33,17 +33,17 @@ void SymbolTableListTraits<ValueSubClass>::setSymTabObject(TPtr *Dest,
// Do it.
*Dest = Src;
// Get the new SymTab object.
ValueSymbolTable *NewST = getSymTab(getListOwner());
// If there is nothing to do, quick exit.
if (OldST == NewST) return;
// Move all the elements from the old symtab to the new one.
ListTy &ItemList = getList(getListOwner());
if (ItemList.empty()) return;
if (OldST) {
// Remove all entries from the previous symtab.
for (auto I = ItemList.begin(); I != ItemList.end(); ++I)
@ -57,7 +57,7 @@ void SymbolTableListTraits<ValueSubClass>::setSymTabObject(TPtr *Dest,
if (I->hasName())
NewST->reinsertValue(&*I);
}
}
template <typename ValueSubClass>


@ -79,7 +79,7 @@ void ValueSymbolTable::reinsertValue(Value* V) {
// *V << "\n");
return;
}
// Otherwise, there is a naming conflict. Rename this value.
SmallString<256> UniqueName(V->getName().begin(), V->getName().end());
@ -107,7 +107,7 @@ ValueName *ValueSymbolTable::createValueName(StringRef Name, Value *V) {
// << *V << "\n");
return &*IterBool.first;
}
// Otherwise, there is a naming conflict. Rename this value.
SmallString<256> UniqueName(Name.begin(), Name.end());
return makeUniqueName(V, UniqueName);
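Seen from the IR-building side, a sketch (an IRBuilder<> Builder and non-constant operands X, Y are assumed): asking for a name that is already taken does not fail, the second value is simply given a uniqued name.

Value *A = Builder.CreateAdd(X, Y, "tmp"); // registered as %tmp
Value *B = Builder.CreateMul(X, Y, "tmp"); // conflict: renamed, e.g. to %tmp1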
