Fix typos in comments, NFC
Summary: Just fixing comments, no functional change.

Test Plan: N/A

Reviewers: jfb

Subscribers: mcrosier, llvm-commits

Differential Revision: http://reviews.llvm.org/D5130

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216784 91177308-0d34-0410-b5e6-96231b3b80d8
parent 039f6c6ded
commit 217b38e19a
@@ -873,7 +873,7 @@ variety of customizations.
 llvm/ADT/ilist_node.h
 ^^^^^^^^^^^^^^^^^^^^^
 
-``ilist_node<T>`` implements a the forward and backward links that are expected
+``ilist_node<T>`` implements the forward and backward links that are expected
 by the ``ilist<T>`` (and analogous containers) in the default manner.
 
 ``ilist_node<T>``\ s are meant to be embedded in the node type ``T``, usually
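As context for this documentation fix, a hand-rolled sketch (not LLVM's actual ilist_node implementation; the Widget type and insertAfter helper are invented for illustration) of what "embedding the forward and backward links in the node type" means in practice:

// Hand-rolled illustration of the intrusive-list idea the documentation
// describes: the prev/next links live inside the node type itself, so
// insertion and removal rewire fields of the nodes and allocate nothing.
#include <cassert>

template <typename T> struct IntrusiveNode {
  T *Prev = nullptr; // backward link, embedded in the node
  T *Next = nullptr; // forward link, embedded in the node
};

struct Widget : IntrusiveNode<Widget> { // the node type T carries its own links
  int Value;
  explicit Widget(int V) : Value(V) {}
};

// Splice B in right after A by updating only the embedded links.
static void insertAfter(Widget &A, Widget &B) {
  B.Prev = &A;
  B.Next = A.Next;
  if (A.Next)
    A.Next->Prev = &B;
  A.Next = &B;
}

int main() {
  Widget A(1), B(2);
  insertAfter(A, B);
  assert(A.Next == &B && B.Prev == &A);
  return 0;
}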
@@ -537,7 +537,7 @@ public:
 
 static void *ID() { return (void *)&PassID; }
 
-/// \brief Compute the \c LazyCallGraph for a the module \c M.
+/// \brief Compute the \c LazyCallGraph for the module \c M.
 ///
 /// This just builds the set of entry points to the call graph. The rest is
 /// built lazily as it is walked.
@@ -19,7 +19,7 @@
 namespace llvm {
 
 /// PostDominatorTree Class - Concrete subclass of DominatorTree that is used to
-/// compute the a post-dominator tree.
+/// compute the post-dominator tree.
 ///
 struct PostDominatorTree : public FunctionPass {
 static char ID; // Pass identification, replacement for typeid
@@ -183,7 +183,7 @@ public:
 /// should probably move to simpler cost metrics using the above.
 /// Alternatively, we could split the cost interface into distinct code-size
 /// and execution-speed costs. This would allow modelling the core of this
-/// query more accurately as the a call is a single small instruction, but
+/// query more accurately as a call is a single small instruction, but
 /// incurs significant execution cost.
 virtual bool isLoweredToCall(const Function *F) const;
 
@@ -22,7 +22,7 @@ namespace llvm {
 
 ///
 /// PostDominatorTree Class - Concrete subclass of DominatorTree that is used
-/// to compute the a post-dominator tree.
+/// to compute the post-dominator tree.
 ///
 struct MachinePostDominatorTree : public MachineFunctionPass {
 private:
@@ -749,7 +749,7 @@ public:
 SDValue SV, unsigned Align);
 
 /// getAtomicCmpSwap - Gets a node for an atomic cmpxchg op. There are two
-/// valid Opcodes. ISD::ATOMIC_CMO_SWAP produces a the value loaded and a
+/// valid Opcodes. ISD::ATOMIC_CMO_SWAP produces the value loaded and a
 /// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
 /// a success flag (initially i1), and a chain.
 SDValue getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs,
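The two result flavors described in this comment mirror what a compare-and-swap exposes at the source level. A plainly swapped-in illustration using std::atomic rather than SelectionDAG nodes, showing a CAS that yields both the value loaded and a success flag:

// Illustration only: std::atomic, not SelectionDAG. A CAS reports both the
// value that was in memory and whether the exchange happened, which is the
// distinction the _WITH_SUCCESS node kind models.
#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> X{41};
  int Expected = 40; // deliberately wrong, so the CAS fails
  bool Success = X.compare_exchange_strong(Expected, 42);
  // On failure, Expected is overwritten with the value actually loaded.
  std::printf("success=%d loaded=%d\n", Success, Expected); // success=0 loaded=41

  Expected = 41;
  Success = X.compare_exchange_strong(Expected, 42); // now succeeds
  std::printf("success=%d current=%d\n", Success, X.load());
  return 0;
}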
@@ -446,7 +446,7 @@ private:
 /// The first template argument handles whether or not to preserve names in the
 /// final instruction output. This defaults to on. The second template argument
 /// specifies a class to use for creating constants. This defaults to creating
-/// minimally folded constants. The fourth template argument allows clients to
+/// minimally folded constants. The third template argument allows clients to
 /// specify custom insertion hooks that are called on every newly created
 /// insertion.
 template<bool preserveNames = true, typename T = ConstantFolder,
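For readers unfamiliar with these template parameters, a minimal usage sketch, assuming an LLVM 3.x-era build where IRBuilder<> defaults to preserving names and folding through ConstantFolder (the module, function, and value names are made up):

// Minimal sketch of the default IRBuilder template arguments in use
// (assumes LLVM 3.x-era headers; link against the LLVM libraries).
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  FunctionType *FT = FunctionType::get(Type::getInt32Ty(Ctx), false);
  Function *F = Function::Create(FT, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);

  // With the defaults, names are preserved and ConstantFolder folds eagerly.
  IRBuilder<> Builder(BB);
  Value *Sum = Builder.CreateAdd(Builder.getInt32(2), Builder.getInt32(3), "sum");
  Builder.CreateRet(Sum); // the add of two constants folds, so this returns i32 5
  return 0;
}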
@@ -135,7 +135,7 @@ public:
 return getSubclassDataFromInstruction() & 32;
 }
 
-/// \brief Specify whether this alloca is used to represent a the arguments to
+/// \brief Specify whether this alloca is used to represent the arguments to
 /// a call.
 void setUsedWithInAlloca(bool V) {
 setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
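The setter in this hunk packs a boolean into bit 5 of the instruction's subclass data. A self-contained sketch of that mask-and-set idiom (FlagHolder and its members are invented for illustration, not LLVM's classes):

// Generic sketch of the bit-packing idiom used by setUsedWithInAlloca:
// clear the target bit with &~Mask, then OR it back in if the flag is set.
#include <cassert>
#include <cstdint>

class FlagHolder {
  uint16_t Flags = 0; // stands in for the instruction subclass data

public:
  static constexpr uint16_t UsedWithInAllocaBit = 32; // bit 5, as in the hunk

  void setUsedWithInAlloca(bool V) {
    Flags = (Flags & ~UsedWithInAllocaBit) | (V ? UsedWithInAllocaBit : 0);
  }
  bool isUsedWithInAlloca() const { return Flags & UsedWithInAllocaBit; }
};

int main() {
  FlagHolder H;
  H.setUsedWithInAlloca(true);
  assert(H.isUsedWithInAlloca());
  H.setUsedWithInAlloca(false);
  assert(!H.isUsedWithInAlloca());
  return 0;
}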
@@ -323,7 +323,7 @@ public:
 }
 
 /// getContainedType - This method is used to implement the type iterator
-/// (defined a the end of the file). For derived types, this returns the
+/// (defined at the end of the file). For derived types, this returns the
 /// types 'contained' in the derived type.
 ///
 Type *getContainedType(unsigned i) const {
@@ -87,7 +87,7 @@ protected:
 bool HasMachoTBSSDirective;
 
 /// True if the compiler should emit a ".reference .constructors_used" or
-/// ".reference .destructors_used" directive after the a static ctor/dtor
+/// ".reference .destructors_used" directive after the static ctor/dtor
 /// list. This directive is only emitted in Static relocation model. Default
 /// is false.
 bool HasStaticCtorDtorReferenceInStaticMode;
@@ -937,7 +937,7 @@ GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
 return Dep;
 }
 
-/// SortNonLocalDepInfoCache - Sort the a NonLocalDepInfo cache, given a certain
+/// SortNonLocalDepInfoCache - Sort the NonLocalDepInfo cache, given a certain
 /// number of elements in the array that are already properly ordered. This is
 /// optimized for the case when only a few entries are added.
 static void
@@ -126,7 +126,7 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
 //   Normalized form: {-2,+,1,+,2}
 //   Denormalized form: {1,+,3,+,2}
 //
-// However, denormalization would use the a different step expression than
+// However, denormalization would use a different step expression than
 // normalization (see getPostIncExpr), generating the wrong final
 // expression: {-2,+,1,+,2} + {1,+,2} => {-1,+,3,+,2}
 if (AR->isAffine() &&
@@ -3337,7 +3337,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
 if (isDefine)
 return false;
 
-// Check the a declaration has no block address forward references.
+// Check the declaration has no block address forward references.
 ValID ID;
 if (FunctionName.empty()) {
 ID.Kind = ValID::t_GlobalID;
@@ -261,7 +261,7 @@ void DIEHash::hashDIEEntry(dwarf::Attribute Attribute, dwarf::Tag Tag,
 return;
 }
 
-// otherwise, b) use the letter 'T' as a the marker, ...
+// otherwise, b) use the letter 'T' as the marker, ...
 addULEB128('T');
 
 addULEB128(Attribute);
@@ -228,7 +228,7 @@ void StackColoring::dump() const {
 unsigned StackColoring::collectMarkers(unsigned NumSlot) {
 unsigned MarkersFound = 0;
 // Scan the function to find all lifetime markers.
-// NOTE: We use the a reverse-post-order iteration to ensure that we obtain a
+// NOTE: We use a reverse-post-order iteration to ensure that we obtain a
 // deterministic numbering, and because we'll need a post-order iteration
 // later for solving the liveness dataflow problem.
 for (MachineBasicBlock *MBB : depth_first(MF)) {
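The reverse-post-order note is the standard trick for obtaining a deterministic, roughly topological block numbering. A toy sketch, using a hand-rolled DFS over an adjacency list rather than LLVM's depth_first iterator:

// Toy illustration of reverse post-order: run a DFS, record nodes in
// post-order, then reverse. Given a fixed successor order, the numbering
// is deterministic, and a block tends to precede its successors.
#include <cstdio>
#include <vector>

static void dfs(int BB, const std::vector<std::vector<int>> &Succs,
                std::vector<bool> &Seen, std::vector<int> &PostOrder) {
  Seen[BB] = true;
  for (int S : Succs[BB])
    if (!Seen[S])
      dfs(S, Succs, Seen, PostOrder);
  PostOrder.push_back(BB); // emitted after all successors have been visited
}

int main() {
  // Tiny diamond CFG: 0 -> {1, 2}, 1 -> {3}, 2 -> {3}, 3 -> {}
  std::vector<std::vector<int>> Succs = {{1, 2}, {3}, {3}, {}};
  std::vector<bool> Seen(Succs.size(), false);
  std::vector<int> PostOrder;
  dfs(0, Succs, Seen, PostOrder);

  std::printf("RPO numbering:");
  for (auto It = PostOrder.rbegin(); It != PostOrder.rend(); ++It)
    std::printf(" %d", *It); // prints: RPO numbering: 0 2 1 3
  std::printf("\n");
  return 0;
}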
@@ -680,7 +680,7 @@ sys::MemoryBlock DefaultJITMemoryManager::allocateNewSlab(size_t size) {
 bool DefaultJITMemoryManager::CheckInvariants(std::string &ErrorStr) {
 raw_string_ostream Err(ErrorStr);
 
-// Construct a the set of FreeRangeHeader pointers so we can query it
+// Construct the set of FreeRangeHeader pointers so we can query it
 // efficiently.
 llvm::SmallPtrSet<MemoryRangeHeader*, 16> FreeHdrSet;
 FreeRangeHeader* FreeHead = FreeMemoryList;
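The pattern here, building a SmallPtrSet once so later membership queries are cheap, looks roughly like the following sketch (the Header type and values are invented; requires LLVM's ADT headers):

// Rough sketch of the "build a pointer set once, query it cheaply" pattern
// from CheckInvariants. Header stands in for MemoryRangeHeader.
#include "llvm/ADT/SmallPtrSet.h"

struct Header { int Size; };

int main() {
  Header A{16}, B{32}, C{64};

  // Small inline storage (16 slots) avoids heap allocation in the common case.
  llvm::SmallPtrSet<Header *, 16> FreeHdrSet;
  FreeHdrSet.insert(&A);
  FreeHdrSet.insert(&B);

  bool AIsFree = FreeHdrSet.count(&A) != 0; // true
  bool CIsFree = FreeHdrSet.count(&C) != 0; // false
  return AIsFree && !CIsFree ? 0 : 1;
}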
@@ -7,7 +7,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file defines the a diagnostic printer relying on raw_ostream.
+// This file defines a diagnostic printer relying on raw_ostream.
 //
 //===----------------------------------------------------------------------===//
 
@@ -303,7 +303,7 @@ static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
 
 /// \brief Determine wether it is worth to fold V into an extended register.
 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
-// it hurts if the a value is used at least twice, unless we are optimizing
+// it hurts if the value is used at least twice, unless we are optimizing
 // for code size.
 if (ForCodeSize || V.hasOneUse())
 return true;
@@ -1781,8 +1781,7 @@ unsigned AArch64TargetLowering::getFunctionAlignment(const Function *F) const {
 
 #include "AArch64GenCallingConv.inc"
 
-/// Selects the correct CCAssignFn for a the given CallingConvention
-/// value.
+/// Selects the correct CCAssignFn for a given CallingConvention value.
 CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
 bool IsVarArg) const {
 switch (CC) {
@@ -199,8 +199,7 @@ class AArch64TargetLowering : public TargetLowering {
 public:
 explicit AArch64TargetLowering(TargetMachine &TM);
 
-/// Selects the correct CCAssignFn for a the given CallingConvention
-/// value.
+/// Selects the correct CCAssignFn for a given CallingConvention value.
 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
 
 /// computeKnownBitsForTargetNode - Determine which of the bits specified in
@@ -2889,7 +2889,7 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
 // FIXME: The current MachineInstr design does not support relying on machine
 // mem operands to determine the width of a memory access. Instead, we expect
 // the target to provide this information based on the instruction opcode and
-// operands. However, using MachineMemOperand is a the best solution now for
+// operands. However, using MachineMemOperand is the best solution now for
 // two reasons:
 //
 // 1) getNumMicroOps tries to infer LDM memory width from the total number of MI
@@ -1771,9 +1771,9 @@ def: Mips16Pat
 
 //
 // For constants, llvm transforms this to:
-// x > (k -1) and then reverses the operands to use setlt. So this pattern
+// x > (k - 1) and then reverses the operands to use setlt. So this pattern
 // is not used now by the compiler. (Presumably checking that k-1 does not
-// overflow). The compiler never uses this at a the current time, due to
+// overflow). The compiler never uses this at the current time, due to
 // other optimizations.
 //
 //def: Mips16Pat
@@ -6,7 +6,7 @@
 ; we may reference variables that were not live across basic blocks
 ; resulting in undefined virtual registers.
 ;
-; In this example, this is illustrated by a the spill/reload of the
+; In this example, this is illustrated by a spill/reload of the
 ; LOADED_PTR_SLOT.
 ;
 ; Before this patch, the compiler was accessing two different spill
@@ -1,6 +1,6 @@
 // RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -s -t | FileCheck %s
 
-// Test that we produce the group sections and that they are a the beginning
+// Test that we produce the group sections and that they are at the beginning
 // of the file.
 
 // CHECK: Section {
@@ -185,7 +185,7 @@ static void ContractNodes(std::unique_ptr<Matcher> &MatcherPtr,
 /// Conceptually, we'd like to sink these predicates all the way to the last
 /// matcher predicate in the series. However, it turns out that some
 /// ComplexPatterns have side effects on the graph, so we really don't want to
-/// run a the complex pattern if the pattern predicate will fail. For this
+/// run a complex pattern if the pattern predicate will fail. For this
 /// reason, we refuse to sink the pattern predicate past a ComplexPattern.
 ///
 static void SinkPatternPredicates(std::unique_ptr<Matcher> &MatcherPtr) {