diff --git a/docs/CodingStandards.rst b/docs/CodingStandards.rst index 9418680edc7..b454e49664f 100644 --- a/docs/CodingStandards.rst +++ b/docs/CodingStandards.rst @@ -844,7 +844,7 @@ Here are more examples: .. code-block:: c++ - assert(Ty->isPointerType() && "Can't allocate a non pointer type!"); + assert(Ty->isPointerType() && "Can't allocate a non-pointer type!"); assert((Opcode == Shl || Opcode == Shr) && "ShiftInst Opcode invalid!"); diff --git a/docs/CommandLine.rst b/docs/CommandLine.rst index 4c84d23297b..1b342e34bf5 100644 --- a/docs/CommandLine.rst +++ b/docs/CommandLine.rst @@ -1276,7 +1276,7 @@ The ``cl::getRegisteredOptions`` function ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``cl::getRegisteredOptions`` function is designed to give a programmer -access to declared non positional command line options so that how they appear +access to declared non-positional command line options so that how they appear in ``-help`` can be modified prior to calling `cl::ParseCommandLineOptions`_. Note this method should not be called during any static initialisation because it cannot be guaranteed that all options will have been initialised. Hence it diff --git a/docs/LangRef.rst b/docs/LangRef.rst index 77d37df4e7f..f7f83dba696 100644 --- a/docs/LangRef.rst +++ b/docs/LangRef.rst @@ -612,7 +612,7 @@ Syntax:: The linkage must be one of ``private``, ``linker_private``, ``linker_private_weak``, ``internal``, ``linkonce``, ``weak``, ``linkonce_odr``, ``weak_odr``, ``external``. Note that some system linkers -might not correctly handle dropping a weak symbol that is aliased by a non weak +might not correctly handle dropping a weak symbol that is aliased by a non-weak alias. .. _namedmetadatastructure: diff --git a/docs/SourceLevelDebugging.rst b/docs/SourceLevelDebugging.rst index a1d8110637f..a6349fba86a 100644 --- a/docs/SourceLevelDebugging.rst +++ b/docs/SourceLevelDebugging.rst @@ -2306,7 +2306,7 @@ stringWithCString:]``") and the basename is the selector only Mach-O Changes """""""""""""" -The sections names for the apple hash tables are for non mach-o files. For +The sections names for the apple hash tables are for non-mach-o files. For mach-o files, the sections should be contained in the ``__DWARF`` segment with names as follows: diff --git a/include/llvm/ADT/SparseBitVector.h b/include/llvm/ADT/SparseBitVector.h index 7a10f857044..706f2486226 100644 --- a/include/llvm/ADT/SparseBitVector.h +++ b/include/llvm/ADT/SparseBitVector.h @@ -382,7 +382,7 @@ class SparseBitVector { AtEnd = true; return; } - // Set up for next non zero word in bitmap. + // Set up for next non-zero word in bitmap. BitNumber = Iter->index() * ElementSize; NextSetBitNumber = Iter->find_first(); BitNumber += NextSetBitNumber; diff --git a/include/llvm/Analysis/IntervalPartition.h b/include/llvm/Analysis/IntervalPartition.h index 8cade58cd32..1af7d6b0bd3 100644 --- a/include/llvm/Analysis/IntervalPartition.h +++ b/include/llvm/Analysis/IntervalPartition.h @@ -34,7 +34,7 @@ namespace llvm { // IntervalPartition - This class builds and holds an "interval partition" for // a function. This partition divides the control flow graph into a set of // maximal intervals, as defined with the properties above. Intuitively, an -// interval is a (possibly nonexistent) loop with a "tail" of non looping +// interval is a (possibly nonexistent) loop with a "tail" of non-looping // nodes following it. 
// class IntervalPartition : public FunctionPass { diff --git a/include/llvm/Analysis/RegionInfo.h b/include/llvm/Analysis/RegionInfo.h index e87319516cd..8af02e3efbd 100644 --- a/include/llvm/Analysis/RegionInfo.h +++ b/include/llvm/Analysis/RegionInfo.h @@ -312,11 +312,11 @@ public: /// The toplevel region represents the whole function. bool isTopLevelRegion() const { return exit == NULL; } - /// @brief Return a new (non canonical) region, that is obtained by joining + /// @brief Return a new (non-canonical) region, that is obtained by joining /// this region with its predecessors. /// /// @return A region also starting at getEntry(), but reaching to the next - /// basic block that forms with getEntry() a (non canonical) region. + /// basic block that forms with getEntry() a (non-canonical) region. /// NULL if such a basic block does not exist. Region *getExpandedRegion() const; diff --git a/include/llvm/CodeGen/ScheduleDAGInstrs.h b/include/llvm/CodeGen/ScheduleDAGInstrs.h index fe4f3c2de3b..aaf29ff52af 100644 --- a/include/llvm/CodeGen/ScheduleDAGInstrs.h +++ b/include/llvm/CodeGen/ScheduleDAGInstrs.h @@ -43,7 +43,7 @@ namespace llvm { }; /// Record a physical register access. - /// For non data-dependent uses, OpIdx == -1. + /// For non-data-dependent uses, OpIdx == -1. struct PhysRegSUOper { SUnit *SU; int OpIdx; diff --git a/include/llvm/MC/MCAsmInfo.h b/include/llvm/MC/MCAsmInfo.h index f58a25d3319..b0a81d68c03 100644 --- a/include/llvm/MC/MCAsmInfo.h +++ b/include/llvm/MC/MCAsmInfo.h @@ -82,7 +82,7 @@ namespace llvm { /// LinkerRequiresNonEmptyDwarfLines - True if the linker has a bug and /// requires that the debug_line section be of a minimum size. In practice - /// such a linker requires a non empty line sequence if a file is present. + /// such a linker requires a non-empty line sequence if a file is present. bool LinkerRequiresNonEmptyDwarfLines; // Default to false. /// MaxInstLength - This is the maximum possible length of an instruction, diff --git a/include/llvm/MC/MCSymbol.h b/include/llvm/MC/MCSymbol.h index fe927555c49..ea14da1e15b 100644 --- a/include/llvm/MC/MCSymbol.h +++ b/include/llvm/MC/MCSymbol.h @@ -141,7 +141,7 @@ namespace llvm { } // AliasedSymbol() - If this is an alias (a = b), return the symbol - // we ultimately point to. For a non alias, this just returns the symbol + // we ultimately point to. For a non-alias, this just returns the symbol // itself. const MCSymbol &AliasedSymbol() const; diff --git a/include/llvm/Support/CFG.h b/include/llvm/Support/CFG.h index 74ec7260927..a9427793ac7 100644 --- a/include/llvm/Support/CFG.h +++ b/include/llvm/Support/CFG.h @@ -34,7 +34,7 @@ class PredIterator : public std::iterator(*It)) ++It; } diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index 6f643c3583a..53eea3b50f4 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -880,13 +880,13 @@ protected: } /// Indicate whether this target prefers to use _setjmp to implement - /// llvm.setjmp or the non _ version. Defaults to false. + /// llvm.setjmp or the version without _. Defaults to false. void setUseUnderscoreSetJmp(bool Val) { UseUnderscoreSetJmp = Val; } /// Indicate whether this target prefers to use _longjmp to implement - /// llvm.longjmp or the non _ version. Defaults to false. + /// llvm.longjmp or the version without _. Defaults to false. 
void setUseUnderscoreLongJmp(bool Val) { UseUnderscoreLongJmp = Val; } diff --git a/lib/Analysis/PHITransAddr.cpp b/lib/Analysis/PHITransAddr.cpp index e6af0663fea..6c85d1195f2 100644 --- a/lib/Analysis/PHITransAddr.cpp +++ b/lib/Analysis/PHITransAddr.cpp @@ -72,7 +72,7 @@ static bool VerifySubExpr(Value *Expr, // If it isn't in the InstInputs list it is a subexpr incorporated into the // address. Sanity check that it is phi translatable. if (!CanPHITrans(I)) { - errs() << "Non phi translatable instruction found in PHITransAddr:\n"; + errs() << "Instruction in PHITransAddr is not phi-translatable:\n"; errs() << *I << '\n'; llvm_unreachable("Either something is missing from InstInputs or " "CanPHITrans is wrong."); diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp index 86a557b55f7..ca7c73fa652 100644 --- a/lib/Analysis/ScalarEvolutionExpander.cpp +++ b/lib/Analysis/ScalarEvolutionExpander.cpp @@ -1528,7 +1528,7 @@ Value *SCEVExpander::expand(const SCEV *S) { // // This is independent of PostIncLoops. The mapped value simply materializes // the expression at this insertion point. If the mapped value happened to be - // a postinc expansion, it could be reused by a non postinc user, but only if + // a postinc expansion, it could be reused by a non-postinc user, but only if // its insertion point was already at the head of the loop. InsertedExpressions[std::make_pair(S, InsertPt)] = V; return V; diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index 7f1f9c4e7be..7d9160ab1f4 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -690,7 +690,7 @@ void ScheduleDAGInstrs::initSUnits() { } } -/// If RegPressure is non null, compute register pressure as a side effect. The +/// If RegPressure is non-null, compute register pressure as a side effect. The /// DAG builder is an efficient place to do it because it already visits /// operands. void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 805cc9e0f27..ff78f646c25 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -8120,7 +8120,7 @@ bool DAGCombiner::SliceUpLoad(SDNode *N) { // The width of the type must be a power of 2 and greater than 8-bits. // Otherwise the load cannot be represented in LLVM IR. - // Moreover, if we shifted with a non 8-bits multiple, the slice + // Moreover, if we shifted with a non-8-bits multiple, the slice // will be accross several bytes. We do not support that. unsigned Width = User->getValueSizeInBits(0); if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7)) @@ -8762,7 +8762,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { } else if (ConstantFPSDNode *C = dyn_cast(StoredVal)) { NonZero |= !C->getConstantFPValue()->isNullValue(); } else { - // Non constant. + // Non-constant. 
break; } diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h index 13bb08f08c0..d6a2bd85b83 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -670,13 +670,13 @@ private: LoadSDNode *LD, ISD::LoadExtType ExtType); /// Helper genWidenVectorStores - Helper function to generate a set of - /// stores to store a widen vector into non widen memory + /// stores to store a widen vector into non-widen memory /// StChain: list of chains for the stores we have generated /// ST: store of a widen value void GenWidenVectorStores(SmallVectorImpl &StChain, StoreSDNode *ST); /// Helper genWidenVectorTruncStores - Helper function to generate a set of - /// stores to store a truncate widen vector into non widen memory + /// stores to store a truncate widen vector into non-widen memory /// StChain: list of chains for the stores we have generated /// ST: store of a widen value void GenWidenVectorTruncStores(SmallVectorImpl &StChain, diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index f7a3e3d2506..2af0c55d962 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -2251,7 +2251,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_VSETCC(SDNode *N) { SDValue InOp1 = N->getOperand(0); EVT InVT = InOp1.getValueType(); - assert(InVT.isVector() && "can not widen non vector type"); + assert(InVT.isVector() && "can not widen non-vector type"); EVT WidenInVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(), WidenNumElts); InOp1 = GetWidenedVector(InOp1); diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp index 3dbc0508aa5..e31777735bd 100644 --- a/lib/CodeGen/StackColoring.cpp +++ b/lib/CodeGen/StackColoring.cpp @@ -452,7 +452,7 @@ void StackColoring::calculateLiveIntervals(unsigned NumSlots) { // We have a single consecutive region. Intervals[i]->addSegment(LiveInterval::Segment(S, F, ValNum)); } else { - // We have two non consecutive regions. This happens when + // We have two non-consecutive regions. This happens when // LIFETIME_START appears after the LIFETIME_END marker. SlotIndex NewStart = Indexes->getMBBStartIdx(MBB); SlotIndex NewFin = Indexes->getMBBEndIdx(MBB); diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp index 8a6b77ba37d..795e5fae5b9 100644 --- a/lib/IR/Instructions.cpp +++ b/lib/IR/Instructions.cpp @@ -2206,7 +2206,7 @@ unsigned CastInst::isEliminableCastPair( case 3: // No-op cast in second op implies firstOp as long as the DestTy // is integer and we are not converting between a vector and a - // non vector type. + // non-vector type. if (!SrcTy->isVectorTy() && DstTy->isIntegerTy()) return firstOp; return 0; @@ -2823,7 +2823,7 @@ CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) { if (SrcTy->isPtrOrPtrVectorTy() != DstTy->isPtrOrPtrVectorTy()) return false; - // For non pointer cases, the cast is okay if the source and destination bit + // For non-pointer cases, the cast is okay if the source and destination bit // widths are identical. 
if (!SrcTy->isPtrOrPtrVectorTy()) return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); diff --git a/lib/IR/LegacyPassManager.cpp b/lib/IR/LegacyPassManager.cpp index a431d8256d7..dda623764ae 100644 --- a/lib/IR/LegacyPassManager.cpp +++ b/lib/IR/LegacyPassManager.cpp @@ -475,7 +475,7 @@ public: } // createTheTimeInfo - This method either initializes the TheTimeInfo pointer - // to a non null value (if the -time-passes option is enabled) or it leaves it + // to a non-null value (if the -time-passes option is enabled) or it leaves it // null. It may be called multiple times. static void createTheTimeInfo(); @@ -1755,7 +1755,7 @@ EnableTiming("time-passes", cl::location(TimePassesIsEnabled), cl::desc("Time each pass, printing elapsed time for each on exit")); // createTheTimeInfo - This method either initializes the TheTimeInfo pointer to -// a non null value (if the -time-passes option is enabled) or it leaves it +// a non-null value (if the -time-passes option is enabled) or it leaves it // null. It may be called multiple times. void TimingInfo::createTheTimeInfo() { if (!TimePassesIsEnabled || TheTimeInfo) return; diff --git a/lib/MC/MCContext.cpp b/lib/MC/MCContext.cpp index 7e4cdf98eef..fa9e0760975 100644 --- a/lib/MC/MCContext.cpp +++ b/lib/MC/MCContext.cpp @@ -138,7 +138,7 @@ MCSymbol *MCContext::CreateSymbol(StringRef Name) { StringMapEntry *NameEntry = &UsedNames.GetOrCreateValue(Name); if (NameEntry->getValue()) { - assert(isTemporary && "Cannot rename non temporary symbols"); + assert(isTemporary && "Cannot rename non-temporary symbols"); SmallString<128> NewName = Name; do { NewName.resize(Name.size()); diff --git a/lib/MC/MCSectionMachO.cpp b/lib/MC/MCSectionMachO.cpp index 870451313bb..d91bfe25a9a 100644 --- a/lib/MC/MCSectionMachO.cpp +++ b/lib/MC/MCSectionMachO.cpp @@ -68,7 +68,7 @@ ENTRY(0 /*FIXME*/, S_ATTR_EXT_RELOC) ENTRY(0 /*FIXME*/, S_ATTR_LOC_RELOC) #undef ENTRY { 0, "none", 0 }, // used if section has no attributes but has a stub size -#define AttrFlagEnd 0xffffffff // non legal value, multiple attribute bits set +#define AttrFlagEnd 0xffffffff // non-legal value, multiple attribute bits set { AttrFlagEnd, 0, 0 } }; diff --git a/lib/MC/MachObjectWriter.cpp b/lib/MC/MachObjectWriter.cpp index 8234affc54d..4143d783e29 100644 --- a/lib/MC/MachObjectWriter.cpp +++ b/lib/MC/MachObjectWriter.cpp @@ -446,7 +446,7 @@ void MachObjectWriter::BindIndirectSymbols(MCAssembler &Asm) { } } - // Bind non lazy symbol pointers first. + // Bind non-lazy symbol pointers first. unsigned IndirectIndex = 0; for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(), ie = Asm.indirect_symbol_end(); it != ie; ++it, ++IndirectIndex) { @@ -917,7 +917,7 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm, for (MCAssembler::const_indirect_symbol_iterator it = Asm.indirect_symbol_begin(), ie = Asm.indirect_symbol_end(); it != ie; ++it) { - // Indirect symbols in the non lazy symbol pointer section have some + // Indirect symbols in the non-lazy symbol pointer section have some // special handling. const MCSectionMachO &Section = static_cast(it->SectionData->getSection()); diff --git a/lib/MC/WinCOFFObjectWriter.cpp b/lib/MC/WinCOFFObjectWriter.cpp index 5ae2d925d78..6d270209b74 100644 --- a/lib/MC/WinCOFFObjectWriter.cpp +++ b/lib/MC/WinCOFFObjectWriter.cpp @@ -351,7 +351,7 @@ object_t *WinCOFFObjectWriter::createCOFFEntity(StringRef Name, /// and creates the associated COFF section staging object. 
void WinCOFFObjectWriter::DefineSection(MCSectionData const &SectionData) { assert(SectionData.getSection().getVariant() == MCSection::SV_COFF - && "Got non COFF section in the COFF backend!"); + && "Got non-COFF section in the COFF backend!"); // FIXME: Not sure how to verify this (at least in a debug build). MCSectionCOFF const &Sec = static_cast(SectionData.getSection()); diff --git a/lib/MC/WinCOFFStreamer.cpp b/lib/MC/WinCOFFStreamer.cpp index 4989957c6ab..d6b2f2402b5 100644 --- a/lib/MC/WinCOFFStreamer.cpp +++ b/lib/MC/WinCOFFStreamer.cpp @@ -190,7 +190,7 @@ bool WinCOFFStreamer::EmitSymbolAttribute(MCSymbol *Symbol, assert(Symbol && "Symbol must be non-null!"); assert((Symbol->isInSection() ? Symbol->getSection().getVariant() == MCSection::SV_COFF - : true) && "Got non COFF section in the COFF backend!"); + : true) && "Got non-COFF section in the COFF backend!"); switch (Attribute) { case MCSA_WeakReference: case MCSA_Weak: { @@ -218,7 +218,7 @@ void WinCOFFStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) { void WinCOFFStreamer::BeginCOFFSymbolDef(MCSymbol const *Symbol) { assert((Symbol->isInSection() ? Symbol->getSection().getVariant() == MCSection::SV_COFF - : true) && "Got non COFF section in the COFF backend!"); + : true) && "Got non-COFF section in the COFF backend!"); assert(CurSymbol == NULL && "EndCOFFSymbolDef must be called between calls " "to BeginCOFFSymbolDef!"); CurSymbol = Symbol; @@ -268,7 +268,7 @@ void WinCOFFStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment) { assert((Symbol->isInSection() ? Symbol->getSection().getVariant() == MCSection::SV_COFF - : true) && "Got non COFF section in the COFF backend!"); + : true) && "Got non-COFF section in the COFF backend!"); AddCommonSymbol(Symbol, Size, ByteAlignment, true); } @@ -276,7 +276,7 @@ void WinCOFFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment) { assert((Symbol->isInSection() ? Symbol->getSection().getVariant() == MCSection::SV_COFF - : true) && "Got non COFF section in the COFF backend!"); + : true) && "Got non-COFF section in the COFF backend!"); AddCommonSymbol(Symbol, Size, ByteAlignment, false); } diff --git a/lib/Support/APFloat.cpp b/lib/Support/APFloat.cpp index 676e2d4ba00..802233c1099 100644 --- a/lib/Support/APFloat.cpp +++ b/lib/Support/APFloat.cpp @@ -3816,7 +3816,7 @@ APFloat::opStatus APFloat::next(bool nextDown) { // Decrement the significand. // // We always do this since: - // 1. If we are dealing with a non binade decrement, by definition we + // 1. If we are dealing with a non-binade decrement, by definition we // just decrement the significand. // 2. 
If we are dealing with a normal -> normal binade decrement, since // we have an explicit integral bit the fact that all bits but the diff --git a/lib/Support/Unix/Path.inc b/lib/Support/Unix/Path.inc index c9dc8716714..6e08c63c1fa 100644 --- a/lib/Support/Unix/Path.inc +++ b/lib/Support/Unix/Path.inc @@ -645,7 +645,7 @@ uint64_t mapped_file_region::size() const { char *mapped_file_region::data() const { assert(Mapping && "Mapping failed but used anyway!"); - assert(Mode != readonly && "Cannot get non const data for readonly mapping!"); + assert(Mode != readonly && "Cannot get non-const data for readonly mapping!"); return reinterpret_cast(Mapping); } diff --git a/lib/Support/Windows/Path.inc b/lib/Support/Windows/Path.inc index 0b39198e6b3..b7926e40806 100644 --- a/lib/Support/Windows/Path.inc +++ b/lib/Support/Windows/Path.inc @@ -859,7 +859,7 @@ uint64_t mapped_file_region::size() const { } char *mapped_file_region::data() const { - assert(Mode != readonly && "Cannot get non const data for readonly mapping!"); + assert(Mode != readonly && "Cannot get non-const data for readonly mapping!"); assert(Mapping && "Mapping failed but used anyway!"); return reinterpret_cast(Mapping); } diff --git a/lib/Target/ARM/A15SDOptimizer.cpp b/lib/Target/ARM/A15SDOptimizer.cpp index ff585b41a2a..8ea2073aad1 100644 --- a/lib/Target/ARM/A15SDOptimizer.cpp +++ b/lib/Target/ARM/A15SDOptimizer.cpp @@ -165,7 +165,7 @@ unsigned A15SDOptimizer::getPrefSPRLane(unsigned SReg) { if (!MI) return ARM::ssub_0; MachineOperand *MO = MI->findRegisterDefOperand(SReg); - assert(MO->isReg() && "Non register operand found!"); + assert(MO->isReg() && "Non-register operand found!"); if (!MO) return ARM::ssub_0; if (MI->isCopy() && usesRegClass(MI->getOperand(1), diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index b7f0f6f67a8..9bdec10a64e 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -2802,7 +2802,7 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, bool ForceMutable) const { // Currently, two use-cases possible: - // Case #1. Non var-args function, and we meet first byval parameter. + // Case #1. Non-var-args function, and we meet first byval parameter. // Setup first unallocated register as first byval register; // eat all remained registers // (these two actions are performed by HandleByVal method). diff --git a/lib/Target/ARM/ARMScheduleA9.td b/lib/Target/ARM/ARMScheduleA9.td index 603e775d351..f34c0b0abf3 100644 --- a/lib/Target/ARM/ARMScheduleA9.td +++ b/lib/Target/ARM/ARMScheduleA9.td @@ -2217,7 +2217,7 @@ def A9WriteLMfp : SchedWriteVariant<[ SchedVar]>; //===----------------------------------------------------------------------===// -// Resources for other (non LDM/VLDM) Variants. +// Resources for other (non-LDM/VLDM) Variants. // These mov immediate writers are unconditionally expanded with // additive latency. 
diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp index 93add6ee33c..00e44f5273f 100644 --- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp +++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp @@ -145,7 +145,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Src, SDValue Size, unsigned Align, bool isVolatile, MachinePointerInfo DstPtrInfo) const { - // Use default for non AAPCS (or Darwin) subtargets + // Use default for non-AAPCS (or Darwin) subtargets if (!Subtarget->isAAPCS_ABI() || Subtarget->isTargetDarwin()) return SDValue(); diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index e3f9e0dc609..2ad0a518156 100644 --- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -1580,7 +1580,7 @@ public: void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { assert(N == 3 && "Invalid number of operands!"); assert(isRegShiftedReg() && - "addRegShiftedRegOperands() on non RegShiftedReg!"); + "addRegShiftedRegOperands() on non-RegShiftedReg!"); Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); Inst.addOperand(MCOperand::CreateImm( @@ -1590,7 +1590,7 @@ public: void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { assert(N == 2 && "Invalid number of operands!"); assert(isRegShiftedImm() && - "addRegShiftedImmOperands() on non RegShiftedImm!"); + "addRegShiftedImmOperands() on non-RegShiftedImm!"); Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); // Shift of #32 is encoded as 0 where permitted unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm); diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 5ae93284269..7a345b6937e 100644 --- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -1639,7 +1639,7 @@ bool HexagonDAGToDAGISel::hasNumUsesBelowThresGA(SDNode *N) const { } //===--------------------------------------------------------------------===// -// Return true if the non GP-relative global address can be folded. +// Return true if the non-GP-relative global address can be folded. 
//===--------------------------------------------------------------------===// inline bool HexagonDAGToDAGISel::foldGlobalAddress(SDValue &N, SDValue &R) { return foldGlobalAddressImpl(N, R, false); diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 6b97609415a..f9be3192f1f 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -1539,7 +1539,7 @@ int HexagonInstrInfo::GetDotOldOp(const int opc) const { assert(0 && "Couldn't change predicate new instruction to its old form."); } - if (isNewValueStore(NewOp)) { // Convert into non new-value format + if (isNewValueStore(NewOp)) { // Convert into non-new-value format NewOp = Hexagon::getNonNVStore(NewOp); if (NewOp < 0) assert(0 && "Couldn't change new-value store to its old form."); diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td index 475c23d98bf..d2600dffb08 100644 --- a/lib/Target/Hexagon/HexagonInstrInfoV4.td +++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td @@ -1016,7 +1016,7 @@ class NVJrr_template majOp, bit NvOpNum, bits<5> src1; bits<5> src2; bits<3> Ns; // New-Value Operand - bits<5> RegOp; // Non New-Value Operand + bits<5> RegOp; // Non-New-Value Operand bits<11> offset; let isBrTaken = !if(isTaken, "true", "false"); diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp index 41e382dc072..697419be6e4 100644 --- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -681,7 +681,7 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI, } } - // Make sure that for non POST_INC stores: + // Make sure that for non-POST_INC stores: // 1. The only use of reg is DepReg and no other registers. // This handles V4 base+index registers. // The following store can not be dot new. diff --git a/lib/Target/Hexagon/HexagonVarargsCallingConvention.h b/lib/Target/Hexagon/HexagonVarargsCallingConvention.h index c607b5d3564..668ca98402b 100644 --- a/lib/Target/Hexagon/HexagonVarargsCallingConvention.h +++ b/lib/Target/Hexagon/HexagonVarargsCallingConvention.h @@ -41,7 +41,7 @@ static bool CC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT, } - // Only assign registers for named (non varargs) arguments + // Only assign registers for named (non-varargs) arguments if ( !ForceMem && ((NonVarArgsParams == -1) || (CurrentParam <= NonVarArgsParams))) { diff --git a/lib/Target/Mips/Mips16InstrInfo.td b/lib/Target/Mips/Mips16InstrInfo.td index d9e4a7b62d0..365fe2b5e14 100644 --- a/lib/Target/Mips/Mips16InstrInfo.td +++ b/lib/Target/Mips/Mips16InstrInfo.td @@ -298,7 +298,7 @@ class FI8_MOV32R16_ins: // // This are pseudo formats for multiply -// This first one can be changed to non pseudo now. +// This first one can be changed to non-pseudo now. // // MULT // diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp index 97ac501a72b..e8b4eb41dd3 100644 --- a/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -17,7 +17,7 @@ // // The constants can be not just numbers but addresses of functions and labels. // This can be particularly helpful in static relocation mode for embedded -// non linux targets. +// non-linux targets. 
// // diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp index 90dd3a05fa6..cd4f6981476 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -1580,7 +1580,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) { continue; } // Non-kernel function, just print .param .b for ABI - // and .reg .b for non ABY + // and .reg .b for non-ABI unsigned sz = 0; if (isa(Ty)) { sz = cast(Ty)->getBitWidth(); diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 8da5f0563c6..7682f1a495d 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -2968,7 +2968,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, if (Flags.isByVal()) return false; } - // Non PIC/GOT tail calls are supported. + // Non-PIC/GOT tail calls are supported. if (getTargetMachine().getRelocationModel() != Reloc::PIC_) return true; diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp index 0fcb488672f..1155c2a760a 100644 --- a/lib/Target/R600/R600ISelLowering.cpp +++ b/lib/Target/R600/R600ISelLowering.cpp @@ -1239,7 +1239,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const } Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT, Slots, NumElements); } else { - // non constant ptr cant be folded, keeps it as a v4f32 load + // non-constant ptr can't be folded, keeps it as a v4f32 load Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32, DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr, DAG.getConstant(4, MVT::i32)), DAG.getConstant(LoadNode->getAddressSpace() - diff --git a/lib/Target/R600/SIAnnotateControlFlow.cpp b/lib/Target/R600/SIAnnotateControlFlow.cpp index 6bbdf59d559..9c0feff375b 100644 --- a/lib/Target/R600/SIAnnotateControlFlow.cpp +++ b/lib/Target/R600/SIAnnotateControlFlow.cpp @@ -205,7 +205,7 @@ void SIAnnotateControlFlow::insertElse(BranchInst *Term) { void SIAnnotateControlFlow::handleLoopCondition(Value *Cond) { if (PHINode *Phi = dyn_cast(Cond)) { - // Handle all non constant incoming values first + // Handle all non-constant incoming values first for (unsigned i = 0, e = Phi->getNumIncomingValues(); i != e; ++i) { Value *Incoming = Phi->getIncomingValue(i); if (isa(Incoming)) diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp index f8e359b160f..ab95eb6d332 100644 --- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp +++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp @@ -314,7 +314,7 @@ bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, }; - // This CPU doesnt support long nops. If needed add more. + // This CPU doesn't support long nops. If needed add more. // FIXME: Can we get this from the subtarget somehow? // FIXME: We could generated something better than plain 0x90. if (!HasNopl) { diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index 36d16907bfe..bc751d37db6 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -344,7 +344,7 @@ X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const { // addl %gs:0, %eax // if the block also has an access to a second TLS address this will save // a load. - // FIXME: This is probably also true for non TLS addresses. 
+ // FIXME: This is probably also true for non-TLS addresses. if (Op1.getOpcode() == X86ISD::Wrapper) { SDValue Val = Op1.getOperand(0); if (Val.getOpcode() == ISD::TargetGlobalTLSAddress) diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp index f88a666092b..cb6af0d22cb 100644 --- a/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/lib/Target/X86/X86TargetTransformInfo.cpp @@ -555,7 +555,7 @@ unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert, unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, unsigned AddressSpace) const { - // Handle non power of two vectors such as <3 x float> + // Handle non-power-of-two vectors such as <3 x float> if (VectorType *VTy = dyn_cast(Src)) { unsigned NumElem = VTy->getVectorNumElements(); @@ -570,7 +570,7 @@ unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, // Cost = 128 bit store + unpack + 64 bit store. return 3; - // Assume that all other non power-of-two numbers are scalarized. + // Assume that all other non-power-of-two numbers are scalarized. if (!isPowerOf2_32(NumElem)) { unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode, VTy->getScalarType(), diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp index 7e918979eca..a10485c1241 100644 --- a/lib/Transforms/IPO/GlobalOpt.cpp +++ b/lib/Transforms/IPO/GlobalOpt.cpp @@ -1737,7 +1737,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV, // and this function is main (which we know is not recursive), we replace // the global with a local alloca in this function. // - // NOTE: It doesn't make sense to promote non single-value types since we + // NOTE: It doesn't make sense to promote non-single-value types since we // are just replacing static memory to stack memory. // // If the global is in different address space, don't bring it to stack. @@ -2571,7 +2571,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, // We don't insert an entry into Values, as it doesn't have a // meaningful return value. if (!II->use_empty()) { - DEBUG(dbgs() << "Found unused invariant_start. Cant evaluate.\n"); + DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n"); return false; } ConstantInt *Size = cast(II->getArgOperand(0)); diff --git a/lib/Transforms/IPO/IPConstantPropagation.cpp b/lib/Transforms/IPO/IPConstantPropagation.cpp index 4ac1dfc0968..8b816e556e3 100644 --- a/lib/Transforms/IPO/IPConstantPropagation.cpp +++ b/lib/Transforms/IPO/IPConstantPropagation.cpp @@ -210,7 +210,7 @@ bool IPCP::PropagateConstantReturn(Function &F) { // Different or no known return value? Don't propagate this return // value. RetVals[i] = 0; - // All values non constant? Stop looking. + // All values non-constant? Stop looking. if (++NumNonConstant == RetVals.size()) return false; } diff --git a/lib/Transforms/IPO/StripSymbols.cpp b/lib/Transforms/IPO/StripSymbols.cpp index c4f5cfc1b35..b4c8b3726e1 100644 --- a/lib/Transforms/IPO/StripSymbols.cpp +++ b/lib/Transforms/IPO/StripSymbols.cpp @@ -147,7 +147,7 @@ static void RemoveDeadConstant(Constant *C) { if (OnlyUsedBy(C->getOperand(i), C)) Operands.insert(cast(C->getOperand(i))); if (GlobalVariable *GV = dyn_cast(C)) { - if (!GV->hasLocalLinkage()) return; // Don't delete non static globals. + if (!GV->hasLocalLinkage()) return; // Don't delete non-static globals. 
GV->eraseFromParent(); } else if (!isa(C)) diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp index e348deb0b02..f8b6f15850f 100644 --- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp +++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp @@ -1005,7 +1005,7 @@ static void GenerateARCAnnotation(unsigned InstMDId, // llvm-arc-annotation-processor tool to cross reference where the source // pointer is in the LLVM IR since the LLVM IR parser does not submit such // information via debug info for backends to use (since why would anyone - // need such a thing from LLVM IR besides in non standard cases + // need such a thing from LLVM IR besides in non-standard cases // [i.e. this]). MDString *SourcePtrMDNode = AppendMDNodeToSourcePtr(PtrMDId, Ptr); diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp index 6af269dfed3..d49f3d09d81 100644 --- a/lib/Transforms/Scalar/GVN.cpp +++ b/lib/Transforms/Scalar/GVN.cpp @@ -1789,7 +1789,7 @@ static void patchReplacementInstruction(Instruction *I, Value *Repl) { ReplInst->setMetadata(Kind, MDNode::getMostGenericRange(IMD, ReplMD)); break; case LLVMContext::MD_prof: - llvm_unreachable("MD_prof in a non terminator instruction"); + llvm_unreachable("MD_prof in a non-terminator instruction"); break; case LLVMContext::MD_fpmath: ReplInst->setMetadata(Kind, MDNode::getMostGenericFPMath(IMD, ReplMD)); diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp index 14c5655f083..18aeb03253e 100644 --- a/lib/Transforms/Scalar/LoopRotation.cpp +++ b/lib/Transforms/Scalar/LoopRotation.cpp @@ -301,7 +301,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { CodeMetrics Metrics; Metrics.analyzeBasicBlock(OrigHeader, *TTI); if (Metrics.notDuplicatable) { - DEBUG(dbgs() << "LoopRotation: NOT rotating - contains non duplicatable" + DEBUG(dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable" << " instructions: "; L->dump()); return false; } diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp index 08ac38dec5d..7cbd6d3189b 100644 --- a/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -213,7 +213,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) { notDuplicatable, TTI); DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n"); if (notDuplicatable) { - DEBUG(dbgs() << " Not unrolling loop which contains non duplicatable" + DEBUG(dbgs() << " Not unrolling loop which contains non-duplicatable" << " instructions.\n"); return false; } diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp index 57b290e14b1..394274d86d3 100644 --- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp +++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp @@ -1731,7 +1731,7 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI, // Compute the offset due to this GEP and check if the alloca has a // component element at that offset. SmallVector Indices(GEPI->op_begin() + 1, GEPI->op_end()); - // If this GEP is non constant then the last operand must have been a + // If this GEP is non-constant then the last operand must have been a // dynamic index into a vector. Pop this now as it has no impact on the // constant part of the offset. 
if (NonConstant) diff --git a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp index 8f6eee3510d..839bd208cf8 100644 --- a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp +++ b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp @@ -679,8 +679,8 @@ void PromoteMem2Reg::run() { // Iterating over NewPhiNodes is deterministic, so it is safe to try to // simplify and RAUW them as we go. If it was not, we could add uses to - // the values we replace with in a non deterministic order, thus creating - // non deterministic def->use chains. + // the values we replace with in a non-deterministic order, thus creating + // non-deterministic def->use chains. for (DenseMap, PHINode *>::iterator I = NewPhiNodes.begin(), E = NewPhiNodes.end(); diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp index 874db9ff152..9c3d29f6597 100644 --- a/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -564,7 +564,7 @@ public: /// pointer itself is an induction variable. /// This check allows us to vectorize A[idx] into a wide load/store. /// Returns: - /// 0 - Stride is unknown or non consecutive. + /// 0 - Stride is unknown or non-consecutive. /// 1 - Address is consecutive. /// -1 - Address is consecutive, and decreasing. int isConsecutivePtr(Value *Ptr); @@ -1093,7 +1093,7 @@ static unsigned getGEPInductionOperand(DataLayout *DL, } int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { - assert(Ptr->getType()->isPointerTy() && "Unexpected non ptr"); + assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr"); // Make sure that the pointer does not point to structs. if (Ptr->getType()->getPointerElementType()->isAggregateType()) return 0; @@ -1216,7 +1216,7 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, if (ScalarAllocatedSize != VectorElementSize) return scalarizeInstruction(Instr); - // If the pointer is loop invariant or if it is non consecutive, + // If the pointer is loop invariant or if it is non-consecutive, // scalarize the load. int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); bool Reverse = ConsecutiveStride < 0; @@ -2430,7 +2430,7 @@ void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, setDebugLocFromInst(Builder, P); // Check for PHI nodes that are lowered to vector selects. if (P->getParent() != OrigLoop->getHeader()) { - // We know that all PHIs in non header blocks are converted into + // We know that all PHIs in non-header blocks are converted into // selects, so we don't have to worry about the insertion order and we // can just use the builder. // At this point we generate the predication tree. There may be @@ -2846,7 +2846,7 @@ bool LoopVectorizationLegality::canVectorize() { DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName() << '\n'); - // Check if we can if-convert non single-bb loops. + // Check if we can if-convert non-single-bb loops. unsigned NumBlocks = TheLoop->getNumBlocks(); if (NumBlocks != 1 && !canVectorizeWithIfConvert()) { DEBUG(dbgs() << "LV: Can't if-convert the loop.\n"); @@ -3499,7 +3499,7 @@ private: // We can access this many bytes in parallel safely. unsigned MaxSafeDepDistBytes; - /// \brief If we see a non constant dependence distance we can still try to + /// \brief If we see a non-constant dependence distance we can still try to /// vectorize this loop with runtime checks. 
bool ShouldRetryWithRuntimeCheck; @@ -3535,7 +3535,7 @@ static bool isInBoundsGep(Value *Ptr) { static int isStridedPtr(ScalarEvolution *SE, DataLayout *DL, Value *Ptr, const Loop *Lp) { const Type *Ty = Ptr->getType(); - assert(Ty->isPointerTy() && "Unexpected non ptr"); + assert(Ty->isPointerTy() && "Unexpected non-ptr"); // Make sure that the pointer does not point to aggregate types. const PointerType *PtrTy = cast(Ty); @@ -3699,7 +3699,7 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, const SCEVConstant *C = dyn_cast(Dist); if (!C) { - DEBUG(dbgs() << "LV: Dependence because of non constant distance\n"); + DEBUG(dbgs() << "LV: Dependence because of non-constant distance\n"); ShouldRetryWithRuntimeCheck = true; return true; } @@ -4140,7 +4140,7 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi, // Check whether we found a reduction operator. FoundReduxOp |= !IsAPhi; - // Process users of current instruction. Push non PHI nodes after PHI nodes + // Process users of current instruction. Push non-PHI nodes after PHI nodes // onto the stack. This way we are going to have seen all inputs to PHI // nodes once we get to them. SmallVector NonPHIs; diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp index d15b1250944..966fca766c7 100644 --- a/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -932,7 +932,7 @@ void BoUpSLP::buildTree_rec(ArrayRef VL, unsigned Depth) { for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) if (!isConsecutiveAccess(VL[i], VL[i + 1])) { newTreeEntry(VL, false); - DEBUG(dbgs() << "SLP: Non consecutive store.\n"); + DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); return; } diff --git a/test/CodeGen/PowerPC/vec_cmp.ll b/test/CodeGen/PowerPC/vec_cmp.ll index 83e0e026306..4bce8c80fc6 100644 --- a/test/CodeGen/PowerPC/vec_cmp.ll +++ b/test/CodeGen/PowerPC/vec_cmp.ll @@ -1,6 +1,6 @@ ; RUN: llc -mcpu=pwr6 -mattr=+altivec < %s | FileCheck %s -; Check vector comparisons using altivec. For non native types, just basic +; Check vector comparisons using altivec. For non-native types, just basic ; comparison instruction check is done. For altivec supported type (16i8, ; 8i16, 4i32, and 4f32) all the comparisons operators (==, !=, >, >=, <, <=) ; are checked. diff --git a/test/CodeGen/X86/x86-shifts.ll b/test/CodeGen/X86/x86-shifts.ll index 2f3adb8db9a..ec479330ed6 100644 --- a/test/CodeGen/X86/x86-shifts.ll +++ b/test/CodeGen/X86/x86-shifts.ll @@ -100,7 +100,7 @@ entry: ret <8 x i16> %K } -; non splat test +; non-splat test define <8 x i16> @sll8_nosplat(<8 x i16> %A) nounwind { diff --git a/test/MC/ELF/weak.s b/test/MC/ELF/weak.s index 2ed3eb7b2bd..943a46d03b3 100644 --- a/test/MC/ELF/weak.s +++ b/test/MC/ELF/weak.s @@ -5,7 +5,7 @@ .weak foo .long foo -// And that bar is after all local symbols and has non zero value. +// And that bar is after all local symbols and has non-zero value. .weak bar bar: diff --git a/test/Object/nm-archive.test b/test/Object/nm-archive.test index 0d43cc70155..fbbf051b478 100644 --- a/test/Object/nm-archive.test +++ b/test/Object/nm-archive.test @@ -28,7 +28,7 @@ Or in an archive with no symtab or string table. RUN: llvm-nm %p/Inputs/archive-test.a-gnu-minimal -And don't crash when asked to print a non existing symtab. +And don't crash when asked to print a non-existing symtab. RUN: llvm-nm -s %p/Inputs/archive-test.a-gnu-minimal Don't reject an empty archive. 
diff --git a/test/Transforms/Internalize/lists.ll b/test/Transforms/Internalize/lists.ll index 637d701524e..548c8aa267b 100644 --- a/test/Transforms/Internalize/lists.ll +++ b/test/Transforms/Internalize/lists.ll @@ -1,7 +1,7 @@ ; No arguments means internalize everything ; RUN: opt < %s -internalize -S | FileCheck --check-prefix=ALL %s -; Non existent files should be treated as if they were empty (so internalize +; Non-existent files should be treated as if they were empty (so internalize ; everything) ; RUN: opt < %s -internalize -internalize-public-api-file /nonexistent/file 2> /dev/null -S | FileCheck --check-prefix=ALL %s diff --git a/test/Transforms/LoopVectorize/increment.ll b/test/Transforms/LoopVectorize/increment.ll index d35bd58a028..71bedb7334a 100644 --- a/test/Transforms/LoopVectorize/increment.ll +++ b/test/Transforms/LoopVectorize/increment.ll @@ -34,7 +34,7 @@ define void @inc(i32 %n) nounwind uwtable noinline ssp { ret void } -; Can't vectorize this loop because the access to A[X] is non linear. +; Can't vectorize this loop because the access to A[X] is non-linear. ; ; for (i = 0; i < n; ++i) { ; A[B[i]]++; diff --git a/tools/llvm-config/BuildVariables.inc.in b/tools/llvm-config/BuildVariables.inc.in index fe87afb8219..2ec019ba622 100644 --- a/tools/llvm-config/BuildVariables.inc.in +++ b/tools/llvm-config/BuildVariables.inc.in @@ -11,8 +11,8 @@ // llvm-config wants to report to the user, but which can only be determined at // build time. // -// The non .in variant of this file has been autogenerated by the LLVM build. Do -// not edit! +// The variant of this file not ending with .in has been autogenerated by the +// LLVM build. Do not edit! // //===----------------------------------------------------------------------===// diff --git a/tools/llvm-stress/llvm-stress.cpp b/tools/llvm-stress/llvm-stress.cpp index fd10baf5a4a..3034568ec5b 100644 --- a/tools/llvm-stress/llvm-stress.cpp +++ b/tools/llvm-stress/llvm-stress.cpp @@ -289,7 +289,7 @@ protected: struct LoadModifier: public Modifier { LoadModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {} virtual void Act() { - // Try to use predefined pointers. If non exist, use undef pointer value; + // Try to use predefined pointers. If none exist, use an undef pointer value. Value *Ptr = getRandomPointerValue(); Value *V = new LoadInst(Ptr, "L", BB->getTerminator()); PT->push_back(V); @@ -299,7 +299,7 @@ struct LoadModifier: public Modifier { struct StoreModifier: public Modifier { StoreModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {} virtual void Act() { - // Try to use predefined pointers. If non exist, use undef pointer value; + // Try to use predefined pointers. If none exist, use an undef pointer value. Value *Ptr = getRandomPointerValue(); Type *Tp = Ptr->getType(); Value *Val = getRandomValue(Tp->getContainedType(0)); diff --git a/utils/TableGen/CodeGenMapTable.cpp b/utils/TableGen/CodeGenMapTable.cpp index cb7ec3e9b9a..d27323eacbc 100644 --- a/utils/TableGen/CodeGenMapTable.cpp +++ b/utils/TableGen/CodeGenMapTable.cpp @@ -116,7 +116,7 @@ public: ColFields = MapRec->getValueAsListInit("ColFields"); // Values for the fields/attributes listed in 'ColFields'. - // Ex: KeyCol = 'noPred' -- key instruction is non predicated + // Ex: KeyCol = 'noPred' -- key instruction is non-predicated KeyCol = MapRec->getValueAsListInit("KeyCol"); // List of values for the fields/attributes listed in 'ColFields', one for