Fix known typos
Sweep the codebase for common typos. Includes some changes to visible function names that were misspelt.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200018 91177308-0d34-0410-b5e6-96231b3b80d8

This commit is contained in:
parent 27ce8feb4a
commit ae43cab6ba
@@ -24,7 +24,7 @@ extern "C" {
 * Read LLVM IR from a memory buffer and convert it into an in-memory Module
 * object. Returns 0 on success.
 * Optionally returns a human-readable description of any errors that
-* occured during parsing IR. OutMessage must be disposed with
+* occurred during parsing IR. OutMessage must be disposed with
 * LLVMDisposeMessage.
 *
 * @see llvm::ParseIR()
@@ -94,7 +94,7 @@ namespace llvm {
 
 /// The standard DAG builder does not normally include terminators as DAG
 /// nodes because it does not create the necessary dependencies to prevent
-/// reordering. A specialized scheduler can overide
+/// reordering. A specialized scheduler can override
 /// TargetInstrInfo::isSchedulingBoundary then enable this flag to indicate
 /// it has taken responsibility for scheduling the terminator correctly.
 bool CanHandleTerminators;
@@ -158,7 +158,7 @@ public:
 /// model.
 ///
 /// Compute and return the expected latency of this instruction independent of
-/// a particular use. computeOperandLatency is the prefered API, but this is
+/// a particular use. computeOperandLatency is the preferred API, but this is
 /// occasionally useful to help estimate instruction cost.
 ///
 /// If UseDefaultDefLatency is false and no new machine sched model is
@@ -33,11 +33,11 @@ enum CompressionLevel {
 
 enum Status {
 StatusOK,
-StatusUnsupported, // zlib is unavaliable
+StatusUnsupported, // zlib is unavailable
 StatusOutOfMemory, // there was not enough memory
 StatusBufferTooShort, // there was not enough room in the output buffer
 StatusInvalidArg, // invalid input parameter
 StatusInvalidData // data was corrupted or incomplete
 };
 
 bool isAvailable();
@@ -95,7 +95,7 @@ namespace sys {
 /// memory was not allocated using the allocateMappedMemory method.
 /// \p Block describes the memory block to be protected.
 /// \p Flags specifies the new protection state to be assigned to the block.
-/// \p ErrMsg [out] returns a string describing any error that occured.
+/// \p ErrMsg [out] returns a string describing any error that occurred.
 ///
 /// If \p Flags is MF_WRITE, the actual behavior varies
 /// with the operating system (i.e. MF_READ | MF_WRITE on Windows) and the
@@ -7,7 +7,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This header defines ComputeASanStackFrameLayout and auxilary data structs.
+// This header defines ComputeASanStackFrameLayout and auxiliary data structs.
 //
 //===----------------------------------------------------------------------===//
 #ifndef LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
@@ -154,7 +154,7 @@ static bool isObjectSize(const Value *V, uint64_t Size,
 /// isIdentifiedFunctionLocal - Return true if V is umabigously identified
 /// at the function-level. Different IdentifiedFunctionLocals can't alias.
 /// Further, an IdentifiedFunctionLocal can not alias with any function
-/// arguments other than itself, which is not neccessarily true for
+/// arguments other than itself, which is not necessarily true for
 /// IdentifiedObjects.
 static bool isIdentifiedFunctionLocal(const Value *V)
 {
@@ -3178,7 +3178,7 @@ void DependenceAnalysis::updateDirection(Dependence::DVEntry &Level,
 
 /// Check if we can delinearize the subscripts. If the SCEVs representing the
 /// source and destination array references are recurrences on a nested loop,
-/// this function flattens the nested recurrences into seperate recurrences
+/// this function flattens the nested recurrences into separate recurrences
 /// for each loop level.
 bool
 DependenceAnalysis::tryDelinearize(const SCEV *SrcSCEV, const SCEV *DstSCEV,
@@ -7143,7 +7143,7 @@ SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
 const SCEV *Start = this->getStart();
 const SCEV *Step = this->getStepRecurrence(SE);
 
-// Build the SCEV representation of the cannonical induction variable in the
+// Build the SCEV representation of the canonical induction variable in the
 // loop of this SCEV.
 const SCEV *Zero = SE.getConstant(this->getType(), 0);
 const SCEV *One = SE.getConstant(this->getType(), 1);
@@ -7189,13 +7189,13 @@ SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
 else
 Rem = Quotient;
 
-// Scale up the cannonical induction variable IV by whatever remains from the
+// Scale up the canonical induction variable IV by whatever remains from the
 // Step after division by the GCD: the GCD is the size of all the sub-array.
 if (Step != GCD) {
 Step = SCEVDivision::divide(SE, Step, GCD);
 IV = SE.getMulExpr(IV, Step);
 }
-// The access function in the current subscript is computed as the cannonical
+// The access function in the current subscript is computed as the canonical
 // induction variable IV (potentially scaled up by the step) and offset by
 // Rem, the offset of delinearization in the sub-array.
 const SCEV *Index = SE.getAddExpr(IV, Rem);
@@ -7652,7 +7652,7 @@ void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
 
 typedef DenseMap<const Loop *, std::string> VerifyMap;
 
-/// replaceSubString - Replaces all occurences of From in Str with To.
+/// replaceSubString - Replaces all occurrences of From in Str with To.
 static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
 size_t Pos = 0;
 while ((Pos = Str.find(From, Pos)) != std::string::npos) {
@@ -595,7 +595,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
 if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
 AntiDepReg = 0;
 
-// Look for a suitable register to use to break the anti-depenence.
+// Look for a suitable register to use to break the anti-dependence.
 //
 // TODO: Instead of picking the first free register, consider which might
 // be the best.
@@ -1976,7 +1976,7 @@ void SchedBoundary::bumpNode(SUnit *SU) {
 }
 else {
 // After updating ZoneCritResIdx and ExpectedLatency, check if we're
-// resource limited. If a stall occured, bumpCycle does this.
+// resource limited. If a stall occurred, bumpCycle does this.
 unsigned LFactor = SchedModel->getLatencyFactor();
 IsResourceLimited =
 (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
@@ -63,7 +63,7 @@ const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
 /// not already. It also adds the current node as a successor of the
 /// specified node.
 bool SUnit::addPred(const SDep &D, bool Required) {
-// If this node already has this depenence, don't add a redundant one.
+// If this node already has this dependence, don't add a redundant one.
 for (SmallVectorImpl<SDep>::iterator I = Preds.begin(), E = Preds.end();
 I != E; ++I) {
 // Zero-latency weak edges may be added purely for heuristic ordering. Don't
@@ -6740,7 +6740,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
 }
 
-// The next optimizations are desireable only if SELECT_CC can be lowered.
+// The next optimizations are desirable only if SELECT_CC can be lowered.
 // Check against MVT::Other for SELECT_CC, which is a workaround for targets
 // having to say they don't support SELECT_CC on every type the DAG knows
 // about, since there is no way to mark an opcode illegal at all value types
@@ -6797,7 +6797,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
 }
 
-// The next optimizations are desireable only if SELECT_CC can be lowered.
+// The next optimizations are desirable only if SELECT_CC can be lowered.
 // Check against MVT::Other for SELECT_CC, which is a workaround for targets
 // having to say they don't support SELECT_CC on every type the DAG knows
 // about, since there is no way to mark an opcode illegal at all value types
@@ -8265,7 +8265,7 @@ bool DAGCombiner::SliceUpLoad(SDNode *N) {
 // The width of the type must be a power of 2 and greater than 8-bits.
 // Otherwise the load cannot be represented in LLVM IR.
 // Moreover, if we shifted with a non-8-bits multiple, the slice
-// will be accross several bytes. We do not support that.
+// will be across several bytes. We do not support that.
 unsigned Width = User->getValueSizeInBits(0);
 if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
 return 0;
@@ -41,7 +41,7 @@ PatchPointOpers::PatchPointOpers(const MachineInstr *MI)
 ++CheckStartIdx;
 
 assert(getMetaIdx() == CheckStartIdx &&
-"Unexpected additonal definition in Patchpoint intrinsic.");
+"Unexpected additional definition in Patchpoint intrinsic.");
 #endif
 }
 
@@ -164,10 +164,10 @@ typedef struct _iJIT_Method_NIDS
 
 typedef struct _LineNumberInfo
 {
-/* x86 Offset from the begining of the method*/
+/* x86 Offset from the beginning of the method*/
 unsigned int Offset;
 
-/* source line number from the begining of the source file */
+/* source line number from the beginning of the source file */
 unsigned int LineNumber;
 
 } *pLineNumberInfo, LineNumberInfo;
@@ -191,9 +191,9 @@ typedef struct _iJIT_Method_Load
 unsigned int method_size;
 
 /* Line Table size in number of entries - Zero if none */
 unsigned int line_number_size;
 
-/* Pointer to the begining of the line numbers info array */
+/* Pointer to the beginning of the line numbers info array */
 pLineNumberInfo line_number_table;
 
 /* unique class ID */
@@ -1120,7 +1120,7 @@ void Interpreter::visitCallSite(CallSite CS) {
 callFunction((Function*)GVTOP(SRC), ArgVals);
 }
 
-// auxilary function for shift operations
+// auxiliary function for shift operations
 static unsigned getShiftAmount(uint64_t orgShiftAmount,
 llvm::APInt valueToShift) {
 unsigned valueWidth = valueToShift.getBitWidth();
@@ -78,7 +78,7 @@ uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup,
 sys::Memory::MF_WRITE,
 ec);
 if (ec) {
-// FIXME: Add error propogation to the interface.
+// FIXME: Add error propagation to the interface.
 return NULL;
 }
 
@@ -116,7 +116,7 @@ class RuntimeDyldELF : public RuntimeDyldImpl {
 
 virtual void updateGOTEntries(StringRef Name, uint64_t Addr);
 
-// Relocation entries for symbols whose position-independant offset is
+// Relocation entries for symbols whose position-independent offset is
 // updated in a global offset table.
 typedef SmallVector<RelocationValueRef, 2> GOTRelocations;
 GOTRelocations GOTEntries; // List of entries requiring finalization.
@@ -1341,7 +1341,7 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info) {
 if (!getTargetParser().ParseDirective(ID))
 return false;
 
-// Next, check the extention directive map to see if any extension has
+// Next, check the extension directive map to see if any extension has
 // registered itself to parse this directive.
 std::pair<MCAsmParserExtension *, DirectiveHandler> Handler =
 ExtensionDirectiveMap.lookup(IDVal);
@@ -3164,13 +3164,13 @@ bool AsmParser::parseDirectiveMacro(SMLoc DirectiveLoc) {
 ///
 /// With the support added for named parameters there may be code out there that
 /// is transitioning from positional parameters. In versions of gas that did
-/// not support named parameters they would be ignored on the macro defintion.
+/// not support named parameters they would be ignored on the macro definition.
 /// But to support both styles of parameters this is not possible so if a macro
-/// defintion has named parameters but does not use them and has what appears
+/// definition has named parameters but does not use them and has what appears
 /// to be positional parameters, strings like $1, $2, ... and $n, then issue a
 /// warning that the positional parameter found in body which have no effect.
 /// Hoping the developer will either remove the named parameters from the macro
-/// definiton so the positional parameters get used if that was what was
+/// definition so the positional parameters get used if that was what was
 /// intended or change the macro to use the named parameters. It is possible
 /// this warning will trigger when the none of the named parameters are used
 /// and the strings like $1 are infact to simply to be passed trough unchanged.
@@ -3776,7 +3776,7 @@ APFloat::opStatus APFloat::next(bool nextDown) {
 // change the payload.
 if (isSignaling()) {
 result = opInvalidOp;
-// For consistency, propogate the sign of the sNaN to the qNaN.
+// For consistency, propagate the sign of the sNaN to the qNaN.
 makeNaN(false, isNegative(), 0);
 }
 break;
@@ -1096,7 +1096,7 @@ APInt APInt::ashr(unsigned shiftAmt) const {
 // to include in this word.
 val[breakWord] = pVal[breakWord+offset] >> wordShift;
 
-// Deal with sign extenstion in the break word, and possibly the word before
+// Deal with sign extension in the break word, and possibly the word before
 // it.
 if (isNegative()) {
 if (wordShift > bitsInWord) {
@@ -246,12 +246,11 @@ static Option *LookupNearestOption(StringRef Arg,
 return Best;
 }
 
-/// CommaSeparateAndAddOccurence - A wrapper around Handler->addOccurence() that
-/// does special handling of cl::CommaSeparated options.
-static bool CommaSeparateAndAddOccurence(Option *Handler, unsigned pos,
-StringRef ArgName,
-StringRef Value, bool MultiArg = false)
-{
+/// CommaSeparateAndAddOccurrence - A wrapper around Handler->addOccurrence()
+/// that does special handling of cl::CommaSeparated options.
+static bool CommaSeparateAndAddOccurrence(Option *Handler, unsigned pos,
+StringRef ArgName, StringRef Value,
+bool MultiArg = false) {
 // Check to see if this option accepts a comma separated list of values. If
 // it does, we have to split up the value into multiple values.
 if (Handler->getMiscFlags() & CommaSeparated) {
@@ -312,13 +311,13 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName,
 
 // If this isn't a multi-arg option, just run the handler.
 if (NumAdditionalVals == 0)
-return CommaSeparateAndAddOccurence(Handler, i, ArgName, Value);
+return CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value);
 
 // If it is, run the handle several times.
 bool MultiArg = false;
 
 if (Value.data()) {
-if (CommaSeparateAndAddOccurence(Handler, i, ArgName, Value, MultiArg))
+if (CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value, MultiArg))
 return true;
 --NumAdditionalVals;
 MultiArg = true;
@@ -329,7 +328,7 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName,
 return Handler->error("not enough values!");
 Value = argv[++i];
 
-if (CommaSeparateAndAddOccurence(Handler, i, ArgName, Value, MultiArg))
+if (CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value, MultiArg))
 return true;
 MultiArg = true;
 --NumAdditionalVals;
@@ -1502,7 +1501,7 @@ protected:
 std::vector<OptionCategory *> SortedCategories;
 std::map<OptionCategory *, std::vector<Option *> > CategorizedOptions;
 
-// Collect registered option categories into vector in preperation for
+// Collect registered option categories into vector in preparation for
 // sorting.
 for (OptionCatSet::const_iterator I = RegisteredOptionCategories->begin(),
 E = RegisteredOptionCategories->end();
@@ -32,10 +32,10 @@ namespace {
 
 #ifdef LLVM_ON_WIN32
 const char *separators = "\\/";
-const char prefered_separator = '\\';
+const char preferred_separator = '\\';
 #else
 const char separators = '/';
-const char prefered_separator = '/';
+const char preferred_separator = '/';
 #endif
 
 StringRef find_first_component(StringRef path) {
@@ -403,7 +403,7 @@ void append(SmallVectorImpl<char> &path, const Twine &a,
 
 if (!component_has_sep && !(path.empty() || is_root_name)) {
 // Add a separator.
-path.push_back(prefered_separator);
+path.push_back(preferred_separator);
 }
 
 path.append(i->begin(), i->end());
@@ -532,10 +532,10 @@ p_simp_re(struct parse *p,
 sopno subno;
 # define BACKSL (1<<CHAR_BIT)
 
-pos = HERE(); /* repetion op, if any, covers from here */
+pos = HERE(); /* repetition op, if any, covers from here */
 
 assert(MORE()); /* caller should have ensured this */
 c = GETNEXT();
 if (c == '\\') {
 REQUIRE(MORE(), REG_EESCAPE);
 c = BACKSL | GETNEXT();
@@ -611,7 +611,8 @@ void AArch64InstrInfo::getAddressConstraints(const MachineInstr &MI,
 int &AccessScale, int &MinOffset,
 int &MaxOffset) const {
 switch (MI.getOpcode()) {
-default: llvm_unreachable("Unkown load/store kind");
+default:
+llvm_unreachable("Unknown load/store kind");
 case TargetOpcode::DBG_VALUE:
 AccessScale = 1;
 MinOffset = INT_MIN;
@@ -6432,7 +6432,7 @@ defm TBL2 : NI_TBL_pat<0b01, 0b0, "tbl", "VPair">;
 defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">;
 defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">;
 
-// Table lookup extention
+// Table lookup extension
 class NI_TBX<bit q, bits<2> op2, bits<2> len, bit op,
 string asmop, string OpS, RegisterOperand OpVPR,
 RegisterOperand VecList>
@@ -1517,7 +1517,7 @@ static DecodeStatus DecodeVLDSTLanePostInstruction(MCInst &Inst, unsigned Insn,
 unsigned Q = fieldFromInstruction(Insn, 30, 1);
 unsigned S = fieldFromInstruction(Insn, 10, 3);
 unsigned lane = 0;
-// Calculate the number of lanes by number of vectors and transfered bytes.
+// Calculate the number of lanes by number of vectors and transferred bytes.
 // NumLanes = 16 bytes / bytes of each lane
 unsigned NumLanes = 16 / (TransferBytes / NumVecs);
 switch (NumLanes) {
@@ -1407,7 +1407,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
 
 bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
 SDValue &OffImm) {
-// This *must* succeed since it's used for the irreplacable ldrex and strex
+// This *must* succeed since it's used for the irreplaceable ldrex and strex
 // instructions.
 Base = N;
 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
@@ -5987,7 +5987,7 @@ static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
 return Op;
 
-// Aquire/Release load/store is not legal for targets without a
+// Acquire/Release load/store is not legal for targets without a
 // dmb or equivalent available.
 return SDValue();
 }
@@ -10189,7 +10189,7 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const
 case MVT::v2f64: {
 // For any little-endian targets with neon, we can support unaligned ld/st
 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
-// A big-endian target may also explictly support unaligned accesses
+// A big-endian target may also explicitly support unaligned accesses
 if (Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian())) {
 if (Fast)
 *Fast = true;
@@ -214,7 +214,7 @@ def GPRnopc : RegisterClass<"ARM", [i32], 32, (sub GPR, PC)> {
 }
 
 // GPRs without the PC but with APSR. Some instructions allow accessing the
-// APSR, while actually encoding PC in the register field. This is usefull
+// APSR, while actually encoding PC in the register field. This is useful
 // for assembly and disassembly only.
 def GPRwithAPSR : RegisterClass<"ARM", [i32], 32, (add (sub GPR, PC), APSR_NZCV)> {
 let AltOrders = [(add LR, GPRnopc), (trunc GPRnopc, 8)];
@@ -1721,7 +1721,7 @@ let SchedModel = SwiftModel in {
 SchedVar<SwiftLMAddr3Pred, [SwiftWriteLM9Cy, SwiftWriteLM10Cy,
 SwiftWriteLM13CyNo, SwiftWriteP01OneCycle,
 SwiftVLDMPerm3]>,
-// Load of a Q register (not neccessarily true). We should not be mapping to
+// Load of a Q register (not necessarily true). We should not be mapping to
 // 4 S registers, either.
 SchedVar<SwiftLMAddr4Pred, [SwiftWriteLM4Cy, SwiftWriteLM4CyNo,
 SwiftWriteLM4CyNo, SwiftWriteLM4CyNo]>,
@@ -1858,7 +1858,7 @@ let SchedModel = SwiftModel in {
 // Assume 5 D registers.
 SchedVar<SwiftLMAddr10Pred, [SwiftWriteSTM6]>,
 SchedVar<SwiftLMAddr11Pred, [SwiftWriteSTM12]>,
-// Asume three Q registers.
+// Assume three Q registers.
 SchedVar<SwiftLMAddr12Pred, [SwiftWriteSTM4]>,
 SchedVar<SwiftLMAddr13Pred, [SwiftWriteSTM14]>,
 // Assume 7 D registers.
@@ -533,7 +533,7 @@ unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueK
 // creates a sequence of shift, and, or instructions to construct values.
 // These sequences are recognized by the ISel and have zero-cost. Not so for
 // the vectorized code. Because we have support for v2i64 but not i64 those
-// sequences look particularily beneficial to vectorize.
+// sequences look particularly beneficial to vectorize.
 // To work around this we increase the cost of v2i64 operations to make them
 // seem less beneficial.
 if (LT.second == MVT::v2i64 &&
@@ -183,7 +183,8 @@ namespace ARM_ISB {
 
 inline static const char *InstSyncBOptToString(unsigned val) {
 switch (val) {
-default: llvm_unreachable("Unkown memory operation");
+default:
+llvm_unreachable("Unknown memory operation");
 case RESERVED_0: return "#0x0";
 case RESERVED_1: return "#0x1";
 case RESERVED_2: return "#0x2";
@@ -1035,7 +1035,7 @@ void ARMELFStreamer::emitFnStart() {
 }
 
 void ARMELFStreamer::emitFnEnd() {
-assert(FnStart && ".fnstart must preceeds .fnend");
+assert(FnStart && ".fnstart must precedes .fnend");
 
 // Emit unwind opcodes if there is no .handlerdata directive
 if (!ExTab && !CantUnwind)
@@ -285,7 +285,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
 // Update the intermediate instruction to with the kill flag.
 if (KillingInstr) {
 bool Added = KillingInstr->addRegisterKilled(KilledOperand, TRI, true);
-(void)Added; // supress compiler warning
+(void)Added; // suppress compiler warning
 assert(Added && "Must successfully update kill flag");
 removeKillInfo(I2, KilledOperand);
 }
@@ -343,7 +343,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
 // Update I1 to set the kill flag. This flag will later be picked up by
 // the new COMBINE instruction.
 bool Added = I1->addRegisterKilled(KilledOperand, TRI);
-(void)Added; // supress compiler warning
+(void)Added; // suppress compiler warning
 assert(Added && "Must successfully update kill flag");
 }
 DoInsertAtI1 = false;
@@ -1522,7 +1522,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
 if (PB != Latch) {
 Tmp2.clear();
 bool NotAnalyzed = TII->AnalyzeBranch(*PB, TB, FB, Tmp2, false);
-(void)NotAnalyzed; // supress compiler warning
+(void)NotAnalyzed; // suppress compiler warning
 assert (!NotAnalyzed && "Should be analyzable!");
 if (TB != Header && (Tmp2.empty() || FB != Header))
 TII->InsertBranch(*PB, NewPH, 0, EmptyCond, DL);
@@ -1534,7 +1534,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
 // Insert an unconditional branch to the header.
 TB = FB = 0;
 bool LatchNotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Tmp2, false);
-(void)LatchNotAnalyzed; // supress compiler warning
+(void)LatchNotAnalyzed; // suppress compiler warning
 assert (!LatchNotAnalyzed && "Should be analyzable!");
 if (!TB && !FB)
 TII->InsertBranch(*Latch, Header, 0, EmptyCond, DL);
@@ -1793,7 +1793,7 @@ bool HexagonInstrInfo::NonExtEquivalentExists (const MachineInstr *MI) const {
 return true;
 
 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
-// Check addressing mode and retreive non-ext equivalent instruction.
+// Check addressing mode and retrieve non-ext equivalent instruction.
 
 switch (getAddrMode(MI)) {
 case HexagonII::Absolute :
@@ -1827,7 +1827,7 @@ short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const {
 return NonExtOpcode;
 
 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
-// Check addressing mode and retreive non-ext equivalent instruction.
+// Check addressing mode and retrieve non-ext equivalent instruction.
 switch (getAddrMode(MI)) {
 case HexagonII::Absolute :
 return Hexagon::getBasedWithImmOffset(MI->getOpcode());
@@ -869,7 +869,7 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
 TempInst.addOperand(MCOperand::CreateReg(BaseRegNum));
 Instructions.push_back(TempInst);
 TempInst.clear();
-// And finaly, create original instruction with low part
+// And finally, create original instruction with low part
 // of offset and new base.
 TempInst.setOpcode(Inst.getOpcode());
 TempInst.addOperand(MCOperand::CreateReg(RegOpNum));
@@ -1247,7 +1247,7 @@ MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
 return false;
 }
 // Look for the existing symbol, we should check if
-// we need to assigne the propper RegisterKind.
+// we need to assigne the proper RegisterKind.
 if (searchSymbolAlias(Operands, MipsOperand::Kind_None))
 return false;
 // Else drop to expression parsing.
@@ -3519,7 +3519,7 @@ class MSABitconvertPat<ValueType DstVT, ValueType SrcVT,
 MSAPat<(DstVT (bitconvert SrcVT:$src)),
 (COPY_TO_REGCLASS SrcVT:$src, DstRC), preds>;
 
-// These are endian-independant because the element size doesnt change
+// These are endian-independent because the element size doesnt change
 def : MSABitconvertPat<v8i16, v8f16, MSA128H>;
 def : MSABitconvertPat<v4i32, v4f32, MSA128W>;
 def : MSABitconvertPat<v2i64, v2f64, MSA128D>;
@@ -1258,7 +1258,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
 
 // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
-// stored type to i16 and propogate the "real" type as the memory type.
+// stored type to i16 and propagate the "real" type as the memory type.
 bool NeedExt = false;
 if (EltVT.getSizeInBits() < 16)
 NeedExt = true;
@@ -2074,7 +2074,7 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
 
 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
-// loaded type to i16 and propogate the "real" type as the memory type.
+// loaded type to i16 and propagate the "real" type as the memory type.
 bool NeedTrunc = false;
 if (EltVT.getSizeInBits() < 16) {
 EltVT = MVT::i16;
@@ -2161,7 +2161,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
 // Since LDU/LDG are target nodes, we cannot rely on DAG type
 // legalization.
 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
-// loaded type to i16 and propogate the "real" type as the memory type.
+// loaded type to i16 and propagate the "real" type as the memory type.
 bool NeedTrunc = false;
 if (EltVT.getSizeInBits() < 16) {
 EltVT = MVT::i16;
@@ -7,7 +7,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This pass replaces occurences of __nvvm_reflect("string") with an
+// This pass replaces occurrences of __nvvm_reflect("string") with an
 // integer based on -nvvm-reflect-list string=<int> option given to this pass.
 // If an undefined string value is seen in a call to __nvvm_reflect("string"),
 // a default value of 0 will be used.
@@ -84,7 +84,7 @@ NVVMReflectEnabled("nvvm-reflect-enable", cl::init(true), cl::Hidden,
 
 char NVVMReflect::ID = 0;
 INITIALIZE_PASS(NVVMReflect, "nvvm-reflect",
-"Replace occurences of __nvvm_reflect() calls with 0/1", false,
+"Replace occurrences of __nvvm_reflect() calls with 0/1", false,
 false)
 
 static cl::list<std::string>
@@ -7205,7 +7205,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
 // you might suspect (sizeof(vector) bytes after the last requested
 // load), but rather sizeof(vector) - 1 bytes after the last
 // requested vector. The point of this is to avoid a page fault if the
-// base address happend to be aligned. This works because if the base
+// base address happened to be aligned. This works because if the base
 // address is aligned, then adding less than a full vector length will
 // cause the last vector in the sequence to be (re)loaded. Otherwise,
 // the next vector will be fetched as you might suspect was necessary.
@@ -68,7 +68,7 @@ namespace ShaderType {
 /// various memory regions on the hardware. On the CPU
 /// all of the address spaces point to the same memory,
 /// however on the GPU, each address space points to
-/// a seperate piece of memory that is unique from other
+/// a separate piece of memory that is unique from other
 /// memory locations.
 namespace AMDGPUAS {
 enum AddressSpaces {
@@ -224,7 +224,7 @@ protected:
 /// Compute the reversed DFS post order of Blocks
 void orderBlocks(MachineFunction *MF);
 
-// Function originaly from CFGStructTraits
+// Function originally from CFGStructTraits
 void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
 DebugLoc DL = DebugLoc());
 MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
@@ -53,7 +53,7 @@ public:
 
 ~SIMCCodeEmitter() { }
 
-/// \breif Encode the instruction and write it to the OS.
+/// \brief Encode the instruction and write it to the OS.
 virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
 SmallVectorImpl<MCFixup> &Fixups) const;
 
@@ -50,7 +50,7 @@ private:
 
 /// IfCvt pass can generate "disabled" ALU clause marker that need to be
 /// removed and their content affected to the previous alu clause.
-/// This function parse instructions after CFAlu untill it find a disabled
+/// This function parse instructions after CFAlu until it find a disabled
 /// CFAlu and merge the content, or an enabled CFAlu.
 void cleanPotentialDisabledCFAlu(MachineInstr *CFAlu) const;
 
@@ -52,7 +52,7 @@ namespace R600_InstFlag {
 
 #define HAS_NATIVE_OPERANDS(Flags) ((Flags) & R600_InstFlag::NATIVE_OPERANDS)
 
-/// \brief Defines for extracting register infomation from register encoding
+/// \brief Defines for extracting register information from register encoding
 #define HW_REG_MASK 0x1ff
 #define HW_CHAN_SHIFT 9
 
@@ -990,7 +990,7 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
 DAG.getCondCode(ISD::SETNE));
 }
 
-/// LLVM generates byte-addresed pointers. For indirect addressing, we need to
+/// LLVM generates byte-addressed pointers. For indirect addressing, we need to
 /// convert these pointers to a register index. Each register holds
 /// 16 bytes, (4 x 32bit sub-register), but we need to take into account the
 /// \p StackWidth, which tells us how many of the 4 sub-registrers will be used
@@ -1389,8 +1389,8 @@ SDValue R600TargetLowering::LowerFormalArguments(
 DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
 MachinePointerInfo(UndefValue::get(PtrTy)),
 MemVT, false, false, 4);
-// 4 is the prefered alignment for
+// 4 is the preferred alignment for
 // the CONSTANT memory space.
 InVals.push_back(Arg);
 }
 return Chain;
@@ -43,7 +43,7 @@ private:
 unsigned Gen;
 /// Each OpenCL kernel has nine implicit parameters that are stored in the
 /// first nine dwords of a Vertex Buffer. These implicit parameters are
-/// lowered to load instructions which retreive the values from the Vertex
+/// lowered to load instructions which retrieve the values from the Vertex
 /// Buffer.
 SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
 SDLoc DL, unsigned DwordOffset) const;
@@ -138,7 +138,7 @@ namespace llvm {
 /// Same but using const index set instead of MI set.
 bool fitsConstReadLimitations(const std::vector<unsigned>&) const;
 
-/// \breif Vector instructions are instructions that must fill all
+/// \brief Vector instructions are instructions that must fill all
 /// instruction slots within an instruction group.
 bool isVector(const MachineInstr &MI) const;
 
@@ -2263,7 +2263,7 @@ let Inst{63-32} = Word1;
 //===--------------------------------------------------------------------===//
 //===---------------------------------------------------------------------===//
 // Custom Inserter for Branches and returns, this eventually will be a
-// seperate pass
+// separate pass
 //===---------------------------------------------------------------------===//
 let isTerminator = 1, usesCustomInserter = 1, isBranch = 1, isBarrier = 1 in {
 def BRANCH : ILFormat<(outs), (ins brtarget:$target),
@@ -66,7 +66,7 @@ private:
 }
 
 /// \returns register to PV chan mapping for bundle/single instructions that
-/// immediatly precedes I.
+/// immediately precedes I.
 DenseMap<unsigned, unsigned> getPreviousVector(MachineBasicBlock::iterator I)
 const {
 DenseMap<unsigned, unsigned> Result;
@@ -1083,7 +1083,7 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
 else
 return;
 
-// Nothing todo if they fit naturaly
+// Nothing to do if they fit naturally
 if (fitsRegClass(DAG, Operand, RegClass))
 return;
 
@@ -122,7 +122,7 @@ const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
 return RC;
 
 // If this register has a sub-register, we can safely assume it is a 32-bit
-// register, becuase all of SI's sub-registers are 32-bit.
+// register, because all of SI's sub-registers are 32-bit.
 if (isSGPRClass(RC)) {
 return &AMDGPU::SGPR_32RegClass;
 } else {
@@ -35,7 +35,7 @@ enum TOF {
 // Assembler: %hi(addr) or %lm(addr)
 MO_HI,
 
-// Extract bits 43-22 of an adress. Only for sethi.
+// Extract bits 43-22 of an address. Only for sethi.
 // Assembler: %h44(addr)
 MO_H44,
 
@@ -1076,7 +1076,7 @@ static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
 return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
 
-// The remaing cases are 1, 2, 0/1/3 and 0/2/3. All these are
+// The remaining cases are 1, 2, 0/1/3 and 0/2/3. All these are
 // can be done by inverting the low CC bit and applying one of the
 // sign-based extractions above.
 if (CCMask == (CCValid & SystemZ::CCMASK_1))
@@ -53,7 +53,7 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
 MachineFunction &MF = *MBB->getParent();
 
 // Get two load or store instructions. Use the original instruction for one
-// of them (arbitarily the second here) and create a clone for the other.
+// of them (arbitrarily the second here) and create a clone for the other.
 MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
 MBB->insert(MI, EarlierMI);
 
@ -759,7 +759,7 @@ let Defs = [CC], Uses = [CC] in {
|
|||||||
// Subtraction
|
// Subtraction
|
||||||
//===----------------------------------------------------------------------===//
|
//===----------------------------------------------------------------------===//
|
||||||
|
|
||||||
// Plain substraction. Although immediate forms exist, we use the
|
// Plain subtraction. Although immediate forms exist, we use the
|
||||||
// add-immediate instruction instead.
|
// add-immediate instruction instead.
|
||||||
let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
|
let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
|
||||||
// Subtraction of a register.
|
// Subtraction of a register.
|
||||||
|
@ -563,7 +563,7 @@ struct InternalInstruction {
|
|||||||
uint8_t prefixPresent[0x100];
|
uint8_t prefixPresent[0x100];
|
||||||
/* contains the location (for use with the reader) of the prefix byte */
|
/* contains the location (for use with the reader) of the prefix byte */
|
||||||
uint64_t prefixLocations[0x100];
|
uint64_t prefixLocations[0x100];
|
||||||
/* The value of the vector extention prefix(EVEX/VEX/XOP), if present */
|
/* The value of the vector extension prefix(EVEX/VEX/XOP), if present */
|
||||||
uint8_t vectorExtensionPrefix[4];
|
uint8_t vectorExtensionPrefix[4];
|
||||||
/* The type of the vector extension prefix */
|
/* The type of the vector extension prefix */
|
||||||
VectorExtensionType vectorExtensionType;
|
VectorExtensionType vectorExtensionType;
|
||||||
|
@ -1512,7 +1512,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
|
|||||||
// garbage. Indeed, only the less significant bit is supposed to be accurate.
|
// garbage. Indeed, only the less significant bit is supposed to be accurate.
|
||||||
// If we read more than the lsb, we may see non-zero values whereas lsb
|
// If we read more than the lsb, we may see non-zero values whereas lsb
|
||||||
// is zero. Therefore, we have to truncate Op0Reg to i1 for the select.
|
// is zero. Therefore, we have to truncate Op0Reg to i1 for the select.
|
||||||
// This is acheived by performing TEST against 1.
|
// This is achieved by performing TEST against 1.
|
||||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8ri))
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8ri))
|
||||||
.addReg(Op0Reg).addImm(1);
|
.addReg(Op0Reg).addImm(1);
|
||||||
unsigned ResultReg = createResultReg(RC);
|
unsigned ResultReg = createResultReg(RC);
|
||||||
|
@ -577,7 +577,7 @@ def IIC_NOP : InstrItinClass;
|
|||||||
//===----------------------------------------------------------------------===//
|
//===----------------------------------------------------------------------===//
|
||||||
// Processor instruction itineraries.
|
// Processor instruction itineraries.
|
||||||
|
|
||||||
// IssueWidth is analagous to the number of decode units. Core and its
|
// IssueWidth is analogous to the number of decode units. Core and its
|
||||||
// descendents, including Nehalem and SandyBridge have 4 decoders.
|
// descendents, including Nehalem and SandyBridge have 4 decoders.
|
||||||
// Resources beyond the decoder operate on micro-ops and are bufferred
|
// Resources beyond the decoder operate on micro-ops and are bufferred
|
||||||
// so adjacent micro-ops don't directly compete.
|
// so adjacent micro-ops don't directly compete.
|
||||||
|
@ -154,8 +154,8 @@ static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} while (CE->hasNUsesOrMore(1)); // We need to check becasue a recursive
|
} while (CE->hasNUsesOrMore(1)); // We need to check because a recursive
|
||||||
// sibbling may have used 'CE' when createReplacementInstr was called.
|
// sibling may have used 'CE' when createReplacementInstr was called.
|
||||||
CE->destroyConstant();
|
CE->destroyConstant();
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -77,8 +77,8 @@ static void FindUsedValues(GlobalVariable *LLVMUsed,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// True if A is better than B.
|
// True if A is better than B.
|
||||||
static bool IsBetterCannonical(const GlobalVariable &A,
|
static bool IsBetterCanonical(const GlobalVariable &A,
|
||||||
const GlobalVariable &B) {
|
const GlobalVariable &B) {
|
||||||
if (!A.hasLocalLinkage() && B.hasLocalLinkage())
|
if (!A.hasLocalLinkage() && B.hasLocalLinkage())
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
@ -160,7 +160,7 @@ bool ConstantMerge::runOnModule(Module &M) {
|
|||||||
// If this is the first constant we find or if the old one is local,
|
// If this is the first constant we find or if the old one is local,
|
||||||
// replace with the current one. If the current is externally visible
|
// replace with the current one. If the current is externally visible
|
||||||
// it cannot be replace, but can be the canonical constant we merge with.
|
// it cannot be replace, but can be the canonical constant we merge with.
|
||||||
if (Slot == 0 || IsBetterCannonical(*GV, *Slot))
|
if (Slot == 0 || IsBetterCanonical(*GV, *Slot))
|
||||||
Slot = GV;
|
Slot = GV;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -723,7 +723,7 @@ void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) {
|
|||||||
|
|
||||||
// Helper for writeThunk,
|
// Helper for writeThunk,
|
||||||
// Selects proper bitcast operation,
|
// Selects proper bitcast operation,
|
||||||
// but a bit simplier then CastInst::getCastOpcode.
|
// but a bit simpler then CastInst::getCastOpcode.
|
||||||
static Value* createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) {
|
static Value* createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) {
|
||||||
Type *SrcTy = V->getType();
|
Type *SrcTy = V->getType();
|
||||||
if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
|
if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
|
||||||
|
@ -77,7 +77,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
|
|||||||
// A single load+store correctly handles overlapping memory in the memmove
|
// A single load+store correctly handles overlapping memory in the memmove
|
||||||
// case.
|
// case.
|
||||||
uint64_t Size = MemOpLength->getLimitedValue();
|
uint64_t Size = MemOpLength->getLimitedValue();
|
||||||
assert(Size && "0-sized memory transfering should be removed already.");
|
assert(Size && "0-sized memory transferring should be removed already.");
|
||||||
|
|
||||||
if (Size > 8 || (Size&(Size-1)))
|
if (Size > 8 || (Size&(Size-1)))
|
||||||
return 0; // If not 1/2/4/8 bytes, exit.
|
return 0; // If not 1/2/4/8 bytes, exit.
|
||||||
@ -684,7 +684,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
|
|||||||
return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
|
return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Couldn't simplify - cannonicalize constant to the RHS.
|
// Couldn't simplify - canonicalize constant to the RHS.
|
||||||
std::swap(Arg0, Arg1);
|
std::swap(Arg0, Arg1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1193,10 +1193,10 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
|
|||||||
// will not occur because the result of OpI is exact (as we will for
|
// will not occur because the result of OpI is exact (as we will for
|
||||||
// FMul, for example) is hopeless. However, we *can* nonetheless
|
// FMul, for example) is hopeless. However, we *can* nonetheless
|
||||||
// frequently know that double rounding cannot occur (or that it is
|
// frequently know that double rounding cannot occur (or that it is
|
||||||
// innoculous) by taking advantage of the specific structure of
|
// innocuous) by taking advantage of the specific structure of
|
||||||
// infinitely-precise results that admit double rounding.
|
// infinitely-precise results that admit double rounding.
|
||||||
//
|
//
|
||||||
// Specifically, if OpWidth >= 2*DstWdith+1 and DstWidth is sufficent
|
// Specifically, if OpWidth >= 2*DstWdith+1 and DstWidth is sufficient
|
||||||
// to represent both sources, we can guarantee that the double
|
// to represent both sources, we can guarantee that the double
|
||||||
// rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
|
// rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
|
||||||
// "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
|
// "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
|
||||||
|
@ -2048,7 +2048,7 @@ static APInt DemandedBitsLHSMask(ICmpInst &I,
|
|||||||
|
|
||||||
/// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
|
/// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
|
||||||
/// should be swapped.
|
/// should be swapped.
|
||||||
/// The descision is based on how many times these two operands are reused
|
/// The decision is based on how many times these two operands are reused
|
||||||
/// as subtract operands and their positions in those instructions.
|
/// as subtract operands and their positions in those instructions.
|
||||||
/// The rational is that several architectures use the same instruction for
|
/// The rational is that several architectures use the same instruction for
|
||||||
/// both subtract and cmp, thus it is better if the order of those operands
|
/// both subtract and cmp, thus it is better if the order of those operands
|
||||||
@ -2064,7 +2064,7 @@ static bool swapMayExposeCSEOpportunities(const Value * Op0,
|
|||||||
// Each time Op0 is the first operand, count -1: swapping is bad, the
|
// Each time Op0 is the first operand, count -1: swapping is bad, the
|
||||||
// subtract has already the same layout as the compare.
|
// subtract has already the same layout as the compare.
|
||||||
// Each time Op0 is the second operand, count +1: swapping is good, the
|
// Each time Op0 is the second operand, count +1: swapping is good, the
|
||||||
// subtract has a diffrent layout as the compare.
|
// subtract has a different layout as the compare.
|
||||||
// At the end, if the benefit is greater than 0, Op0 should come second to
|
// At the end, if the benefit is greater than 0, Op0 should come second to
|
||||||
// expose more CSE opportunities.
|
// expose more CSE opportunities.
|
||||||
int GlobalSwapBenefits = 0;
|
int GlobalSwapBenefits = 0;
|
||||||
|
@ -1013,7 +1013,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
|
|||||||
// references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
|
// references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
|
||||||
// If newRHS == newLHS, we want to remap any references from newRHS to
|
// If newRHS == newLHS, we want to remap any references from newRHS to
|
||||||
// newLHS so that we can properly identify splats that may occur due to
|
// newLHS so that we can properly identify splats that may occur due to
|
||||||
// obfuscation accross the two vectors.
|
// obfuscation across the two vectors.
|
||||||
if (eltMask >= 0 && newRHS != NULL && newLHS != newRHS)
|
if (eltMask >= 0 && newRHS != NULL && newLHS != newRHS)
|
||||||
eltMask += newLHSWidth;
|
eltMask += newLHSWidth;
|
||||||
}
|
}
|
||||||
|
@ -1629,7 +1629,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
|
|||||||
return &BI;
|
return &BI;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cannonicalize fcmp_one -> fcmp_oeq
|
// Canonicalize fcmp_one -> fcmp_oeq
|
||||||
FCmpInst::Predicate FPred; Value *Y;
|
FCmpInst::Predicate FPred; Value *Y;
|
||||||
if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
|
if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
|
||||||
TrueDest, FalseDest)) &&
|
TrueDest, FalseDest)) &&
|
||||||
@ -1645,7 +1645,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
|
|||||||
return &BI;
|
return &BI;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cannonicalize icmp_ne -> icmp_eq
|
// Canonicalize icmp_ne -> icmp_eq
|
||||||
ICmpInst::Predicate IPred;
|
ICmpInst::Predicate IPred;
|
||||||
if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
|
if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
|
||||||
TrueDest, FalseDest)) &&
|
TrueDest, FalseDest)) &&
|
||||||
|
@ -249,7 +249,7 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
|
|||||||
ShadowMapping Mapping;
|
ShadowMapping Mapping;
|
||||||
|
|
||||||
// OR-ing shadow offset if more efficient (at least on x86),
|
// OR-ing shadow offset if more efficient (at least on x86),
|
||||||
// but on ppc64 we have to use add since the shadow offset is not neccesary
|
// but on ppc64 we have to use add since the shadow offset is not necessary
|
||||||
// 1/8-th of the address space.
|
// 1/8-th of the address space.
|
||||||
Mapping.OrShadowOffset = !IsPPC64 && !ClShort64BitOffset;
|
Mapping.OrShadowOffset = !IsPPC64 && !ClShort64BitOffset;
|
||||||
|
|
||||||
|
@ -1964,7 +1964,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
|
|||||||
// Now, get the shadow for the RetVal.
|
// Now, get the shadow for the RetVal.
|
||||||
if (!I.getType()->isSized()) return;
|
if (!I.getType()->isSized()) return;
|
||||||
IRBuilder<> IRBBefore(&I);
|
IRBuilder<> IRBBefore(&I);
|
||||||
// Untill we have full dynamic coverage, make sure the retval shadow is 0.
|
// Until we have full dynamic coverage, make sure the retval shadow is 0.
|
||||||
Value *Base = getShadowPtrForRetval(&I, IRBBefore);
|
Value *Base = getShadowPtrForRetval(&I, IRBBefore);
|
||||||
IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
|
IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
|
||||||
Instruction *NextInsn = 0;
|
Instruction *NextInsn = 0;
|
||||||
|
@ -487,7 +487,7 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
|
// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
|
||||||
// standards. For background see C++11 standard. A slightly older, publically
|
// standards. For background see C++11 standard. A slightly older, publicly
|
||||||
// available draft of the standard (not entirely up-to-date, but close enough
|
// available draft of the standard (not entirely up-to-date, but close enough
|
||||||
// for casual browsing) is available here:
|
// for casual browsing) is available here:
|
||||||
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
|
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
|
||||||
|
@ -382,7 +382,7 @@ namespace {
|
|||||||
void clear();
|
void clear();
|
||||||
|
|
||||||
/// Conservatively merge the two RRInfo. Returns true if a partial merge has
|
/// Conservatively merge the two RRInfo. Returns true if a partial merge has
|
||||||
/// occured, false otherwise.
|
/// occurred, false otherwise.
|
||||||
bool Merge(const RRInfo &Other);
|
bool Merge(const RRInfo &Other);
|
||||||
|
|
||||||
};
|
};
|
||||||
@ -659,7 +659,7 @@ namespace {
|
|||||||
/// which pass through this block. This is only valid after both the
|
/// which pass through this block. This is only valid after both the
|
||||||
/// top-down and bottom-up traversals are complete.
|
/// top-down and bottom-up traversals are complete.
|
||||||
///
|
///
|
||||||
/// Returns true if overflow occured. Returns false if overflow did not
|
/// Returns true if overflow occurred. Returns false if overflow did not
|
||||||
/// occur.
|
/// occur.
|
||||||
bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
|
bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
|
||||||
if (TopDownPathCount == OverflowOccurredValue ||
|
if (TopDownPathCount == OverflowOccurredValue ||
|
||||||
@ -667,7 +667,7 @@ namespace {
|
|||||||
return true;
|
return true;
|
||||||
unsigned long long Product =
|
unsigned long long Product =
|
||||||
(unsigned long long)TopDownPathCount*BottomUpPathCount;
|
(unsigned long long)TopDownPathCount*BottomUpPathCount;
|
||||||
// Overflow occured if any of the upper bits of Product are set or if all
|
// Overflow occurred if any of the upper bits of Product are set or if all
|
||||||
// the lower bits of Product are all set.
|
// the lower bits of Product are all set.
|
||||||
return (Product >> 32) ||
|
return (Product >> 32) ||
|
||||||
((PathCount = Product) == OverflowOccurredValue);
|
((PathCount = Product) == OverflowOccurredValue);
|
||||||
@ -711,7 +711,7 @@ void BBState::MergePred(const BBState &Other) {
|
|||||||
|
|
||||||
// In order to be consistent, we clear the top down pointers when by adding
|
// In order to be consistent, we clear the top down pointers when by adding
|
||||||
// TopDownPathCount becomes OverflowOccurredValue even though "true" overflow
|
// TopDownPathCount becomes OverflowOccurredValue even though "true" overflow
|
||||||
// has not occured.
|
// has not occurred.
|
||||||
if (TopDownPathCount == OverflowOccurredValue) {
|
if (TopDownPathCount == OverflowOccurredValue) {
|
||||||
clearTopDownPointers();
|
clearTopDownPointers();
|
||||||
return;
|
return;
|
||||||
@ -755,7 +755,7 @@ void BBState::MergeSucc(const BBState &Other) {
|
|||||||
|
|
||||||
// In order to be consistent, we clear the top down pointers when by adding
|
// In order to be consistent, we clear the top down pointers when by adding
|
||||||
// BottomUpPathCount becomes OverflowOccurredValue even though "true" overflow
|
// BottomUpPathCount becomes OverflowOccurredValue even though "true" overflow
|
||||||
// has not occured.
|
// has not occurred.
|
||||||
if (BottomUpPathCount == OverflowOccurredValue) {
|
if (BottomUpPathCount == OverflowOccurredValue) {
|
||||||
clearBottomUpPointers();
|
clearBottomUpPointers();
|
||||||
return;
|
return;
|
||||||
@ -1808,13 +1808,13 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
|
|||||||
// pointer has multiple owners implying that we must be more conservative.
|
// pointer has multiple owners implying that we must be more conservative.
|
||||||
//
|
//
|
||||||
// This comes up in the context of a pointer being ``KnownSafe''. In the
|
// This comes up in the context of a pointer being ``KnownSafe''. In the
|
||||||
// presense of a block being initialized, the frontend will emit the
|
// presence of a block being initialized, the frontend will emit the
|
||||||
// objc_retain on the original pointer and the release on the pointer loaded
|
// objc_retain on the original pointer and the release on the pointer loaded
|
||||||
// from the alloca. The optimizer will through the provenance analysis
|
// from the alloca. The optimizer will through the provenance analysis
|
||||||
// realize that the two are related, but since we only require KnownSafe in
|
// realize that the two are related, but since we only require KnownSafe in
|
||||||
// one direction, will match the inner retain on the original pointer with
|
// one direction, will match the inner retain on the original pointer with
|
||||||
// the guard release on the original pointer. This is fixed by ensuring that
|
// the guard release on the original pointer. This is fixed by ensuring that
|
||||||
// in the presense of allocas we only unconditionally remove pointers if
|
// in the presence of allocas we only unconditionally remove pointers if
|
||||||
// both our retain and our release are KnownSafe.
|
// both our retain and our release are KnownSafe.
|
||||||
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
|
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
|
||||||
if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
|
if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
|
||||||
|
@ -109,8 +109,8 @@ namespace {
|
|||||||
bool preliminaryScreen();
|
bool preliminaryScreen();
|
||||||
|
|
||||||
/// Check if the given conditional branch is based on the comparison
|
/// Check if the given conditional branch is based on the comparison
|
||||||
/// beween a variable and zero, and if the variable is non-zero, the
|
/// between a variable and zero, and if the variable is non-zero, the
|
||||||
/// control yeilds to the loop entry. If the branch matches the behavior,
|
/// control yields to the loop entry. If the branch matches the behavior,
|
||||||
/// the variable involved in the comparion is returned. This function will
|
/// the variable involved in the comparion is returned. This function will
|
||||||
/// be called to see if the precondition and postcondition of the loop
|
/// be called to see if the precondition and postcondition of the loop
|
||||||
/// are in desirable form.
|
/// are in desirable form.
|
||||||
@ -521,7 +521,7 @@ void NclPopcountRecognize::transform(Instruction *CntInst,
|
|||||||
// TripCnt is exactly the number of iterations the loop has
|
// TripCnt is exactly the number of iterations the loop has
|
||||||
TripCnt = NewCount;
|
TripCnt = NewCount;
|
||||||
|
|
||||||
// If the popoulation counter's initial value is not zero, insert Add Inst.
|
// If the population counter's initial value is not zero, insert Add Inst.
|
||||||
Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
|
Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
|
||||||
ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
|
ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
|
||||||
if (!InitConst || !InitConst->isZero()) {
|
if (!InitConst || !InitConst->isZero()) {
|
||||||
|
@ -240,7 +240,7 @@ bool FlattenCFGOpt::FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder,
|
|||||||
BranchInst *BI = dyn_cast<BranchInst>(CurrBlock->getTerminator());
|
BranchInst *BI = dyn_cast<BranchInst>(CurrBlock->getTerminator());
|
||||||
CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
|
CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
|
||||||
CmpInst::Predicate Predicate = CI->getPredicate();
|
CmpInst::Predicate Predicate = CI->getPredicate();
|
||||||
// Cannonicalize icmp_ne -> icmp_eq, fcmp_one -> fcmp_oeq
|
// Canonicalize icmp_ne -> icmp_eq, fcmp_one -> fcmp_oeq
|
||||||
if ((Predicate == CmpInst::ICMP_NE) || (Predicate == CmpInst::FCMP_ONE)) {
|
if ((Predicate == CmpInst::ICMP_NE) || (Predicate == CmpInst::FCMP_ONE)) {
|
||||||
CI->setPredicate(ICmpInst::getInversePredicate(Predicate));
|
CI->setPredicate(ICmpInst::getInversePredicate(Predicate));
|
||||||
BI->swapSuccessors();
|
BI->swapSuccessors();
|
||||||
|
@ -62,9 +62,9 @@ static cl::opt<bool>
|
|||||||
SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true),
|
SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true),
|
||||||
cl::desc("Sink common instructions down to the end block"));
|
cl::desc("Sink common instructions down to the end block"));
|
||||||
|
|
||||||
static cl::opt<bool>
|
static cl::opt<bool> HoistCondStores(
|
||||||
HoistCondStores("simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
|
"simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
|
||||||
cl::desc("Hoist conditional stores if an unconditional store preceeds"));
|
cl::desc("Hoist conditional stores if an unconditional store precedes"));
|
||||||
|
|
||||||
STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
|
STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
|
||||||
STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables");
|
STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables");
|
||||||
|
@ -2371,7 +2371,7 @@ void InnerLoopVectorizer::vectorizeLoop() {
|
|||||||
setDebugLocFromInst(Builder, RdxDesc.StartValue);
|
setDebugLocFromInst(Builder, RdxDesc.StartValue);
|
||||||
|
|
||||||
// We need to generate a reduction vector from the incoming scalar.
|
// We need to generate a reduction vector from the incoming scalar.
|
||||||
// To do so, we need to generate the 'identity' vector and overide
|
// To do so, we need to generate the 'identity' vector and override
|
||||||
// one of the elements with the incoming scalar reduction. We need
|
// one of the elements with the incoming scalar reduction. We need
|
||||||
// to do it in the vector-loop preheader.
|
// to do it in the vector-loop preheader.
|
||||||
Builder.SetInsertPoint(LoopBypassBlocks.front()->getTerminator());
|
Builder.SetInsertPoint(LoopBypassBlocks.front()->getTerminator());
|
||||||
@ -3713,8 +3713,8 @@ void AccessAnalysis::processMemAccesses(bool UseDeferred) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool NeedDepCheck = false;
|
bool NeedDepCheck = false;
|
||||||
// Check whether there is the possiblity of dependency because of underlying
|
// Check whether there is the possibility of dependency because of
|
||||||
// objects being the same.
|
// underlying objects being the same.
|
||||||
typedef SmallVector<Value*, 16> ValueVector;
|
typedef SmallVector<Value*, 16> ValueVector;
|
||||||
ValueVector TempObjects;
|
ValueVector TempObjects;
|
||||||
GetUnderlyingObjects(Ptr, TempObjects, DL);
|
GetUnderlyingObjects(Ptr, TempObjects, DL);
|
||||||
|
@ -1871,7 +1871,7 @@ private:
|
|||||||
StoreListMap StoreRefs;
|
StoreListMap StoreRefs;
|
||||||
};
|
};
|
||||||
|
|
||||||
/// \brief Check that the Values in the slice in VL array are still existant in
|
/// \brief Check that the Values in the slice in VL array are still existent in
|
||||||
/// the WeakVH array.
|
/// the WeakVH array.
|
||||||
/// Vectorization of part of the VL array may cause later values in the VL array
|
/// Vectorization of part of the VL array may cause later values in the VL array
|
||||||
/// to become invalid. We track when this has happened in the WeakVH array.
|
/// to become invalid. We track when this has happened in the WeakVH array.
|
||||||
@ -2516,7 +2516,7 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start over at the next instruction of a differnt type (or the end).
|
// Start over at the next instruction of a different type (or the end).
|
||||||
IncIt = SameTypeIt;
|
IncIt = SameTypeIt;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -4,7 +4,7 @@
|
|||||||
; getUDivExpr()->getZeroExtendExpr()->isLoopBackedgeGuardedBy()
|
; getUDivExpr()->getZeroExtendExpr()->isLoopBackedgeGuardedBy()
|
||||||
;
|
;
|
||||||
; We actually want SCEV simplification to fail gracefully in this
|
; We actually want SCEV simplification to fail gracefully in this
|
||||||
; case, so there's no output to check, just the absense of stack overflow.
|
; case, so there's no output to check, just the absence of stack overflow.
|
||||||
|
|
||||||
@c = common global i8 0, align 1
|
@c = common global i8 0, align 1
|
||||||
|
|
||||||
|
@ -43,7 +43,7 @@ entry:
|
|||||||
; OPT: define
|
; OPT: define
|
||||||
; OPT: store i32 1
|
; OPT: store i32 1
|
||||||
; OPT: store i16 4
|
; OPT: store i16 4
|
||||||
; Remove a load and propogate the value from store.
|
; Remove a load and propagate the value from store.
|
||||||
; OPT: ret i32 1
|
; OPT: ret i32 1
|
||||||
%s.addr = alloca i32*, align 8
|
%s.addr = alloca i32*, align 8
|
||||||
%A.addr = alloca %struct.StructA*, align 8
|
%A.addr = alloca %struct.StructA*, align 8
|
||||||
@ -98,7 +98,7 @@ entry:
|
|||||||
; OPT: define
|
; OPT: define
|
||||||
; OPT: store i32 1
|
; OPT: store i32 1
|
||||||
; OPT: store i16 4
|
; OPT: store i16 4
|
||||||
; Remove a load and propogate the value from store.
|
; Remove a load and propagate the value from store.
|
||||||
; OPT: ret i32 1
|
; OPT: ret i32 1
|
||||||
%A.addr = alloca %struct.StructA*, align 8
|
%A.addr = alloca %struct.StructA*, align 8
|
||||||
%B.addr = alloca %struct.StructB*, align 8
|
%B.addr = alloca %struct.StructB*, align 8
|
||||||
@ -127,7 +127,7 @@ entry:
|
|||||||
; OPT: define
|
; OPT: define
|
||||||
; OPT: store i32 1
|
; OPT: store i32 1
|
||||||
; OPT: store i32 4
|
; OPT: store i32 4
|
||||||
; Remove a load and propogate the value from store.
|
; Remove a load and propagate the value from store.
|
||||||
; OPT: ret i32 1
|
; OPT: ret i32 1
|
||||||
%A.addr = alloca %struct.StructA*, align 8
|
%A.addr = alloca %struct.StructA*, align 8
|
||||||
%B.addr = alloca %struct.StructB*, align 8
|
%B.addr = alloca %struct.StructB*, align 8
|
||||||
@ -155,7 +155,7 @@ entry:
|
|||||||
; OPT: define
|
; OPT: define
|
||||||
; OPT: store i32 1
|
; OPT: store i32 1
|
||||||
; OPT: store i32 4
|
; OPT: store i32 4
|
||||||
; Remove a load and propogate the value from store.
|
; Remove a load and propagate the value from store.
|
||||||
; OPT: ret i32 1
|
; OPT: ret i32 1
|
||||||
%A.addr = alloca %struct.StructA*, align 8
|
%A.addr = alloca %struct.StructA*, align 8
|
||||||
%B.addr = alloca %struct.StructB*, align 8
|
%B.addr = alloca %struct.StructB*, align 8
|
||||||
@ -184,7 +184,7 @@ entry:
|
|||||||
; OPT: define
|
; OPT: define
|
||||||
; OPT: store i32 1
|
; OPT: store i32 1
|
||||||
; OPT: store i32 4
|
; OPT: store i32 4
|
||||||
; Remove a load and propogate the value from store.
|
; Remove a load and propagate the value from store.
|
||||||
; OPT: ret i32 1
|
; OPT: ret i32 1
|
||||||
%A.addr = alloca %struct.StructA*, align 8
|
%A.addr = alloca %struct.StructA*, align 8
|
||||||
%S.addr = alloca %struct.StructS*, align 8
|
%S.addr = alloca %struct.StructS*, align 8
|
||||||
@ -212,7 +212,7 @@ entry:
|
|||||||
; OPT: define
|
; OPT: define
|
||||||
; OPT: store i32 1
|
; OPT: store i32 1
|
||||||
; OPT: store i16 4
|
; OPT: store i16 4
|
||||||
; Remove a load and propogate the value from store.
|
; Remove a load and propagate the value from store.
|
||||||
; OPT: ret i32 1
|
; OPT: ret i32 1
|
||||||
%A.addr = alloca %struct.StructA*, align 8
|
%A.addr = alloca %struct.StructA*, align 8
|
||||||
%S.addr = alloca %struct.StructS*, align 8
|
%S.addr = alloca %struct.StructS*, align 8
|
||||||
@ -240,7 +240,7 @@ entry:
|
|||||||
; OPT: define
|
; OPT: define
|
||||||
; OPT: store i32 1
|
; OPT: store i32 1
|
||||||
; OPT: store i32 4
|
; OPT: store i32 4
|
||||||
; Remove a load and propogate the value from store.
|
; Remove a load and propagate the value from store.
|
||||||
; OPT: ret i32 1
|
; OPT: ret i32 1
|
||||||
%S.addr = alloca %struct.StructS*, align 8
|
%S.addr = alloca %struct.StructS*, align 8
|
||||||
%S2.addr = alloca %struct.StructS2*, align 8
|
%S2.addr = alloca %struct.StructS2*, align 8
|
||||||
@ -268,7 +268,7 @@ entry:
|
|||||||
; OPT: define
|
; OPT: define
|
||||||
; OPT: store i32 1
|
; OPT: store i32 1
|
||||||
; OPT: store i16 4
|
; OPT: store i16 4
|
||||||
; Remove a load and propogate the value from store.
|
; Remove a load and propagate the value from store.
|
||||||
; OPT: ret i32 1
|
; OPT: ret i32 1
|
||||||
%S.addr = alloca %struct.StructS*, align 8
|
%S.addr = alloca %struct.StructS*, align 8
|
||||||
%S2.addr = alloca %struct.StructS2*, align 8
|
%S2.addr = alloca %struct.StructS2*, align 8
|
||||||
@ -296,7 +296,7 @@ entry:
|
|||||||
; OPT: define
|
; OPT: define
|
||||||
; OPT: store i32 1
|
; OPT: store i32 1
|
||||||
; OPT: store i32 4
|
; OPT: store i32 4
|
||||||
; Remove a load and propogate the value from store.
|
; Remove a load and propagate the value from store.
|
||||||
; OPT: ret i32 1
|
; OPT: ret i32 1
|
||||||
%C.addr = alloca %struct.StructC*, align 8
|
%C.addr = alloca %struct.StructC*, align 8
|
||||||
%D.addr = alloca %struct.StructD*, align 8
|
%D.addr = alloca %struct.StructD*, align 8
|
||||||
|
@ -413,7 +413,7 @@ let test_global_values () =
|
|||||||
|
|
||||||
let test_global_variables () =
|
let test_global_variables () =
|
||||||
let (++) x f = f x; x in
|
let (++) x f = f x; x in
|
||||||
let fourty_two32 = const_int i32_type 42 in
|
let forty_two32 = const_int i32_type 42 in
|
||||||
|
|
||||||
group "declarations"; begin
|
group "declarations"; begin
|
||||||
(* CHECK: @GVar01 = external global i32
|
(* CHECK: @GVar01 = external global i32
|
||||||
@ -444,16 +444,16 @@ let test_global_variables () =
|
|||||||
* CHECK: @QGVar02 = addrspace(3) global i32 42
|
* CHECK: @QGVar02 = addrspace(3) global i32 42
|
||||||
* CHECK: @QGVar03 = addrspace(3) global i32 42
|
* CHECK: @QGVar03 = addrspace(3) global i32 42
|
||||||
*)
|
*)
|
||||||
let g = define_global "GVar02" fourty_two32 m in
|
let g = define_global "GVar02" forty_two32 m in
|
||||||
let g2 = declare_global i32_type "GVar03" m ++
|
let g2 = declare_global i32_type "GVar03" m ++
|
||||||
set_initializer fourty_two32 in
|
set_initializer forty_two32 in
|
||||||
insist (not (is_declaration g));
|
insist (not (is_declaration g));
|
||||||
insist (not (is_declaration g2));
|
insist (not (is_declaration g2));
|
||||||
insist ((global_initializer g) == (global_initializer g2));
|
insist ((global_initializer g) == (global_initializer g2));
|
||||||
|
|
||||||
let g = define_qualified_global "QGVar02" fourty_two32 3 m in
|
let g = define_qualified_global "QGVar02" forty_two32 3 m in
|
||||||
let g2 = declare_qualified_global i32_type "QGVar03" 3 m ++
|
let g2 = declare_qualified_global i32_type "QGVar03" 3 m ++
|
||||||
set_initializer fourty_two32 in
|
set_initializer forty_two32 in
|
||||||
insist (not (is_declaration g));
|
insist (not (is_declaration g));
|
||||||
insist (not (is_declaration g2));
|
insist (not (is_declaration g2));
|
||||||
insist ((global_initializer g) == (global_initializer g2));
|
insist ((global_initializer g) == (global_initializer g2));
|
||||||
@ -462,34 +462,34 @@ let test_global_variables () =
|
|||||||
(* CHECK: GVar04{{.*}}thread_local
|
(* CHECK: GVar04{{.*}}thread_local
|
||||||
*)
|
*)
|
||||||
group "threadlocal";
|
group "threadlocal";
|
||||||
let g = define_global "GVar04" fourty_two32 m ++
|
let g = define_global "GVar04" forty_two32 m ++
|
||||||
set_thread_local true in
|
set_thread_local true in
|
||||||
insist (is_thread_local g);
|
insist (is_thread_local g);
|
||||||
|
|
||||||
(* CHECK: GVar05{{.*}}thread_local(initialexec)
|
(* CHECK: GVar05{{.*}}thread_local(initialexec)
|
||||||
*)
|
*)
|
||||||
group "threadlocal_mode";
|
group "threadlocal_mode";
|
||||||
let g = define_global "GVar05" fourty_two32 m ++
|
let g = define_global "GVar05" forty_two32 m ++
|
||||||
set_thread_local_mode ThreadLocalMode.InitialExec in
|
set_thread_local_mode ThreadLocalMode.InitialExec in
|
||||||
insist ((thread_local_mode g) = ThreadLocalMode.InitialExec);
|
insist ((thread_local_mode g) = ThreadLocalMode.InitialExec);
|
||||||
|
|
||||||
(* CHECK: GVar06{{.*}}externally_initialized
|
(* CHECK: GVar06{{.*}}externally_initialized
|
||||||
*)
|
*)
|
||||||
group "externally_initialized";
|
group "externally_initialized";
|
||||||
let g = define_global "GVar06" fourty_two32 m ++
|
let g = define_global "GVar06" forty_two32 m ++
|
||||||
set_externally_initialized true in
|
set_externally_initialized true in
|
||||||
insist (is_externally_initialized g);
|
insist (is_externally_initialized g);
|
||||||
|
|
||||||
(* CHECK-NOWHERE-NOT: GVar07
|
(* CHECK-NOWHERE-NOT: GVar07
|
||||||
*)
|
*)
|
||||||
group "delete";
|
group "delete";
|
||||||
let g = define_global "GVar07" fourty_two32 m in
|
let g = define_global "GVar07" forty_two32 m in
|
||||||
delete_global g;
|
delete_global g;
|
||||||
|
|
||||||
(* CHECK: ConstGlobalVar{{.*}}constant
|
(* CHECK: ConstGlobalVar{{.*}}constant
|
||||||
*)
|
*)
|
||||||
group "constant";
|
group "constant";
|
||||||
let g = define_global "ConstGlobalVar" fourty_two32 m in
|
let g = define_global "ConstGlobalVar" forty_two32 m in
|
||||||
insist (not (is_global_constant g));
|
insist (not (is_global_constant g));
|
||||||
set_global_constant true g;
|
set_global_constant true g;
|
||||||
insist (is_global_constant g);
|
insist (is_global_constant g);
|
||||||
|
@ -13,7 +13,7 @@
|
|||||||
;structs at varying alignments. Each test is run for arm, thumb2 and thumb1.
|
;structs at varying alignments. Each test is run for arm, thumb2 and thumb1.
|
||||||
;We check for the strings in the generated object code using llvm-objdump
|
;We check for the strings in the generated object code using llvm-objdump
|
||||||
;because it provides better assurance that we are generating instructions
|
;because it provides better assurance that we are generating instructions
|
||||||
;for the correct architecture. Otherwise we could accidently generate an
|
;for the correct architecture. Otherwise we could accidentally generate an
|
||||||
;ARM instruction for THUMB1 and wouldn't detect it because the assembly
|
;ARM instruction for THUMB1 and wouldn't detect it because the assembly
|
||||||
;code representation is the same, but the object code would be generated
|
;code representation is the same, but the object code would be generated
|
||||||
;incorrectly. For each test we check for the label, a load instruction of the
|
;incorrectly. For each test we check for the label, a load instruction of the
|
||||||
|
@ -7,7 +7,7 @@ target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"
|
|||||||
|
|
||||||
; Test that the MI Scheduler's initPolicy does not crash when i32 is
|
; Test that the MI Scheduler's initPolicy does not crash when i32 is
|
||||||
; unsupported. The content of the asm check below is unimportant. It
|
; unsupported. The content of the asm check below is unimportant. It
|
||||||
; only verifies that the code generator ran succesfully.
|
; only verifies that the code generator ran successfully.
|
||||||
;
|
;
|
||||||
; CHECK-LABEL: @f
|
; CHECK-LABEL: @f
|
||||||
; CHECK: mov.w &y, &x
|
; CHECK: mov.w &y, &x
|
||||||
|
@ -10,7 +10,7 @@
|
|||||||
; The legalizer legalized ; the <4 x i8>'s into <4 x i32>'s, then a call to
|
; The legalizer legalized ; the <4 x i8>'s into <4 x i32>'s, then a call to
|
||||||
; isVSplat() returned the splat value for <i8 -1, i8 -1, ...> as a 32-bit APInt
|
; isVSplat() returned the splat value for <i8 -1, i8 -1, ...> as a 32-bit APInt
|
||||||
; (255), but the zeroinitializer splat value as an 8-bit APInt (0). The
|
; (255), but the zeroinitializer splat value as an 8-bit APInt (0). The
|
||||||
; assertion occured when trying to check the values were bitwise inverses of
|
; assertion occurred when trying to check the values were bitwise inverses of
|
||||||
; each-other.
|
; each-other.
|
||||||
;
|
;
|
||||||
; It should at least successfully build.
|
; It should at least successfully build.
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
; Test that floating-point compares are ommitted if CC already has the
|
; Test that floating-point compares are omitted if CC already has the
|
||||||
; right value.
|
; right value.
|
||||||
;
|
;
|
||||||
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
|
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
|
||||||
|
@ -208,7 +208,7 @@ define void @f4(i32 *%ptr, i64 %x) {
|
|||||||
ret void
|
ret void
|
||||||
}
|
}
|
||||||
|
|
||||||
; This is the largest frame size for which the prepatory increment for
|
; This is the largest frame size for which the preparatory increment for
|
||||||
; "lmg %r14, %r15, ..." can be done using AGHI.
|
; "lmg %r14, %r15, ..." can be done using AGHI.
|
||||||
define void @f5(i32 *%ptr, i64 %x) {
|
define void @f5(i32 *%ptr, i64 %x) {
|
||||||
; CHECK-LABEL: f5:
|
; CHECK-LABEL: f5:
|
||||||
@ -242,7 +242,7 @@ define void @f5(i32 *%ptr, i64 %x) {
|
|||||||
ret void
|
ret void
|
||||||
}
|
}
|
||||||
|
|
||||||
; This is the smallest frame size for which the prepatory increment for
|
; This is the smallest frame size for which the preparatory increment for
|
||||||
; "lmg %r14, %r15, ..." needs to be done using AGFI.
|
; "lmg %r14, %r15, ..." needs to be done using AGFI.
|
||||||
define void @f6(i32 *%ptr, i64 %x) {
|
define void @f6(i32 *%ptr, i64 %x) {
|
||||||
; CHECK-LABEL: f6:
|
; CHECK-LABEL: f6:
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
; Test that compares are ommitted if CC already has the right value
|
; Test that compares are omitted if CC already has the right value
|
||||||
; (z10 version).
|
; (z10 version).
|
||||||
;
|
;
|
||||||
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
|
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
; Test that compares are ommitted if CC already has the right value
|
; Test that compares are omitted if CC already has the right value
|
||||||
; (z196 version).
|
; (z196 version).
|
||||||
;
|
;
|
||||||
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
|
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
|
||||||
|
@ -3,7 +3,7 @@
|
|||||||
; During X86 fastisel, the address of indirect call was resolved
|
; During X86 fastisel, the address of indirect call was resolved
|
||||||
; through bitcast, ptrtoint, and inttoptr instructions. This is valid
|
; through bitcast, ptrtoint, and inttoptr instructions. This is valid
|
||||||
; only if the related instructions are in that same basic block, otherwise
|
; only if the related instructions are in that same basic block, otherwise
|
||||||
; we may reference variables that were not live accross basic blocks
|
; we may reference variables that were not live across basic blocks
|
||||||
; resulting in undefined virtual registers.
|
; resulting in undefined virtual registers.
|
||||||
;
|
;
|
||||||
; In this example, this is illustrated by a the spill/reload of the
|
; In this example, this is illustrated by a the spill/reload of the
|
||||||
@ -25,7 +25,7 @@
|
|||||||
; CHECK: movq [[ARG2_SLOT]], %rdi
|
; CHECK: movq [[ARG2_SLOT]], %rdi
|
||||||
; Load the second argument
|
; Load the second argument
|
||||||
; CHECK: movq [[ARG2_SLOT]], %rsi
|
; CHECK: movq [[ARG2_SLOT]], %rsi
|
||||||
; Load the thrid argument
|
; Load the third argument
|
||||||
; CHECK: movq [[ARG2_SLOT]], %rdx
|
; CHECK: movq [[ARG2_SLOT]], %rdx
|
||||||
; Load the function pointer.
|
; Load the function pointer.
|
||||||
; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]]
|
; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]]
|
||||||
@ -64,7 +64,7 @@ label_end:
|
|||||||
; CHECK: movq [[ARG2_SLOT]], %rdi
|
; CHECK: movq [[ARG2_SLOT]], %rdi
|
||||||
; Load the second argument
|
; Load the second argument
|
||||||
; CHECK: movq [[ARG2_SLOT]], %rsi
|
; CHECK: movq [[ARG2_SLOT]], %rsi
|
||||||
; Load the thrid argument
|
; Load the third argument
|
||||||
; CHECK: movq [[ARG2_SLOT]], %rdx
|
; CHECK: movq [[ARG2_SLOT]], %rdx
|
||||||
; Load the function pointer.
|
; Load the function pointer.
|
||||||
; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]]
|
; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]]
|
||||||
@ -103,7 +103,7 @@ label_end:
|
|||||||
; CHECK: movq [[ARG2_SLOT]], %rdi
|
; CHECK: movq [[ARG2_SLOT]], %rdi
|
||||||
; Load the second argument
|
; Load the second argument
|
||||||
; CHECK: movq [[ARG2_SLOT]], %rsi
|
; CHECK: movq [[ARG2_SLOT]], %rsi
|
||||||
; Load the thrid argument
|
; Load the third argument
|
||||||
; CHECK: movq [[ARG2_SLOT]], %rdx
|
; CHECK: movq [[ARG2_SLOT]], %rdx
|
||||||
; Load the function pointer.
|
; Load the function pointer.
|
||||||
; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]]
|
; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]]
|
||||||
|
@ -701,7 +701,7 @@ exit:
|
|||||||
|
|
||||||
define void @unanalyzable_branch_to_best_succ(i1 %cond) {
|
define void @unanalyzable_branch_to_best_succ(i1 %cond) {
|
||||||
; Ensure that we can handle unanalyzable branches where the destination block
|
; Ensure that we can handle unanalyzable branches where the destination block
|
||||||
; gets selected as the optimal sucessor to merge.
|
; gets selected as the optimal successor to merge.
|
||||||
;
|
;
|
||||||
; CHECK: unanalyzable_branch_to_best_succ
|
; CHECK: unanalyzable_branch_to_best_succ
|
||||||
; CHECK: %entry
|
; CHECK: %entry
|
||||||
|
@ -6,7 +6,7 @@
|
|||||||
%class.Complex = type { float, float }
|
%class.Complex = type { float, float }
|
||||||
|
|
||||||
|
|
||||||
; Check that independant slices leads to independant loads then the slices leads to
|
; Check that independent slices leads to independent loads then the slices leads to
|
||||||
; different register file.
|
; different register file.
|
||||||
;
|
;
|
||||||
; The layout is:
|
; The layout is:
|
||||||
|
@ -4,7 +4,7 @@
|
|||||||
; %tmp1676 = xor i32 %tmp1634, %tmp1530 have zero demanded bits after
|
; %tmp1676 = xor i32 %tmp1634, %tmp1530 have zero demanded bits after
|
||||||
; DAGCombiner optimization pass. These are changed to undef and in turn
|
; DAGCombiner optimization pass. These are changed to undef and in turn
|
||||||
; the successor shl(s) become shl undef, 1. This pattern then matches
|
; the successor shl(s) become shl undef, 1. This pattern then matches
|
||||||
; shl x, 1 -> add x, x. add undef, undef doesn't guarentee the low
|
; shl x, 1 -> add x, x. add undef, undef doesn't guarantee the low
|
||||||
; order bit is zero and is incorrect.
|
; order bit is zero and is incorrect.
|
||||||
;
|
;
|
||||||
; See rdar://9453156 and rdar://9487392.
|
; See rdar://9453156 and rdar://9487392.
|
||||||
|
@ -6,7 +6,7 @@
|
|||||||
|
|
||||||
; 10 bytes of data in this DW_FORM_block1 representation of the location of 'tls'
|
; 10 bytes of data in this DW_FORM_block1 representation of the location of 'tls'
|
||||||
; CHECK: .byte 10{{ *}}# DW_AT_location
|
; CHECK: .byte 10{{ *}}# DW_AT_location
|
||||||
; DW_OP_const8u (0x0e == 14) of adress
|
; DW_OP_const8u (0x0e == 14) of address
|
||||||
; CHECK: .byte 14
|
; CHECK: .byte 14
|
||||||
; The debug relocation of the address of the tls variable
|
; The debug relocation of the address of the tls variable
|
||||||
; CHECK: .quad tls@DTPOFF
|
; CHECK: .quad tls@DTPOFF
|
||||||
|
@ -9,7 +9,7 @@
|
|||||||
@ then libunwind will reconstruct the stack pointer from the frame pointer.
|
@ then libunwind will reconstruct the stack pointer from the frame pointer.
|
||||||
@ The reconstruction code is implemented by two different unwind opcode:
|
@ The reconstruction code is implemented by two different unwind opcode:
|
||||||
@ (i) the unwind opcode to copy stack offset from the other register, and
|
@ (i) the unwind opcode to copy stack offset from the other register, and
|
||||||
@ (ii) the unwind opcode to add or substract the stack offset.
|
@ (ii) the unwind opcode to add or subtract the stack offset.
|
||||||
@
|
@
|
||||||
@ This file includes several cases separated by different range of -offset
|
@ This file includes several cases separated by different range of -offset
|
||||||
@
|
@
|
||||||
|
@ -3,7 +3,7 @@
|
|||||||
; RUN: FileCheck %s
|
; RUN: FileCheck %s
|
||||||
|
|
||||||
; FIXME: This file needs to be in .s form!
|
; FIXME: This file needs to be in .s form!
|
||||||
; We wanna test relocatable thumb function call,
|
; We want to test relocatable thumb function call,
|
||||||
; but ARMAsmParser cannot handle "bl foo(PLT)" yet
|
; but ARMAsmParser cannot handle "bl foo(PLT)" yet
|
||||||
|
|
||||||
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:32-n32"
|
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:32-n32"
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
// The purpose of this test is to verify that bss sections are emited correctly.
|
// The purpose of this test is to verify that bss sections are emitted correctly.
|
||||||
|
|
||||||
// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s | llvm-readobj -s | FileCheck %s
|
// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s | llvm-readobj -s | FileCheck %s
|
||||||
// RUN: llvm-mc -filetype=obj -triple x86_64-pc-win32 %s | llvm-readobj -s | FileCheck %s
|
// RUN: llvm-mc -filetype=obj -triple x86_64-pc-win32 %s | llvm-readobj -s | FileCheck %s
|
||||||
|
@ -52,7 +52,7 @@ pad_sections aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
|
|||||||
|
|
||||||
// /1000029 == 4 + 10 + (5 * (2 + (20 * 10 * 1000) + 1))
|
// /1000029 == 4 + 10 + (5 * (2 + (20 * 10 * 1000) + 1))
|
||||||
// v | | v ~~~~~~~~~~~~~~ v
|
// v | | v ~~~~~~~~~~~~~~ v
|
||||||
// table size v v "p0" pad NUL seperator
|
// table size v v "p0" pad NUL separator
|
||||||
// "s12345678\0" # of pad sections
|
// "s12345678\0" # of pad sections
|
||||||
//
|
//
|
||||||
// CHECK: Section {
|
// CHECK: Section {
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
// The purpose of this test is to verify that bss sections are emited correctly.
|
// The purpose of this test is to verify that bss sections are emitted correctly.
|
||||||
|
|
||||||
// RUN: llvm-mc -filetype=obj -triple i686-apple-darwin9 %s | llvm-readobj -s | FileCheck %s
|
// RUN: llvm-mc -filetype=obj -triple i686-apple-darwin9 %s | llvm-readobj -s | FileCheck %s
|
||||||
// RUN: llvm-mc -filetype=obj -triple x86_64-apple-darwin9 %s | llvm-readobj -s | FileCheck %s
|
// RUN: llvm-mc -filetype=obj -triple x86_64-apple-darwin9 %s | llvm-readobj -s | FileCheck %s
|
||||||
|
@ -31,7 +31,7 @@ else:
|
|||||||
|
|
||||||
define i32 @outer2(i32* %ptr) {
|
define i32 @outer2(i32* %ptr) {
|
||||||
; Test that an inbounds GEP disables this -- it isn't safe in general as
|
; Test that an inbounds GEP disables this -- it isn't safe in general as
|
||||||
; wrapping changes the behavior of lessthan and greaterthan comparisions.
|
; wrapping changes the behavior of lessthan and greaterthan comparisons.
|
||||||
; CHECK-LABEL: @outer2(
|
; CHECK-LABEL: @outer2(
|
||||||
; CHECK: call i32 @inner2
|
; CHECK: call i32 @inner2
|
||||||
; CHECK: ret i32
|
; CHECK: ret i32
|
||||||
|
@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
|
|||||||
|
|
||||||
; PR16073
|
; PR16073
|
||||||
|
|
||||||
; Because we were caching value pointers accross a function call that could RAUW
|
; Because we were caching value pointers across a function call that could RAUW
|
||||||
; we would generate an undefined value store below:
|
; we would generate an undefined value store below:
|
||||||
; SCEVExpander::expandCodeFor would change a value (the start value of an
|
; SCEVExpander::expandCodeFor would change a value (the start value of an
|
||||||
; induction) that we cached in the induction variable list.
|
; induction) that we cached in the induction variable list.
|
||||||
|
Some files were not shown because too many files have changed in this diff.