remove function names from comments; NFC
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224080 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in: parent 73460aea4f, commit 6f44989d39
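The change applied in every hunk below is the same: drop the leading `functionName - ` prefix from Doxygen comments. Doxygen attaches a `///` block to the declaration that follows it, so repeating the name adds nothing and goes stale when a function is renamed. As a before/after sketch taken from the first hunk (declarations abbreviated; `Type` is LLVM's type class):

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign);

// becomes

/// Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign);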
@@ -1741,7 +1741,7 @@ EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
   return VT.changeVectorElementTypeToInteger();
 }
 
-/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
+/// Helper for getByValTypeAlignment to determine
 /// the desired ByVal argument alignment.
 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
   if (MaxAlign == 16)
@@ -1766,7 +1766,7 @@ static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
   }
 }
 
-/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
+/// Return the desired alignment for ByVal aggregate
 /// function arguments in the caller parameter area. For X86, aggregates
 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
 /// are at 4-byte boundaries.
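For readers outside the patch, the rule these comments encode is simple: a byval aggregate is placed at a 16-byte boundary if any (possibly nested) member is an SSE vector, and at a 4-byte boundary otherwise. A self-contained sketch of that rule, with a hypothetical `Field` type standing in for LLVM's real Type hierarchy — an illustration, not the LLVM implementation:

#include <vector>

// Hypothetical stand-in for an aggregate member; LLVM walks its real Type
// hierarchy (structs, arrays, vectors) recursively instead.
struct Field {
  bool IsVector;              // true if this member is an SSE vector
  std::vector<Field> Members; // nested aggregate members
};

// Sketch of the x86 byval rule the comment describes: place the aggregate
// at a 16-byte boundary as soon as any nested member is a vector, and at
// the 4-byte default otherwise.
static unsigned byValAlignSketch(const Field &F) {
  if (F.IsVector)
    return 16;
  for (const Field &M : F.Members)
    if (byValAlignSketch(M) == 16)
      return 16;
  return 4;
}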
@@ -1785,7 +1785,7 @@ unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
   return Align;
 }
 
-/// getOptimalMemOpType - Returns the target specific optimal type for load
+/// Returns the target specific optimal type for load
 /// and store operations as a result of memset, memcpy, and memmove
 /// lowering. If DstAlign is zero that means it's safe to destination
 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
@@ -1851,7 +1851,7 @@ X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
   return true;
 }
 
-/// getJumpTableEncoding - Return the entry encoding for a jump table in the
+/// Return the entry encoding for a jump table in the
 /// current function. The returned value is a member of the
 /// MachineJumpTableInfo::JTEntryKind enum.
 unsigned X86TargetLowering::getJumpTableEncoding() const {
@@ -1877,8 +1877,7 @@ X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                  MCSymbolRefExpr::VK_GOTOFF, Ctx);
 }
 
-/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
-/// jumptable.
+/// Returns relocation base for the given PIC jumptable.
 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                     SelectionDAG &DAG) const {
   if (!Subtarget->is64Bit())
@@ -1888,9 +1887,8 @@ SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
   return Table;
 }
 
-/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
-/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
-/// MCExpr.
+/// This returns the relocation base for the given PIC jumptable,
+/// the same as getPICJumpTableRelocBase, but as an MCExpr.
 const MCExpr *X86TargetLowering::
 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                              MCContext &Ctx) const {
@@ -2149,7 +2147,7 @@ X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
   return VT.bitsLT(MinVT) ? MinVT : VT;
 }
 
-/// LowerCallResult - Lower the result values of a call into the
+/// Lower the result values of a call into the
 /// appropriate copies out of appropriate physical registers.
 ///
 SDValue
@@ -2229,8 +2227,7 @@ callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
   return StackStructReturn;
 }
 
-/// ArgsAreStructReturn - Determines whether a function uses struct
-/// return semantics.
+/// Determines whether a function uses struct return semantics.
 static StructReturnType
 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
   if (Ins.empty())
@@ -2244,10 +2241,9 @@ argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
   return StackStructReturn;
 }
 
-/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
-/// by "Src" to address "Dst" with size and alignment information specified by
-/// the specific parameter attribute. The copy will be passed as a byval
-/// function parameter.
+/// Make a copy of an aggregate at address specified by "Src" to address
+/// "Dst" with size and alignment information specified by the specific
+/// parameter attribute. The copy will be passed as a byval function parameter.
 static SDValue
 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
@@ -2259,7 +2255,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                        MachinePointerInfo(), MachinePointerInfo());
 }
 
-/// IsTailCallConvention - Return true if the calling convention is one that
+/// Return true if the calling convention is one that
 /// supports tail call optimization.
 static bool IsTailCallConvention(CallingConv::ID CC) {
   return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
@@ -2284,7 +2280,7 @@ bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
   return true;
 }
 
-/// FuncIsMadeTailCallSafe - Return true if the function is being made into
+/// Return true if the function is being made into
 /// a tailcall target by changing its ABI.
 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
                                    bool GuaranteedTailCallOpt) {
@@ -2696,7 +2692,7 @@ X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
                       false, false, 0);
 }
 
-/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call
+/// Emit a load of return address if tail call
 /// optimization is performed and it is required.
 SDValue
 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
@@ -2713,7 +2709,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
   return SDValue(OutRetAddr.getNode(), 1);
 }
 
-/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call
+/// Emit a store of the return address if tail call
 /// optimization is performed and it is required (FPDiff!=0).
 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
                                         SDValue Chain, SDValue RetAddrFrIdx,
@@ -24571,7 +24567,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
-/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal"
+/// Return 'true' if this vector operation is "horizontal"
 /// and return the operands for the horizontal operation in LHS and RHS. A
 /// horizontal operation performs the binary operation on successive elements
 /// of its first operand, then on successive elements of its second operand,
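The "horizontal" shape that `isHorizontalBinOp` matches is easiest to see at a concrete width. For 4-element vectors, a horizontal add (as in the SSE3 HADDPS instruction) combines successive element pairs of the first operand, then of the second; a scalar model, for illustration only:

#include <array>

// Scalar model of a 4-wide horizontal add: sums of successive pairs of A,
// then of B — exactly the shape the comment above describes.
static std::array<float, 4> hadd(const std::array<float, 4> &A,
                                 const std::array<float, 4> &B) {
  return {A[0] + A[1], A[2] + A[3], B[0] + B[1], B[2] + B[3]};
}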
@@ -24697,7 +24693,7 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
   return true;
 }
 
-/// PerformFADDCombine - Do target-specific dag combines on floating point adds.
+/// Do target-specific dag combines on floating point adds.
 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
   EVT VT = N->getValueType(0);
@@ -24712,7 +24708,7 @@ static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
-/// PerformFSUBCombine - Do target-specific dag combines on floating point subs.
+/// Do target-specific dag combines on floating point subs.
 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
   EVT VT = N->getValueType(0);
@@ -24727,8 +24723,7 @@ static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
-/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
-/// X86ISD::FXOR nodes.
+/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
   assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
   // F[X]OR(0.0, x) -> x
@@ -24742,8 +24737,7 @@ static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
   return SDValue();
 }
 
-/// PerformFMinFMaxCombine - Do target-specific dag combines on X86ISD::FMIN and
-/// X86ISD::FMAX nodes.
+/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
   assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
 
@@ -24764,7 +24758,7 @@ static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
                      N->getOperand(0), N->getOperand(1));
 }
 
-/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
+/// Do target-specific dag combines on X86ISD::FAND nodes.
 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
   // FAND(0.0, x) -> 0.0
   // FAND(x, 0.0) -> 0.0
@@ -24777,7 +24771,7 @@ static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
   return SDValue();
 }
 
-/// PerformFANDNCombine - Do target-specific dag combines on X86ISD::FANDN nodes
+/// Do target-specific dag combines on X86ISD::FANDN nodes
 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
   // FANDN(x, 0.0) -> 0.0
   // FANDN(0.0, x) -> x
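The fold annotations visible in the context lines (`F[X]OR(0.0, x) -> x`, `FAND(x, 0.0) -> 0.0`, `FANDN(x, 0.0) -> 0.0`, `FANDN(0.0, x) -> x`) hold because these X86 nodes are bitwise operations and +0.0 is the all-zero bit pattern, not because of IEEE arithmetic. A bit-level model of the FANDN case — a sketch, not the combine code itself:

#include <cstdint>

// FANDN(a, b) computes ~a & b (ANDNPS semantics), so a zero second operand
// annihilates the result and a zero first operand passes b through:
//   fandn(x, 0) == 0   models FANDN(x, 0.0) -> 0.0
//   fandn(0, x) == x   models FANDN(0.0, x) -> x
static uint32_t fandn(uint32_t A, uint32_t B) { return ~A & B; }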