Supporting inline memmove isn't going to be worthwhile. The only way to avoid
violating a dependency when the source and destination overlap is to emit all
loads prior to stores. This would likely cause a great deal of spillage,
offsetting any potential gains.
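
To illustrate the problem (a minimal standalone sketch, not part of this patch;
the helper names CopyInterleaved/CopyLoadsFirst are hypothetical and model the
emitted load/store sequences in plain C++ rather than fast-isel emission):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Interleaved expansion: load a word, store it, advance. This mirrors the
    // small-memcpy lowering and is fine when Dst and Src don't overlap.
    static void CopyInterleaved(uint8_t *Dst, const uint8_t *Src, uint64_t Len) {
      for (uint64_t i = 0; i + 4 <= Len; i += 4) {
        uint32_t W;
        std::memcpy(&W, Src + i, 4);  // load
        std::memcpy(Dst + i, &W, 4);  // store; may clobber bytes Src still needs
      }
    }

    // Memmove-safe expansion: all loads before any store. A 16-byte transfer
    // keeps four words live at once, which is where the register pressure
    // (and likely spillage) comes from.
    static void CopyLoadsFirst(uint8_t *Dst, const uint8_t *Src, uint64_t Len) {
      uint32_t W[4];
      uint64_t N = Len / 4;  // assumes Len <= 16 and word-sized, per the small-copy threshold
      for (uint64_t i = 0; i != N; ++i)
        std::memcpy(&W[i], Src + 4 * i, 4);   // all loads first...
      for (uint64_t i = 0; i != N; ++i)
        std::memcpy(Dst + 4 * i, &W[i], 4);   // ...then all stores
    }

    int main() {
      uint8_t A[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
      uint8_t B[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
      CopyInterleaved(A + 4, A, 8);  // overlapping: A[8] ends up 0, not 4
      CopyLoadsFirst(B + 4, B, 8);   // correct: B[8] ends up 4
      std::printf("interleaved: %d  loads-first: %d\n", A[8], B[8]);
      return 0;
    }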


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@144585 91177308-0d34-0410-b5e6-96231b3b80d8
commit 2c42b8c912
parent ffc658b056
Author: Chad Rosier
Date:   2011-11-14 23:04:09 +00:00


@@ -185,9 +185,8 @@ class ARMFastISel : public FastISel {
     bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
     bool ARMComputeAddress(const Value *Obj, Address &Addr);
     void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
-    bool ARMIsMemXferSmall(uint64_t Len);
-    bool ARMTryEmitSmallMemXfer(Address Dest, Address Src, uint64_t Len,
-                                bool isMemCpy);
+    bool ARMIsMemCpySmall(uint64_t Len);
+    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
     unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
     unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
     unsigned ARMMaterializeInt(const Constant *C, EVT VT);
@@ -2196,19 +2195,13 @@ bool ARMFastISel::SelectCall(const Instruction *I,
   return true;
 }
 
-bool ARMFastISel::ARMIsMemXferSmall(uint64_t Len) {
+bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
   return Len <= 16;
 }
 
-bool ARMFastISel::ARMTryEmitSmallMemXfer(Address Dest, Address Src, uint64_t Len,
-                                         bool isMemCpy) {
-  // FIXME: Memmove's require a little more care because their source and
-  // destination may overlap.
-  if (!isMemCpy)
-    return false;
-
+bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len) {
   // Make sure we don't bloat code by inlining very large memcpy's.
-  if (!ARMIsMemXferSmall(Len))
+  if (!ARMIsMemCpySmall(Len))
     return false;
 
   // We don't care about alignment here since we just emit integer accesses.
@@ -2254,15 +2247,15 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
     // we would emit dead code because we don't currently handle memmoves.
     bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
     if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
-      // Small memcpy/memmove's are common enough that we want to do them
-      // without a call if possible.
+      // Small memcpy's are common enough that we want to do them without a call
+      // if possible.
       uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
-      if (ARMIsMemXferSmall(Len)) {
+      if (ARMIsMemCpySmall(Len)) {
         Address Dest, Src;
         if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
             !ARMComputeAddress(MTI.getRawSource(), Src))
           return false;
-        if (ARMTryEmitSmallMemXfer(Dest, Src, Len, isMemCpy))
+        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
           return true;
       }
     }