Start using GPRs to copy around MMX values instead of MMX regs.

GCC apparently does this, and existing code depends on not having to
execute emms when this happens.  This is x86-64 only so far; the second
half should handle x86-32.

rdar://5741668


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@47470 91177308-0d34-0410-b5e6-96231b3b80d8
Chris Lattner 2008-02-22 02:09:43 +00:00
parent 59a8cdd36d
commit 149a4e56fc
2 changed files with 46 additions and 0 deletions
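
For context, the kind of source that hits this combine is a bare 64-bit
MMX-typed copy. A minimal, hypothetical C++ sketch (the function name and
the exact registers shown are illustrative, not from the commit):

    // Hypothetical example of the pattern this commit targets: a 64-bit
    // MMX-typed copy that should go through a GPR, leaving the x87/MMX
    // register file (and therefore emms) out of the picture entirely.
    #include <mmintrin.h>

    void copy_m64(__m64 *dst, __m64 *src) {
      // With the combine:  movq (%rsi), %rax / movq %rax, (%rdi)
      // Without it:        movq (%rsi), %mm0 / movq %mm0, (%rdi),
      //                    which requires an emms before later x87 code.
      *dst = *src;
    }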

lib/Target/X86/X86ISelLowering.cpp

@@ -704,6 +704,7 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
   // We have target-specific dag combine patterns for the following nodes:
   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
   setTargetDAGCombine(ISD::SELECT);
+  setTargetDAGCombine(ISD::STORE);
 
   computeRegisterProperties();
 
@@ -5872,6 +5873,35 @@ static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
   return SDOperand();
 }
 
+/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
+static SDOperand PerformSTORECombine(StoreSDNode *St, SelectionDAG &DAG,
+                                     const X86Subtarget *Subtarget) {
+  // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
+  // the FP state in cases where an emms may be missing.
+  if (MVT::isVector(St->getValue().getValueType()) &&
+      MVT::getSizeInBits(St->getValue().getValueType()) == 64 &&
+      // Must be a store of a load.
+      isa<LoadSDNode>(St->getChain()) &&
+      St->getChain().Val == St->getValue().Val &&
+      St->getValue().hasOneUse() && St->getChain().hasOneUse() &&
+      !St->isVolatile() && !cast<LoadSDNode>(St->getChain())->isVolatile()) {
+    LoadSDNode *Ld = cast<LoadSDNode>(St->getChain());
+
+    // If we are a 64-bit capable x86, lower to a single movq load/store pair.
+    if (Subtarget->is64Bit()) {
+      SDOperand NewLd = DAG.getLoad(MVT::i64, Ld->getChain(), Ld->getBasePtr(),
+                                    Ld->getSrcValue(), Ld->getSrcValueOffset(),
+                                    Ld->isVolatile(), Ld->getAlignment());
+      return DAG.getStore(NewLd.getValue(1), NewLd, St->getBasePtr(),
+                          St->getSrcValue(), St->getSrcValueOffset(),
+                          St->isVolatile(), St->getAlignment());
+    }
+
+    // TODO: 2 32-bit copies.
+  }
+  return SDOperand();
+}
+
 /// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
 /// X86ISD::FXOR nodes.
 static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
@@ -5908,6 +5938,8 @@ SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
   default: break;
   case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget);
   case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
+  case ISD::STORE:
+    return PerformSTORECombine(cast<StoreSDNode>(N), DAG, Subtarget);
   case X86ISD::FXOR:
   case X86ISD::FOR:         return PerformFORCombine(N, DAG);
   case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
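
The TODO inside PerformSTORECombine is the x86-32 half mentioned in the
commit message. As a hypothetical sketch only (not part of this commit),
the two 32-bit copies could be built with the SelectionDAG API of this
vintage along these lines; the chaining and the reuse of the original
alignment are assumptions:

    // Hypothetical sketch of the x86-32 path: split the 64-bit copy into
    // two i32 GPR load/store pairs so no MMX register is ever touched.
    SDOperand LoAddr = Ld->getBasePtr();
    SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                                   DAG.getConstant(4, MVT::i32));

    SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
                                 Ld->getSrcValue(), Ld->getSrcValueOffset(),
                                 Ld->isVolatile(), Ld->getAlignment());
    SDOperand HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
                                 Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
                                 Ld->isVolatile(), Ld->getAlignment());

    SDOperand StLoAddr = St->getBasePtr();
    SDOperand StHiAddr = DAG.getNode(ISD::ADD, MVT::i32, StLoAddr,
                                     DAG.getConstant(4, MVT::i32));

    // Chain each store on its corresponding load, then merge both store
    // chains with a TokenFactor so dependent nodes see a single chain.
    SDOperand LoSt = DAG.getStore(LoLd.getValue(1), LoLd, StLoAddr,
                                  St->getSrcValue(), St->getSrcValueOffset(),
                                  St->isVolatile(), St->getAlignment());
    SDOperand HiSt = DAG.getStore(HiLd.getValue(1), HiLd, StHiAddr,
                                  St->getSrcValue(), St->getSrcValueOffset()+4,
                                  St->isVolatile(), St->getAlignment());
    return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);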

test/CodeGen/X86/mmx-copy-gprs.ll (new file)

@@ -0,0 +1,14 @@
+; RUN: llvm-as < %s | llc -march=x86-64 | grep {movq.*(%rsi), %rax}
+
+; This test should use GPRs to copy the MMX value, not MMX regs.  Using MMX
+; regs increases the number of places that need to execute emms.
+; rdar://5741668
+
+target triple = "x86_64-apple-darwin8"
+
+define i32 @foo(<1 x i64>* %x, <1 x i64>* %y) nounwind {
+entry:
+	%tmp1 = load <1 x i64>* %y, align 8		; <<1 x i64>> [#uses=1]
+	store <1 x i64> %tmp1, <1 x i64>* %x, align 8
+	ret i32 undef
+}