Clean up the Darwin MMX calling convention handling and make it more generic. This also fixes a subtle bug where the 6th v1i64 argument was passed incorrectly.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@77963 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Anton Korobeynikov 2009-08-03 08:13:24 +00:00
parent cf6b739d3d
commit 80cb8aa862
2 changed files with 36 additions and 62 deletions

View File

@ -137,26 +137,26 @@ def CC_X86_64_C : CallingConv<[
// The 'nest' parameter, if any, is passed in R10. // The 'nest' parameter, if any, is passed in R10.
CCIfNest<CCAssignToReg<[R10]>>, CCIfNest<CCAssignToReg<[R10]>>,
// The first 6 v1i64 vector arguments are passed in GPRs on Darwin.
CCIfType<[v1i64],
CCIfSubtarget<"isTargetDarwin()",
CCBitConvertToType<i64>>>,
// The first 6 integer arguments are passed in integer registers. // The first 6 integer arguments are passed in integer registers.
CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>, CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>, CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,
// The first 8 FP/Vector arguments are passed in XMM registers.
CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
CCIfSubtarget<"hasSSE1()",
CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
// The first 8 MMX (except for v1i64) vector arguments are passed in XMM // The first 8 MMX (except for v1i64) vector arguments are passed in XMM
// registers on Darwin. // registers on Darwin.
CCIfType<[v8i8, v4i16, v2i32, v2f32], CCIfType<[v8i8, v4i16, v2i32, v2f32],
CCIfSubtarget<"isTargetDarwin()", CCIfSubtarget<"isTargetDarwin()",
CCIfSubtarget<"hasSSE2()", CCIfSubtarget<"hasSSE2()",
CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>>, CCPromoteToType<v2i64>>>>,
// The first 8 v1i64 vector arguments are passed in GPRs on Darwin. // The first 8 FP/Vector arguments are passed in XMM registers.
CCIfType<[v1i64], CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
CCIfSubtarget<"isTargetDarwin()", CCIfSubtarget<"hasSSE1()",
CCAssignToReg<[RDI, RSI, RDX, RCX, R8]>>>, CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
// Integer/FP values get stored in stack slots that are 8 bytes in size and // Integer/FP values get stored in stack slots that are 8 bytes in size and
// 8-byte aligned if there are no more registers to hold them. // 8-byte aligned if there are no more registers to hold them.

View File

@ -1429,24 +1429,10 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
RC = X86::FR64RegisterClass; RC = X86::FR64RegisterClass;
else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
RC = X86::VR128RegisterClass; RC = X86::VR128RegisterClass;
else if (RegVT.isVector()) { else if (RegVT.isVector() && RegVT.getSizeInBits() == 64)
assert(RegVT.getSizeInBits() == 64); RC = X86::VR64RegisterClass;
if (!Is64Bit) else
RC = X86::VR64RegisterClass; // MMX values are passed in MMXs.
else {
// Darwin calling convention passes MMX values in either GPRs or
// XMMs in x86-64. Other targets pass them in memory.
if (RegVT != MVT::v1i64 && Subtarget->hasSSE2()) {
RC = X86::VR128RegisterClass; // MMX values are passed in XMMs.
RegVT = MVT::v2i64;
} else {
RC = X86::GR64RegisterClass; // v1i64 values are passed in GPRs.
RegVT = MVT::i64;
}
}
} else {
llvm_unreachable("Unknown argument type!"); llvm_unreachable("Unknown argument type!");
}
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT); SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT);
@ -1460,19 +1446,19 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
else if (VA.getLocInfo() == CCValAssign::ZExt) else if (VA.getLocInfo() == CCValAssign::ZExt)
ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT())); DAG.getValueType(VA.getValVT()));
else if (VA.getLocInfo() == CCValAssign::BCvt)
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
if (VA.getLocInfo() != CCValAssign::Full) if (VA.getLocInfo() != CCValAssign::Full &&
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); VA.getLocInfo() != CCValAssign::BCvt) {
// Handle MMX values passed in XMM regs.
// Handle MMX values passed in GPRs. if (RegVT.isVector()) {
if (Is64Bit && RegVT != VA.getLocVT()) {
if (RegVT.getSizeInBits() == 64 && RC == X86::GR64RegisterClass)
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), ArgValue);
else if (RC == X86::VR128RegisterClass) {
ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
ArgValue, DAG.getConstant(0, MVT::i64)); ArgValue, DAG.getConstant(0, MVT::i64));
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), ArgValue); ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
} } else
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
} }
ArgValues.push_back(ArgValue); ArgValues.push_back(ArgValue);
@ -1734,6 +1720,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// of tail call optimization arguments are handle later. // of tail call optimization arguments are handle later.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
MVT RegVT = VA.getLocVT();
SDValue Arg = TheCall->getArg(i); SDValue Arg = TheCall->getArg(i);
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i); ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
bool isByVal = Flags.isByVal(); bool isByVal = Flags.isByVal();
@ -1743,39 +1730,26 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
default: llvm_unreachable("Unknown loc info!"); default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break; case CCValAssign::Full: break;
case CCValAssign::SExt: case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
break; break;
case CCValAssign::ZExt: case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
break; break;
case CCValAssign::AExt: case CCValAssign::AExt:
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); if (RegVT.isVector() && RegVT.getSizeInBits() == 128) {
// Special case: passing MMX values in XMM registers.
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
} else
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::BCvt:
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, RegVT, Arg);
break; break;
} }
if (VA.isRegLoc()) { if (VA.isRegLoc()) {
if (Is64Bit) {
MVT RegVT = VA.getLocVT();
if (RegVT.isVector() && RegVT.getSizeInBits() == 64)
switch (VA.getLocReg()) {
default:
break;
case X86::RDI: case X86::RSI: case X86::RDX: case X86::RCX:
case X86::R8: {
// Special case: passing MMX values in GPR registers.
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
break;
}
case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3:
case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7: {
// Special case: passing MMX values in XMM registers.
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
break;
}
}
}
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else { } else {
if (!IsTailCall || (IsTailCall && isByVal)) { if (!IsTailCall || (IsTailCall && isByVal)) {