[FastIsel][X86] Add support for lowering the first 8 floating-point arguments.

Recommit with fixed argument attribute checking code, which is required to bail
out of all the cases we don't handle yet.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@210815 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Juergen Ributzka 2014-06-12 20:12:34 +00:00
parent 3455c40f91
commit 3f2b28dcaf
2 changed files with 65 additions and 20 deletions

View File

@@ -1948,31 +1948,43 @@ bool X86FastISel::FastLowerArguments() {
return false;
// Only handle simple cases. i.e. Up to 6 i32/i64 scalar arguments.
unsigned Idx = 1;
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I, ++Idx) {
if (Idx > 6)
return false;
unsigned GPRCnt = 0;
unsigned FPRCnt = 0;
unsigned Idx = 0;
for (auto const &Arg : F->args()) {
// The first argument is at index 1.
++Idx;
if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
F->getAttributes().hasAttribute(Idx, Attribute::Nest))
return false;
Type *ArgTy = I->getType();
Type *ArgTy = Arg.getType();
if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
return false;
EVT ArgVT = TLI.getValueType(ArgTy);
if (!ArgVT.isSimple()) return false;
switch (ArgVT.getSimpleVT().SimpleTy) {
default: return false;
case MVT::i32:
case MVT::i64:
++GPRCnt;
break;
case MVT::f32:
case MVT::f64:
if (!Subtarget->hasSSE1())
return false;
++FPRCnt;
break;
default:
return false;
}
if (GPRCnt > 6)
return false;
if (FPRCnt > 8)
return false;
}
static const MCPhysReg GPR32ArgRegs[] = {
@@ -1981,24 +1993,33 @@ bool X86FastISel::FastLowerArguments() {
static const MCPhysReg GPR64ArgRegs[] = {
X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
};
static const MCPhysReg XMMArgRegs[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
Idx = 0;
const TargetRegisterClass *RC32 = TLI.getRegClassFor(MVT::i32);
const TargetRegisterClass *RC64 = TLI.getRegClassFor(MVT::i64);
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I, ++Idx) {
bool is32Bit = TLI.getValueType(I->getType()) == MVT::i32;
const TargetRegisterClass *RC = is32Bit ? RC32 : RC64;
unsigned SrcReg = is32Bit ? GPR32ArgRegs[Idx] : GPR64ArgRegs[Idx];
unsigned GPRIdx = 0;
unsigned FPRIdx = 0;
for (auto const &Arg : F->args()) {
MVT VT = TLI.getSimpleValueType(Arg.getType());
const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
unsigned SrcReg;
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type.");
case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
case MVT::f32: // fall-through
case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
}
unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
unsigned ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY),
ResultReg).addReg(DstReg, getKillRegState(true));
UpdateValueMap(I, ResultReg);
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(DstReg, getKillRegState(true));
UpdateValueMap(&Arg, ResultReg);
}
return true;
}

View File

@@ -23,3 +23,27 @@ entry:
%add2 = add nsw i64 %add, %conv1
ret i64 %add2
}
; Exercise FastISel argument lowering with exactly 8 float arguments — the
; maximum the new code accepts (FPRCnt > 8 bails out), so all of XMM0-XMM7
; should be used. The fadd reduction tree forces every argument to be read,
; so a mis-lowered register assignment changes the result.
define float @t4(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h) {
entry:
  %add1 = fadd float %a, %b
  %add2 = fadd float %c, %d
  %add3 = fadd float %e, %f
  %add4 = fadd float %g, %h
  %add5 = fadd float %add1, %add2
  %add6 = fadd float %add3, %add4
  %add7 = fadd float %add5, %add6
  ret float %add7
}
; Same as @t4 but for f64: 8 double arguments, the FP-register limit handled
; by FastLowerArguments. Both MVT::f32 and MVT::f64 share the XMM0-XMM7
; argument registers in the new lowering, so this pins the f64 path.
define double @t5(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h) {
entry:
  %add1 = fadd double %a, %b
  %add2 = fadd double %c, %d
  %add3 = fadd double %e, %f
  %add4 = fadd double %g, %h
  %add5 = fadd double %add1, %add2
  %add6 = fadd double %add3, %add4
  %add7 = fadd double %add5, %add6
  ret double %add7
}