Mirror of https://github.com/RPCS3/llvm-mirror.git
Don't forego folding of loads into 64-bit adds when the other operand is a
signed 32-bit immediate. Unlike with the 8-bit signed immediate case, it isn't
actually smaller to fold a 32-bit signed immediate instead of a load. In fact,
it's larger in the case of 32-bit unsigned immediates, because they can be
materialized with movl instead of movq.

llvm-svn: 67001
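A minimal standalone sketch (not LLVM code; the helper names blocksFoldingOld
and blocksFoldingNew are hypothetical) of what this change does to the
load-folding heuristic for a 64-bit add, assuming the immediate is already
available as a plain 64-bit integer:

#include <cstdint>
#include <cstdio>

// Old rule: for an i64 add, any immediate that fits in a signed 32-bit field
// suppressed load folding; the immediate form was assumed to be smaller.
static bool blocksFoldingOld(int64_t Imm) {
  return Imm == (int64_t)(int32_t)Imm;
}

// New rule: only signed 8-bit immediates suppress load folding; wider
// immediates no longer keep the load from being folded into the add.
static bool blocksFoldingNew(int64_t Imm) {
  return Imm == (int64_t)(int8_t)Imm;
}

int main() {
  for (long long Imm : {4LL, 200LL, 100000LL, 1LL << 40}) {
    std::printf("imm=%lld  old blocks folding=%d  new blocks folding=%d\n",
                Imm, (int)blocksFoldingOld(Imm), (int)blocksFoldingNew(Imm));
  }
  return 0;
}

Under the old rule, an i64 add with a constant such as 100000 would refuse to
fold the load and use the immediate in the add instead; under the new rule the
load is folded, which per the commit message is no larger and can be smaller.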
commit e7495ef7aa
parent 2cdac55ad0
@@ -319,16 +319,9 @@ bool X86DAGToDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
       // addl 4(%esp), %eax
       // The former is 2 bytes shorter. In case where the increment is 1, then
       // the saving can be 4 bytes (by using incl %eax).
-      ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(U->getOperand(1));
-      if (Imm) {
-        if (U->getValueType(0) == MVT::i64) {
-          if ((int32_t)Imm->getZExtValue() == (int64_t)Imm->getZExtValue())
-            return false;
-        } else {
-          if ((int8_t)Imm->getZExtValue() == (int64_t)Imm->getZExtValue())
-            return false;
-        }
-      }
+      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(U->getOperand(1)))
+        if (Imm->getAPIntValue().isSignedIntN(8))
+          return false;
     }
   }
 
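As a reference for the new check itself, here is a small hedged example of
APInt::isSignedIntN (a real LLVM ADT API; the sample values are illustrative
only, not taken from this commit):

#include "llvm/ADT/APInt.h"
#include <cstdio>

int main() {
  // 64-bit immediates as the DAG would see them for an i64 add.
  llvm::APInt Small(64, 4);      // fits in a signed 8-bit immediate
  llvm::APInt Wide(64, 100000);  // needs a 32-bit immediate field
  // Only Small satisfies the new isSignedIntN(8) test.
  std::printf("%d %d\n", (int)Small.isSignedIntN(8), (int)Wide.isSignedIntN(8));
  return 0;
}

Small reports true, so the 8-bit immediate form is still preferred there; Wide
reports false, so the load would now be folded into the add.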