Bug 602788 - nanojit: more constant folding of quads in ExprFilter, please. r=stejohns.

--HG--
extra : convert_revision : 8409c49a72dc223ca0622bc18b396e81f8d45608

parent 4d05ce6702
commit b4192c8e6b
@@ -632,6 +632,9 @@ namespace nanojit
     {
         NanoAssert(oprnd1 && oprnd2);

+        //-------------------------------------------------------------------
+        // Folding where the two operands are equal
+        //-------------------------------------------------------------------
         if (oprnd1 == oprnd2) {
             // The operands are equal.
             switch (v) {
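The case list of this switch is elided from the hunk, but folds of this kind need no knowledge of the shared operand's value. A standalone sketch of the usual identities (my illustration, not the patch's exact case list):

    #include <cassert>
    #include <cstdint>

    // When both operands are the same value x, several binary ops fold
    // without knowing x. (Illustrative only; the patched switch's exact
    // cases are elided from the hunk above.)
    int main() {
        for (int32_t x = -2; x <= 2; x++) {
            assert((x & x) == x);       // andi(x, x) -> x
            assert((x | x) == x);       // ori(x, x)  -> x
            assert((x ^ x) == 0);       // xori(x, x) -> 0
            assert((x - x) == 0);       // subi(x, x) -> 0
            assert((x == x) == 1);      // eqi(x, x)  -> 1 (true)
        }
        return 0;
    }

Note that the double variants are not all safe to fold this way: eqd(x, x) is false when x is NaN.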
@@ -658,8 +661,11 @@ namespace nanojit
             }
         }

+        //-------------------------------------------------------------------
+        // Folding where both operands are immediates, grouped by type
+        //-------------------------------------------------------------------
         if (oprnd1->isImmI() && oprnd2->isImmI()) {
-            // The operands are both 32-bit integer immediates.
+            // The operands are both int immediates.
             int32_t c1 = oprnd1->immI();
             int32_t c2 = oprnd2->immI();
             double d;
@@ -679,9 +685,9 @@ namespace nanojit
             case LIR_leui: return insImmI(uint32_t(c1) <= uint32_t(c2));
             case LIR_geui: return insImmI(uint32_t(c1) >= uint32_t(c2));

-            case LIR_rshi: return insImmI(c1 >> c2);
-            case LIR_lshi: return insImmI(c1 << c2);
-            case LIR_rshui: return insImmI(uint32_t(c1) >> c2);
+            case LIR_lshi: return insImmI(c1 << (c2 & 0x1f));
+            case LIR_rshi: return insImmI(c1 >> (c2 & 0x1f));
+            case LIR_rshui: return insImmI(uint32_t(c1) >> (c2 & 0x1f));

             case LIR_ori: return insImmI(c1 | c2);
             case LIR_andi: return insImmI(c1 & c2);
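The new & 0x1f masks matter for correctness of the fold, not just tidiness: in C++, shifting a 32-bit value by 32 or more is undefined behavior in the compiler doing the folding, while x86-class hardware masks a 32-bit shift count to its low five bits at run time, so the folded constant must be computed the same way. A standalone sketch of the rule (assuming the backend's shifts behave mod 32, as on x86):

    #include <cassert>
    #include <cstdint>

    // Fold lshi the way the patched code does: mask the count to [0,31]
    // so the host compiler never evaluates a UB shift, and the result
    // matches a mod-32 hardware shift.
    static int32_t foldLshI(int32_t c1, int32_t c2) {
        return c1 << (c2 & 0x1f);
    }

    int main() {
        assert(foldLshI(1, 3)  == 8);   // ordinary case
        assert(foldLshI(1, 35) == 8);   // 35 & 0x1f == 3, as mod-32 hardware would shift
        return 0;
    }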
@@ -714,7 +720,7 @@ namespace nanojit

 #ifdef NANOJIT_64BIT
         } else if (oprnd1->isImmQ() && oprnd2->isImmQ()) {
-            // The operands are both 64-bit integer immediates.
+            // The operands are both quad immediates.
             int64_t c1 = oprnd1->immQ();
             int64_t c2 = oprnd2->immQ();
             static const int64_t MIN_INT64 = int64_t(0x8000000000000000LL);
@@ -736,7 +742,7 @@ namespace nanojit
             case LIR_xorq: return insImmQ(c1 ^ c2);

             // Nb: LIR_rshq, LIR_lshq and LIR_rshuq aren't here because their
-            // second arg is a 32-bit int.
+            // RHS is an int. They are below.

             case LIR_addq:
                 // Overflow is only possible if both values are positive or
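The hunk cuts off inside the LIR_addq overflow comment, so the test itself is not shown. The standard shape of such a conservative check, consistent with the comment's both-positive-or-both-negative observation (a sketch under that assumption, not necessarily the patch's exact code):

    #include <cstdint>

    // Fold a 64-bit signed add only when it provably does not overflow.
    // Mixed-sign operands can never overflow; same-sign operands overflow
    // exactly when the wrapped sum's sign differs from the operands' sign.
    static bool foldAddQ(int64_t c1, int64_t c2, int64_t* result) {
        uint64_t sum = uint64_t(c1) + uint64_t(c2);     // well-defined wraparound
        bool sameSign = (c1 ^ c2) >= 0;
        bool overflow = sameSign && ((int64_t(sum) ^ c1) < 0);
        if (overflow)
            return false;       // don't fold; leave the add in the generated code
        *result = int64_t(sum); // two's-complement reinterpretation of the sum
        return true;
    }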
@@ -776,9 +782,24 @@ namespace nanojit
             default:
                 break;
             }
-#endif

+        } else if (oprnd1->isImmQ() && oprnd2->isImmI()) {
+            // The first operand is a quad immediate, the second is an int
+            // immediate.
+            int64_t c1 = oprnd1->immQ();
+            int32_t c2 = oprnd2->immI();
+
+            switch (v) {
+            case LIR_lshq: return insImmQ(c1 << (c2 & 0x3f));
+            case LIR_rshq: return insImmQ(c1 >> (c2 & 0x3f));
+            case LIR_rshuq: return insImmQ(uint64_t(c1) >> (c2 & 0x3f));
+
+            default: break;
+            }
+#endif // NANOJIT_64BIT
+
         } else if (oprnd1->isImmD() && oprnd2->isImmD()) {
-            // The operands are both 64-bit double immediates.
+            // The operands are both double immediates.
             double c1 = oprnd1->immD();
             double c2 = oprnd2->immD();
             switch (v) {
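This new arm is the core of the patch: LIR_lshq, LIR_rshq and LIR_rshuq pair a quad LHS with an int shift count, so they can never match the both-quads or both-ints arms and previously never folded. The count is masked to [0,63], the 64-bit analogue of the 0x1f mask above. A standalone illustration:

    #include <cassert>
    #include <cstdint>

    // Quad shift folding: 64-bit value, 32-bit count masked mod 64.
    static int64_t foldLshQ(int64_t c1, int32_t c2) {
        return c1 << (c2 & 0x3f);
    }

    int main() {
        assert(foldLshQ(1, 40)  == (int64_t(1) << 40));
        assert(foldLshQ(1, 104) == (int64_t(1) << 40));  // 104 & 0x3f == 40
        return 0;
    }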
@@ -795,39 +816,50 @@ namespace nanojit

             default: break;
             }
         }
-        } else if (oprnd1->isImmI() && !oprnd2->isImmI()) {
-            // The first operand is a 32-bit integer immediate; move it to
-            // the right if possible.
+        }
+
+        //-------------------------------------------------------------------
+        // If only one operand is an immediate, make sure it's on the RHS, if possible
+        //-------------------------------------------------------------------
+        if (oprnd1->isImmAny() && !oprnd2->isImmAny()) {
             switch (v) {
+            case LIR_eqi:
+            CASE64(LIR_eqq:)
+            case LIR_eqd:
             case LIR_addi:
-            case LIR_muli:
+            CASE64(LIR_addq:)
             case LIR_addd:
+            case LIR_muli:
             case LIR_muld:
-            case LIR_xori:
-            case LIR_ori:
             case LIR_andi:
-            case LIR_eqi: {
-                // move const to rhs
+            CASE64(LIR_andq:)
+            case LIR_ori:
+            CASE64(LIR_orq:)
+            case LIR_xori:
+            CASE64(LIR_xorq:) {
+                // move immediate to RHS
                 LIns* t = oprnd2;
                 oprnd2 = oprnd1;
                 oprnd1 = t;
                 break;
             }
             default:
-                if (isCmpIOpcode(v)) {
-                    // move const to rhs, swap the operator
+                if (isCmpOpcode(v)) {
+                    // move immediate to RHS, swap the operator
                     LIns *t = oprnd2;
                     oprnd2 = oprnd1;
                     oprnd1 = t;
-                    v = invertCmpIOpcode(v);
+                    v = invertCmpOpcode(v);
                 }
                 break;
             }
         }

+        //-------------------------------------------------------------------
+        // Folding where the RHS is an immediate
+        //-------------------------------------------------------------------
         if (oprnd2->isImmI()) {
-            // The second operand is a 32-bit integer immediate.
+            // The second operand is an int immediate.
             int c = oprnd2->immI();
             switch (v) {
             case LIR_addi:
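The rewritten canonicalization distinguishes two groups: for commutative operations (and equality, which is symmetric) a bare operand swap moves the immediate to the RHS, while ordered comparisons fall through to the default branch, where the swap must also reverse the operator via invertCmpOpcode (see the final hunk). A standalone illustration of why the two groups differ:

    #include <cassert>

    int main() {
        int imm = 3, x = 7;
        // Commutative / symmetric ops: a bare swap preserves meaning.
        assert((imm + x) == (x + imm));
        assert((imm == x) == (x == imm));
        // Ordered comparisons are not symmetric: swapping operands must
        // also reverse the operator, e.g. imm < x  <=>  x > imm.
        assert((imm < x) == (x > imm));
        assert((imm < x) != (x < imm));     // a bare swap would change meaning
        return 0;
    }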
@@ -872,6 +904,9 @@ namespace nanojit
             case LIR_lshi:
             case LIR_rshi:
             case LIR_rshui:
+            CASE64(LIR_lshq:) // These are here because their RHS is an int
+            CASE64(LIR_rshq:)
+            CASE64(LIR_rshuq:)
                 return oprnd1;

             case LIR_andi:
@@ -899,7 +934,7 @@ namespace nanojit
                 switch (v) {
                 case LIR_ori: return oprnd2;    // x | -1 = -1
                 case LIR_andi: return oprnd1;   // x & -1 = x
                 case LIR_gtui: return insImmI(0);   // u32 > 0xffffffff -> always false
                 case LIR_leui: return insImmI(1);   // u32 <= 0xffffffff -> always true
                 default: break;
                 }
@@ -907,26 +942,79 @@ namespace nanojit
             } else if (c == 1) {
                 if (oprnd1->isCmp()) {
                     switch (v) {
-                    case LIR_ori: return oprnd2;    // cmp | 1 = 1 (and oprnd2 == 1)
-                    case LIR_andi: return oprnd1;   // cmp & 1 = cmp
-                    case LIR_gtui: return insImmI(0);   // (0|1) > 1 -> always false
+                    case LIR_ori: return oprnd2;    // 0or1 | 1 = 1 (and oprnd2 == 1)
+                    case LIR_andi: return oprnd1;   // 0or1 & 1 = 0or1
+                    case LIR_gtui: return insImmI(0);   // 0or1 > 1 -> always false
                     default: break;
                     }
                 } else if (v == LIR_muli) {
                     return oprnd1;      // x * 1 = x
                 }
             }
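The c == 1 folds lean on the oprnd1->isCmp() guard: a comparison result is known to be 0 or 1 (hence the 0or1 spelling in the new comments), and the identities hold only on that two-value range. A small check of the reasoning:

    #include <cassert>
    #include <cstdint>

    int main() {
        // For a value known to be 0 or 1 (a comparison result):
        for (int32_t b = 0; b <= 1; b++) {
            assert((b | 1) == 1);           // 0or1 | 1 -> 1
            assert((b & 1) == b);           // 0or1 & 1 -> 0or1
            assert(!(uint32_t(b) > 1));     // 0or1 > 1 -> always false
        }
        // For arbitrary values the identities fail, hence the isCmp() guard:
        assert((7 & 1) != 7);
        assert(uint32_t(7) > 1);
        return 0;
    }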
+
+#ifdef NANOJIT_64BIT
+        } else if (oprnd2->isImmQ()) {
+            // The second operand is a quad immediate.
+            int64_t c = oprnd2->immQ();
+            if (c == 0) {
+                switch (v) {
+                case LIR_addq:
+                case LIR_orq:
+                case LIR_xorq:
+                case LIR_subq:
+                    return oprnd1;
+
+                case LIR_andq:
+                    return oprnd2;
+
+                case LIR_ltuq:      // unsigned < 0 -> always false
+                    return insImmI(0);
+
+                case LIR_geuq:      // unsigned >= 0 -> always true
+                    return insImmI(1);
+
+                default:
+                    break;
+                }
+
+            } else if (c == -1) {
+                switch (v) {
+                case LIR_orq: return oprnd2;    // x | -1 = -1
+                case LIR_andq: return oprnd1;   // x & -1 = x
+                case LIR_gtuq: return insImmI(0);   // u64 > 0xffffffffffffffff -> always false
+                case LIR_leuq: return insImmI(1);   // u64 <= 0xffffffffffffffff -> always true
+                default: break;
+                }
+
+            } else if (c == 1) {
+                if (oprnd1->isCmp()) {
+                    switch (v) {
+                    case LIR_orq: return oprnd2;    // 0or1 | 1 = 1 (and oprnd2 == 1)
+                    case LIR_andq: return oprnd1;   // 0or1 & 1 = 0or1
+                    case LIR_gtuq: return insImmI(0);   // 0or1 > 1 -> always false
+                    default: break;
+                    }
+                }
+            }
+#endif // NANOJIT_64BIT
         }
 #if NJ_SOFTFLOAT_SUPPORTED
+        //-------------------------------------------------------------------
+        // SoftFloat-specific folding
+        //-------------------------------------------------------------------
         LIns* ins;
         if (v == LIR_ii2d && oprnd1->isop(LIR_dlo2i) && oprnd2->isop(LIR_dhi2i) &&
-            (ins = oprnd1->oprnd1()) == oprnd2->oprnd1()) {
+            (ins = oprnd1->oprnd1()) == oprnd2->oprnd1())
+        {
             // qjoin(qlo(x),qhi(x)) == x
             return ins;
         }
 #endif

+        //-------------------------------------------------------------------
+        // No folding possible
+        //-------------------------------------------------------------------
         return out->ins2(v, oprnd1, oprnd2);
     }
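Under softfloat a double travels as two 32-bit halves, and LIR_ii2d applied to LIR_dlo2i/LIR_dhi2i of the same instruction just reassembles the value it split, which is the qjoin(qlo(x),qhi(x)) == x identity above. A bit-level sketch of why the rewrite is sound:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Splitting a double into 32-bit halves and rejoining the same halves
    // reproduces the original bits exactly.
    int main() {
        double x = 3.14159;
        uint64_t bits;
        std::memcpy(&bits, &x, sizeof bits);
        uint32_t lo = uint32_t(bits);           // the dlo2i(x) half
        uint32_t hi = uint32_t(bits >> 32);     // the dhi2i(x) half
        uint64_t rejoined = (uint64_t(hi) << 32) | lo;   // ii2d(lo, hi)
        double y;
        std::memcpy(&y, &rejoined, sizeof y);
        assert(std::memcmp(&x, &y, sizeof x) == 0);
        return 0;
    }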
@@ -939,7 +1027,7 @@ namespace nanojit
             return oprnd2;
         }
         if (oprnd1->isImmI()) {
-            // const ? x : y => return x or y depending on const
+            // immediate ? x : y => return x or y depending on immediate
             return oprnd1->immI() ? oprnd2 : oprnd3;
         }
         if (oprnd1->isop(LIR_eqi) &&
@@ -1016,7 +1104,7 @@ namespace nanojit
         case LIR_addxovi:
         case LIR_muljovi:
         case LIR_mulxovi: {
-            // swap operands, moving const to rhs
+            // swap operands, moving immediate to RHS
             LIns* t = oprnd2;
             oprnd2 = oprnd1;
             oprnd1 = t;
@@ -3008,11 +3096,11 @@ namespace nanojit
     {
         switch (type) {
         case LTy_V: return "void";
-        case LTy_I: return "int32";
+        case LTy_I: return "int";
 #ifdef NANOJIT_64BIT
-        case LTy_Q: return "int64";
+        case LTy_Q: return "quad";
 #endif
-        case LTy_D: return "float64";
+        case LTy_D: return "double";
         default: NanoAssert(0); return "???";
         }
     }
@@ -491,18 +491,8 @@ namespace nanojit
         NanoAssert(op == LIR_xt || op == LIR_xf);
         return LOpcode(op ^ 1);
     }
-    inline LOpcode invertCmpIOpcode(LOpcode op) {
-        NanoAssert(isCmpIOpcode(op));
-        return LOpcode(op ^ 1);
-    }
-#ifdef NANOJIT_64BIT
-    inline LOpcode invertCmpQOpcode(LOpcode op) {
-        NanoAssert(isCmpQOpcode(op));
-        return LOpcode(op ^ 1);
-    }
-#endif
-    inline LOpcode invertCmpDOpcode(LOpcode op) {
-        NanoAssert(isCmpDOpcode(op));
+    inline LOpcode invertCmpOpcode(LOpcode op) {
+        NanoAssert(isCmpOpcode(op));
         return LOpcode(op ^ 1);
     }
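All of these helpers reduce to op ^ 1, which is only correct because the opcode table lays each conditional next to its dual, so the paired variants differ in exactly the low bit; that layout invariant is what the NanoAssert calls protect, and it is also what lets the three typed helpers collapse into a single invertCmpOpcode. A sketch with a hypothetical numbering (not nanojit's actual opcode values):

    #include <cassert>

    // Hypothetical opcode numbering with the property the XOR trick
    // needs: each comparison is adjacent to its operand-swapped dual.
    enum Cmp { C_LT = 4, C_GT = 5, C_LE = 6, C_GE = 7 };

    static Cmp invert(Cmp op) { return Cmp(op ^ 1); }

    int main() {
        assert(invert(C_LT) == C_GT && invert(C_GT) == C_LT);
        assert(invert(C_LE) == C_GE && invert(C_GE) == C_LE);
        return 0;
    }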