Bug 1654947 - Don't clobber RHS register for shift instructions. r=iain

The masking was unnecessary anyway on x86/x64/arm64 because the CPU does it (and
we are already relying on that in Ion). For arm32 handle this in the flexible*
MacroAssembler implementation by using the scratch register.

Differential Revision: https://phabricator.services.mozilla.com/D84787
This commit is contained in:
Jan de Mooij 2020-07-24 14:59:06 +00:00
parent 9d7df9a34d
commit 6af55781ed
4 changed files with 24 additions and 10 deletions

View File

@ -0,0 +1,11 @@
// Regression test for bug 1654947: the CacheIR shift ICs must not clobber the
// RHS (shift-amount) register. |g| reuses |x| as both shift operand and final
// result, so if the IC masks the RHS in place, the returned |x| is corrupted.
function f() {
// `x >>> {} >>> x`: the `{}` coerces to 0, so this exercises >>> with |x| as
// both LHS and shift count; the comma operator then yields |x| unchanged.
let g = x => (x >>> {} >>> x, x);
// 0xffff has bits set in the low 5 bits (the shift-count mask range);
// undefined coerces to 0 in the shift but must round-trip unchanged.
let arr = [0, 0xffff, undefined];
// Outer loop warms up the inline cache so the IC path (not the interpreter
// path) is what gets tested on later iterations.
for (let i = 0; i < 10; i++) {
for (let j = 0; j < 3; j++) {
let y = g(arr[j]);
// |x| must come back exactly as passed in, proving the register holding it
// was not overwritten by the shift IC.
assertEq(y, arr[j]);
}
}
}
f();

View File

@ -2781,8 +2781,6 @@ bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.mov(lhs, scratch);
// Mask shift amount as specified by 12.9.3.1 Step 7
masm.and32(Imm32(0x1F), rhs);
masm.flexibleLshift32(rhs, scratch);
EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
@ -2798,8 +2796,6 @@ bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId,
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.mov(lhs, scratch);
// Mask shift amount as specified by 12.9.4.1 Step 7
masm.and32(Imm32(0x1F), rhs);
masm.flexibleRshift32Arithmetic(rhs, scratch);
EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
@ -2822,8 +2818,6 @@ bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId,
}
masm.mov(lhs, scratch);
// Mask shift amount as specified by 12.9.4.1 Step 7
masm.and32(Imm32(0x1F), rhs);
masm.flexibleRshift32(rhs, scratch);
Label intDone, floatDone;
if (allowDouble) {

View File

@ -1104,7 +1104,10 @@ class MacroAssembler : public MacroAssemblerSpecific {
inline void rshift32Arithmetic(Register shift,
Register srcDest) PER_SHARED_ARCH;
// These variants do not have the above constraint, but may emit some extra
// instructions on x86_shared. They also handle shift >= 32 consistently by
// masking with 0x1F (either explicitly or relying on the hardware to do
// that).
inline void flexibleLshift32(Register shift,
Register srcDest) PER_SHARED_ARCH;
inline void flexibleRshift32(Register shift,

View File

@ -619,7 +619,9 @@ void MacroAssembler::lshift32(Register src, Register dest) {
}
void MacroAssembler::flexibleLshift32(Register src, Register dest) {
  // Left-shift |dest| by |src| modulo 32. Unlike x86/x64/arm64, the plain
  // ARM32 shift does not mask the count in a way we can rely on here, so mask
  // with 0x1F explicitly (ES spec, Left Shift Operator, step: ShiftCount =
  // rnum & 0x1F). Do the masking into the scratch register rather than into
  // |src| so the caller's shift register is not clobbered (bug 1654947).
  ScratchRegisterScope scratch(*this);
  as_and(scratch, src, Imm8(0x1F));
  lshift32(scratch, dest);
}
void MacroAssembler::lshift32(Imm32 imm, Register dest) {
@ -639,7 +641,9 @@ void MacroAssembler::rshift32(Register src, Register dest) {
}
void MacroAssembler::flexibleRshift32(Register src, Register dest) {
  // Logical (zero-filling) right shift of |dest| by |src| modulo 32.
  // Mask the shift amount into the scratch register instead of masking |src|
  // in place, so the caller's RHS register is left untouched (bug 1654947).
  ScratchRegisterScope scratch(*this);
  as_and(scratch, src, Imm8(0x1F));
  rshift32(scratch, dest);
}
void MacroAssembler::rshift32(Imm32 imm, Register dest) {
@ -709,7 +713,9 @@ void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
}
void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
  // Arithmetic (sign-extending) right shift of |dest| by |src| modulo 32.
  // As with the other flexible* shifts, the 0x1F mask is applied into the
  // scratch register so |src| is not clobbered (bug 1654947); ES requires
  // the shift count be taken modulo 32.
  ScratchRegisterScope scratch(*this);
  as_and(scratch, src, Imm8(0x1F));
  rshift32Arithmetic(scratch, dest);
}
void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {