diff --git a/js/src/jit/BaselineCacheIRCompiler.cpp b/js/src/jit/BaselineCacheIRCompiler.cpp
index 1c7d77e4a012..3e20267e8bdf 100644
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -866,15 +866,16 @@ BaselineCacheIRCompiler::emitLoadFrameArgumentResult()
 {
     AutoOutputRegister output(*this);
     Register index = allocator.useRegister(masm, reader.int32OperandId());
-    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+    AutoScratchRegister scratch1(allocator, masm);
+    AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     // Bounds check.
-    masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs()), scratch);
-    masm.branch32(Assembler::AboveOrEqual, index, scratch, failure->label());
+    masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs()), scratch1);
+    masm.boundsCheck32ForLoad(index, scratch1, scratch2, failure->label());
 
     // Load the argument.
     masm.loadValue(BaseValueIndex(BaselineFrameReg, index, BaselineFrame::offsetOfArg(0)),
diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp
index 9c6335e0a23a..6f9fd754d826 100644
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -1833,8 +1833,8 @@ CacheIRCompiler::emitLoadStringCharResult()
         return false;
 
     // Bounds check, load string char.
-    masm.branch32(Assembler::BelowOrEqual, Address(str, JSString::offsetOfLength()),
-                  index, failure->label());
+    masm.boundsCheck32ForLoad(index, Address(str, JSString::offsetOfLength()), scratch1,
+                              failure->label());
     masm.loadStringChar(str, index, scratch1, failure->label());
 
     // Load StaticString for this char.
@@ -1852,37 +1852,38 @@ CacheIRCompiler::emitLoadArgumentsObjectArgResult()
     AutoOutputRegister output(*this);
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     Register index = allocator.useRegister(masm, reader.int32OperandId());
-    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+    AutoScratchRegister scratch1(allocator, masm);
+    AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     // Get initial length value.
-    masm.unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), scratch);
+    masm.unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), scratch1);
 
     // Ensure no overridden length/element.
     masm.branchTest32(Assembler::NonZero,
-                      scratch,
+                      scratch1,
                       Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
                             ArgumentsObject::ELEMENT_OVERRIDDEN_BIT),
                       failure->label());
 
     // Bounds check.
-    masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), scratch);
-    masm.branch32(Assembler::AboveOrEqual, index, scratch, failure->label());
+    masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), scratch1);
+    masm.boundsCheck32ForLoad(index, scratch1, scratch2, failure->label());
 
     // Load ArgumentsData.
-    masm.loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), scratch);
+    masm.loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), scratch1);
 
     // Fail if we have a RareArgumentsData (elements were deleted).
     masm.branchPtr(Assembler::NotEqual,
-                   Address(scratch, offsetof(ArgumentsData, rareData)),
+                   Address(scratch1, offsetof(ArgumentsData, rareData)),
                    ImmWord(0),
                    failure->label());
 
     // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
-    BaseValueIndex argValue(scratch, index, ArgumentsData::offsetOfArgs());
+    BaseValueIndex argValue(scratch1, index, ArgumentsData::offsetOfArgs());
     masm.branchTestMagic(Assembler::Equal, argValue, failure->label());
     masm.loadValue(argValue, output.valueReg());
     return true;
@@ -1894,21 +1895,22 @@ CacheIRCompiler::emitLoadDenseElementResult()
     AutoOutputRegister output(*this);
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     Register index = allocator.useRegister(masm, reader.int32OperandId());
-    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+    AutoScratchRegister scratch1(allocator, masm);
+    AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     // Load obj->elements.
-    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
 
     // Bounds check.
-    Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
-    masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());
+    Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
+    masm.boundsCheck32ForLoad(index, initLength, scratch2, failure->label());
 
     // Hole check.
-    BaseObjectElementIndex element(scratch, index);
+    BaseObjectElementIndex element(scratch1, index);
     masm.branchTestMagic(Assembler::Equal, element, failure->label());
     masm.loadTypedOrValue(element, output);
     return true;
@@ -1933,7 +1935,8 @@ CacheIRCompiler::emitLoadDenseElementHoleResult()
     AutoOutputRegister output(*this);
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     Register index = allocator.useRegister(masm, reader.int32OperandId());
-    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+    AutoScratchRegister scratch1(allocator, masm);
+    AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
     if (!output.hasValue()) {
         masm.assumeUnreachable("Should have monitored undefined value after attaching stub");
@@ -1948,16 +1951,16 @@ CacheIRCompiler::emitLoadDenseElementHoleResult()
     masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
 
     // Load obj->elements.
-    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
 
     // Guard on the initialized length.
     Label hole;
-    Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
-    masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);
+    Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
+    masm.boundsCheck32ForLoad(index, initLength, scratch2, &hole);
 
     // Load the value.
     Label done;
-    masm.loadValue(BaseObjectElementIndex(scratch, index), output.valueReg());
+    masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
     masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);
 
     // Load undefined for the hole.
@@ -2114,7 +2117,8 @@ CacheIRCompiler::emitLoadTypedElementResult()
     TypedThingLayout layout = reader.typedThingLayout();
     Scalar::Type type = reader.scalarType();
 
-    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+    AutoScratchRegister scratch1(allocator, masm);
+    AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
     if (!output.hasValue()) {
         if (type == Scalar::Float32 || type == Scalar::Float64) {
@@ -2135,16 +2139,16 @@ CacheIRCompiler::emitLoadTypedElementResult()
         return false;
 
     // Bounds check.
-    LoadTypedThingLength(masm, layout, obj, scratch);
-    masm.branch32(Assembler::BelowOrEqual, scratch, index, failure->label());
+    LoadTypedThingLength(masm, layout, obj, scratch1);
+    masm.boundsCheck32ForLoad(index, scratch1, scratch2, failure->label());
 
     // Load the elements vector.
-    LoadTypedThingData(masm, layout, obj, scratch);
+    LoadTypedThingData(masm, layout, obj, scratch1);
 
     // Load the value.
-    BaseIndex source(scratch, index, ScaleFromElemWidth(Scalar::byteSize(type)));
+    BaseIndex source(scratch1, index, ScaleFromElemWidth(Scalar::byteSize(type)));
     if (output.hasValue()) {
-        masm.loadFromTypedArray(type, source, output.valueReg(), *allowDoubleResult_, scratch,
+        masm.loadFromTypedArray(type, source, output.valueReg(), *allowDoubleResult_, scratch1,
                                 failure->label());
     } else {
         bool needGpr = (type == Scalar::Int8 || type == Scalar::Uint8 ||
@@ -2152,10 +2156,11 @@ CacheIRCompiler::emitLoadTypedElementResult()
                         type == Scalar::Uint8Clamped || type == Scalar::Int32);
         if (needGpr && output.type() == JSVAL_TYPE_DOUBLE) {
             // Load the element as integer, then convert it to double.
-            masm.loadFromTypedArray(type, source, AnyRegister(scratch), scratch, failure->label());
+            masm.loadFromTypedArray(type, source, AnyRegister(scratch1), scratch1,
+                                    failure->label());
             masm.convertInt32ToDouble(source, output.typedReg().fpu());
         } else {
-            masm.loadFromTypedArray(type, source, output.typedReg(), scratch, failure->label());
+            masm.loadFromTypedArray(type, source, output.typedReg(), scratch1, failure->label());
         }
     }
     return true;
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
index 1521f54d63fc..38821694d517 100644
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -10841,6 +10841,7 @@ void
 CodeGenerator::visitLoadElementHole(LLoadElementHole* lir)
 {
     Register elements = ToRegister(lir->elements());
+    Register index = ToRegister(lir->index());
     Register initLength = ToRegister(lir->initLength());
     const ValueOperand out = ToOutValue(lir);
 
@@ -10848,40 +10849,27 @@ CodeGenerator::visitLoadElementHole(LLoadElementHole* lir)
 
     // If the index is out of bounds, load |undefined|. Otherwise, load the
    // value.
-    Label undefined, done;
-    if (lir->index()->isConstant())
-        masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(ToInt32(lir->index())), &undefined);
-    else
-        masm.branch32(Assembler::BelowOrEqual, initLength, ToRegister(lir->index()), &undefined);
+    Label outOfBounds, done;
+    masm.boundsCheck32ForLoad(index, initLength, out.scratchReg(), &outOfBounds);
 
-    if (lir->index()->isConstant()) {
-        NativeObject::elementsSizeMustNotOverflow();
-        masm.loadValue(Address(elements, ToInt32(lir->index()) * sizeof(Value)), out);
-    } else {
-        masm.loadValue(BaseObjectElementIndex(elements, ToRegister(lir->index())), out);
-    }
+    masm.loadValue(BaseObjectElementIndex(elements, index), out);
 
     // If a hole check is needed, and the value wasn't a hole, we're done.
     // Otherwise, we'll load undefined.
-    if (lir->mir()->needsHoleCheck())
+    if (lir->mir()->needsHoleCheck()) {
         masm.branchTestMagic(Assembler::NotEqual, out, &done);
-    else
-        masm.jump(&done);
-
-    masm.bind(&undefined);
-
-    if (mir->needsNegativeIntCheck()) {
-        if (lir->index()->isConstant()) {
-            if (ToInt32(lir->index()) < 0)
-                bailout(lir->snapshot());
-        } else {
-            Label negative;
-            masm.branch32(Assembler::LessThan, ToRegister(lir->index()), Imm32(0), &negative);
-            bailoutFrom(&negative, lir->snapshot());
-        }
+        masm.moveValue(UndefinedValue(), out);
     }
+    masm.jump(&done);
+
+    masm.bind(&outOfBounds);
+    if (mir->needsNegativeIntCheck()) {
+        Label negative;
+        masm.branch32(Assembler::LessThan, index, Imm32(0), &negative);
+        bailoutFrom(&negative, lir->snapshot());
+    }
     masm.moveValue(UndefinedValue(), out);
+
     masm.bind(&done);
 }
@@ -10995,32 +10983,27 @@ CodeGenerator::visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole* lir)
 
     // Load the length.
     Register scratch = out.scratchReg();
-    RegisterOrInt32Constant key = ToRegisterOrInt32Constant(lir->index());
+    Register scratch2 = ToRegister(lir->temp());
+    Register index = ToRegister(lir->index());
     masm.unboxInt32(Address(object, TypedArrayObject::lengthOffset()), scratch);
 
-    // Load undefined unless length > key.
-    Label inbounds, done;
-    masm.branch32(Assembler::Above, scratch, key, &inbounds);
-    masm.moveValue(UndefinedValue(), out);
-    masm.jump(&done);
+    // Load undefined if index >= length.
+    Label outOfBounds, done;
+    masm.boundsCheck32ForLoad(index, scratch, scratch2, &outOfBounds);
 
     // Load the elements vector.
-    masm.bind(&inbounds);
     masm.loadPtr(Address(object, TypedArrayObject::dataOffset()), scratch);
 
     Scalar::Type arrayType = lir->mir()->arrayType();
     int width = Scalar::byteSize(arrayType);
 
     Label fail;
-    if (key.isConstant()) {
-        Address source(scratch, key.constant() * width);
-        masm.loadFromTypedArray(arrayType, source, out, lir->mir()->allowDouble(),
-                                out.scratchReg(), &fail);
-    } else {
-        BaseIndex source(scratch, key.reg(), ScaleFromElemWidth(width));
-        masm.loadFromTypedArray(arrayType, source, out, lir->mir()->allowDouble(),
-                                out.scratchReg(), &fail);
-    }
+    BaseIndex source(scratch, index, ScaleFromElemWidth(width));
+    masm.loadFromTypedArray(arrayType, source, out, lir->mir()->allowDouble(),
+                            out.scratchReg(), &fail);
+    masm.jump(&done);
+
+    masm.bind(&outOfBounds);
+    masm.moveValue(UndefinedValue(), out);
 
     if (fail.used())
         bailoutFrom(&fail, lir->snapshot());
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
index 9e74860820d7..75d354b28ac9 100644
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -3224,7 +3224,7 @@ LIRGenerator::visitLoadElementHole(MLoadElementHole* ins)
     MOZ_ASSERT(ins->type() == MIRType::Value);
 
     LLoadElementHole* lir = new(alloc()) LLoadElementHole(useRegister(ins->elements()),
-                                                          useRegisterOrConstant(ins->index()),
+                                                          useRegister(ins->index()),
                                                           useRegister(ins->initLength()));
     if (ins->needsNegativeIntCheck())
         assignSnapshot(lir, Bailout_NegativeIndex);
@@ -3654,9 +3654,10 @@ LIRGenerator::visitLoadTypedArrayElementHole(MLoadTypedArrayElementHole* ins)
     MOZ_ASSERT(ins->type() == MIRType::Value);
 
     const LUse object = useRegister(ins->object());
-    const LAllocation index = useRegisterOrConstant(ins->index());
+    const LAllocation index = useRegister(ins->index());
 
-    LLoadTypedArrayElementHole* lir = new(alloc()) LLoadTypedArrayElementHole(object, index);
+    LLoadTypedArrayElementHole* lir = new(alloc()) LLoadTypedArrayElementHole(object, index,
+                                                                              temp());
     if (ins->fallible())
         assignSnapshot(lir, Bailout_Overflow);
     defineBox(lir, ins);
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index 891215dfe3b0..8fa35e387a91 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -1383,6 +1383,7 @@ void
 MacroAssembler::loadStringChar(Register str, Register index, Register output, Label* fail)
 {
     MOZ_ASSERT(str != output);
+    MOZ_ASSERT(str != index);
     MOZ_ASSERT(index != output);
 
     movePtr(str, output);
@@ -1396,7 +1397,17 @@ MacroAssembler::loadStringChar(Register str, Register index, Register output, La
 
     // Check if the index is contained in the leftChild.
     // Todo: Handle index in the rightChild.
-    branch32(Assembler::BelowOrEqual, Address(output, JSString::offsetOfLength()), index, fail);
+    Label failPopStr, inLeft;
+    push(str);
+    boundsCheck32ForLoad(index, Address(output, JSString::offsetOfLength()), str, &failPopStr);
+    pop(str);
+    jump(&inLeft);
+
+    bind(&failPopStr);
+    pop(str);
+    jump(fail);
+
+    bind(&inLeft);
 
     // If the left side is another rope, give up.
     branchIfRope(output, fail);
@@ -3419,69 +3430,99 @@ MacroAssembler::debugAssertIsObject(const ValueOperand& val)
 
 template <typename T>
 void
-MacroAssembler::spectreMaskIndexImpl(Register index, const T& length, Register output)
+MacroAssembler::computeSpectreIndexMaskGeneric(Register index, const T& length, Register output)
 {
+    MOZ_ASSERT(JitOptions.spectreIndexMasking);
+    MOZ_ASSERT(index != output);
+
     // mask := ((index - length) & ~index) >> 31
-    // output := index & mask
     mov(index, output);
     sub32(length, output);
     not32(index);
     and32(index, output);
     not32(index); // Restore index register to its original value.
     rshift32Arithmetic(Imm32(31), output);
-    and32(index, output);
 }
 
 template <typename T>
 void
-MacroAssembler::spectreMaskIndexImpl(int32_t index, const T& length, Register output)
+MacroAssembler::computeSpectreIndexMask(int32_t index, const T& length, Register output)
 {
+    MOZ_ASSERT(JitOptions.spectreIndexMasking);
+
     // mask := ((index - length) & ~index) >> 31
-    // output := index & mask
     move32(Imm32(index), output);
-    if (index == 0)
-        return;
     sub32(length, output);
     and32(Imm32(~index), output);
     rshift32Arithmetic(Imm32(31), output);
-    and32(Imm32(index), output);
 }
 
 void
-MacroAssembler::spectreMaskIndex(int32_t index, Register length, Register output)
+MacroAssembler::computeSpectreIndexMask(Register index, Register length, Register output)
 {
-    spectreMaskIndexImpl(index, length, output);
-}
+    MOZ_ASSERT(JitOptions.spectreIndexMasking);
+    MOZ_ASSERT(index != length);
+    MOZ_ASSERT(length != output);
+    MOZ_ASSERT(index != output);
 
-void
-MacroAssembler::spectreMaskIndex(int32_t index, const Address& length, Register output)
-{
-    spectreMaskIndexImpl(index, length, output);
-}
-
-void
-MacroAssembler::spectreMaskIndex(Register index, Register length, Register output)
-{
 #if JS_BITS_PER_WORD == 64
     // On 64-bit platforms, we can use a faster algorithm:
     //
     //   mask := (uint64_t(index) - uint64_t(length)) >> 32
-    //   output := index & mask
     //
     // mask is 0xFFFFFFFF if index < length, 0 otherwise.
     move32(index, output);
     subPtr(length, output);
     rshiftPtr(Imm32(32), output);
-    and32(index, output);
 #else
-    spectreMaskIndexImpl(index, length, output);
+    computeSpectreIndexMaskGeneric(index, length, output);
 #endif
 }
 
+void
+MacroAssembler::spectreMaskIndex(int32_t index, Register length, Register output)
+{
+    MOZ_ASSERT(length != output);
+    if (index == 0) {
+        move32(Imm32(index), output);
+    } else {
+        computeSpectreIndexMask(index, length, output);
+        and32(Imm32(index), output);
+    }
+}
+
+void
+MacroAssembler::spectreMaskIndex(int32_t index, const Address& length, Register output)
+{
+    MOZ_ASSERT(length.base != output);
+    if (index == 0) {
+        move32(Imm32(index), output);
+    } else {
+        computeSpectreIndexMask(index, length, output);
+        and32(Imm32(index), output);
+    }
+}
+
+void
+MacroAssembler::spectreMaskIndex(Register index, Register length, Register output)
+{
+    MOZ_ASSERT(index != length);
+    MOZ_ASSERT(length != output);
+    MOZ_ASSERT(index != output);
+
+    computeSpectreIndexMask(index, length, output);
+    and32(index, output);
+}
+
 void
 MacroAssembler::spectreMaskIndex(Register index, const Address& length, Register output)
 {
-    spectreMaskIndexImpl(index, length, output);
+    MOZ_ASSERT(index != length.base);
+    MOZ_ASSERT(length.base != output);
+    MOZ_ASSERT(index != output);
+
+    computeSpectreIndexMaskGeneric(index, length, output);
+    and32(index, output);
 }
 
 void
@@ -3496,6 +3537,38 @@ MacroAssembler::boundsCheck32PowerOfTwo(Register index, uint32_t length, Label*
         and32(Imm32(length - 1), index);
 }
 
+void
+MacroAssembler::boundsCheck32ForLoad(Register index, Register length, Register scratch,
+                                     Label* failure)
+{
+    MOZ_ASSERT(index != length);
+    MOZ_ASSERT(length != scratch);
+    MOZ_ASSERT(index != scratch);
+
+    branch32(Assembler::AboveOrEqual, index, length, failure);
+
+    if (JitOptions.spectreIndexMasking) {
+        computeSpectreIndexMask(index, length, scratch);
+        and32(scratch, index);
+    }
+}
+
+void
+MacroAssembler::boundsCheck32ForLoad(Register index, const Address& length, Register scratch,
+                                     Label* failure)
+{
+    MOZ_ASSERT(index != length.base);
+    MOZ_ASSERT(length.base != scratch);
+    MOZ_ASSERT(index != scratch);
+
+    branch32(Assembler::BelowOrEqual, length, index, failure);
+
+    if (JitOptions.spectreIndexMasking) {
+        computeSpectreIndexMaskGeneric(index, length, scratch);
+        and32(scratch, index);
+    }
+}
+
 namespace js {
 namespace jit {
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index 73a881e48bea..83f40c09864e 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1948,10 +1948,12 @@ class MacroAssembler : public MacroAssemblerSpecific
 
   private:
     template <typename T>
-    void spectreMaskIndexImpl(Register index, const T& length, Register output);
+    void computeSpectreIndexMaskGeneric(Register index, const T& length, Register output);
+
+    void computeSpectreIndexMask(Register index, Register length, Register output);
 
     template <typename T>
-    void spectreMaskIndexImpl(int32_t index, const T& length, Register output);
+    void computeSpectreIndexMask(int32_t index, const T& length, Register output);
 
   public:
     void spectreMaskIndex(int32_t index, Register length, Register output);
@@ -1959,9 +1961,14 @@ class MacroAssembler : public MacroAssemblerSpecific
     void spectreMaskIndex(Register index, Register length, Register output);
     void spectreMaskIndex(Register index, const Address& length, Register output);
 
-    // The length must be a power of two.
+    // The length must be a power of two. Performs a bounds check and Spectre index
+    // masking.
     void boundsCheck32PowerOfTwo(Register index, uint32_t length, Label* failure);
 
+    // Performs a bounds check and Spectre index masking.
+    void boundsCheck32ForLoad(Register index, Register length, Register scratch, Label* failure);
+    void boundsCheck32ForLoad(Register index, const Address& length, Register scratch, Label* failure);
+
     template <typename T>
     void guardedCallPreBarrier(const T& address, MIRType type) {
         Label done;
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
index cb3bd368973a..5a389f4d3c2d 100644
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -6210,14 +6210,16 @@ class LLoadUnboxedScalar : public LInstructionHelper<1, 2, 1>
     }
 };
 
-class LLoadTypedArrayElementHole : public LInstructionHelper<BOX_PIECES, 2, 0>
+class LLoadTypedArrayElementHole : public LInstructionHelper<BOX_PIECES, 2, 1>
 {
   public:
     LIR_HEADER(LoadTypedArrayElementHole)
 
-    LLoadTypedArrayElementHole(const LAllocation& object, const LAllocation& index) {
+    LLoadTypedArrayElementHole(const LAllocation& object, const LAllocation& index,
+                               const LDefinition& temp) {
         setOperand(0, object);
         setOperand(1, index);
+        setTemp(0, temp);
     }
     const MLoadTypedArrayElementHole* mir() const {
         return mir_->toLoadTypedArrayElementHole();
@@ -6228,6 +6230,9 @@ class LLoadTypedArrayElementHole : public LInstructionHelper
     const LAllocation* index() {
         return getOperand(1);
    }
+    const LDefinition* temp() {
+        return getTemp(0);
+    }
 };
 
 class LLoadTypedArrayElementStatic : public LInstructionHelper<1, 1, 0>
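Note (not part of the patch): the standalone C++ sketch below illustrates the mask arithmetic that the new computeSpectreIndexMask*/boundsCheck32ForLoad helpers emit, assuming 32-bit signed indices and non-negative lengths as used by the JIT. The function names spectreMaskGeneric and spectreMask64 are only for illustration and do not exist in the tree.

// Illustrative sketch only; mirrors the comments in MacroAssembler.cpp above.
#include <cassert>
#include <cstdint>

// Generic form: mask := ((index - length) & ~index) >> 31, using an arithmetic
// shift, so the mask is all ones iff 0 <= index < length and 0 otherwise.
static int32_t spectreMaskGeneric(int32_t index, int32_t length)
{
    // Right-shifting a negative value is implementation-defined in C++ but is an
    // arithmetic shift on the targets the JIT emits this sequence for.
    return ((index - length) & ~index) >> 31;
}

// 64-bit fast path: mask := (uint64_t(index) - uint64_t(length)) >> 32.
// The subtraction only borrows into the top 32 bits when index < length.
static uint32_t spectreMask64(uint32_t index, uint32_t length)
{
    return static_cast<uint32_t>((uint64_t(index) - uint64_t(length)) >> 32);
}

int main()
{
    // In bounds: the mask is all ones, so index & mask leaves the index unchanged.
    assert((5 & spectreMaskGeneric(5, 10)) == 5);
    assert((5u & spectreMask64(5, 10)) == 5u);

    // Out of bounds or negative: the mask is 0, so a misspeculated load would use
    // index 0 instead of an attacker-controlled offset.
    assert((12 & spectreMaskGeneric(12, 10)) == 0);
    assert((-3 & spectreMaskGeneric(-3, 10)) == 0);
    assert((12u & spectreMask64(12, 10)) == 0u);
    return 0;
}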