Bug 1575153 part 2 - Remove AutoFlushICache infrastructure. r=tcampbell,lth

Depends on D45996

Differential Revision: https://phabricator.services.mozilla.com/D45997

--HG--
extra : moz-landing-system : lando
Jan de Mooij 2019-09-16 15:06:29 +00:00
parent cb079474aa
commit 14d93412bf
38 changed files with 40 additions and 316 deletions
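Most of the 38 files change mechanically: JIT call sites drop the nonce string from the Linker constructor, wasm and other raw-buffer paths drop the flushICache argument from executableCopy, and the AutoFlushICache RAII guards are deleted outright. A rough sketch of the two recurring call-site patterns (illustrative only; "SomeStub" is a made-up nonce, and masm, cx, buffer are the usual locals that appear in the hunks below):

    // Linker call sites:
    //   before:  Linker linker(masm, "SomeStub");
    //   after:
    Linker linker(masm);
    JitCode* code = linker.newCode(cx, CodeKind::Other);

    // Raw-buffer call sites (wasm modules, jitted atomics stubs):
    //   before:  masm.executableCopy(buffer, /* flushICache = */ false);
    //   after:
    masm.executableCopy(buffer);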


@ -505,7 +505,7 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext* cx, bool match_only)
masm.jump(&return_temp0);
}
Linker linker(masm, "RegExp");
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::RegExp);
if (!code)
return RegExpCode();


@ -190,7 +190,7 @@ JitCode* BaselineCacheIRCompiler::compile() {
EmitStubGuardFailure(masm);
}
Linker linker(masm, "getStubCode");
Linker linker(masm);
Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Baseline));
if (!newStubCode) {
cx_->recoverFromOutOfMemory();


@ -220,7 +220,7 @@ MethodStatus BaselineCompiler::compile() {
return Method_Error;
}
Linker linker(masm, "Baseline");
Linker linker(masm);
if (masm.oom()) {
ReportOutOfMemory(cx);
return Method_Error;
@ -7089,7 +7089,7 @@ bool BaselineInterpreterGenerator::generate(BaselineInterpreter& interpreter) {
emitOutOfLineCodeCoverageInstrumentation();
{
Linker linker(masm, "BaselineInterpreter");
Linker linker(masm);
if (masm.oom()) {
ReportOutOfMemory(cx);
return false;
@ -7250,7 +7250,7 @@ JitCode* JitRuntime::generateDebugTrapHandler(JSContext* cx,
masm.ret();
Linker linker(masm, "DebugTrapHandler");
Linker linker(masm);
JitCode* handlerCode = linker.newCode(cx, CodeKind::Other);
if (!handlerCode) {
return nullptr;


@ -1198,7 +1198,7 @@ JitCode* ICStubCompiler::getStubCode() {
if (!generateStubCode(masm)) {
return nullptr;
}
Linker linker(masm, "getStubCode");
Linker linker(masm);
Rooted<JitCode*> newStubCode(cx, linker.newCode(cx, CodeKind::Baseline));
if (!newStubCode) {
return nullptr;
@ -4150,7 +4150,7 @@ bool JitRuntime::generateBaselineICFallbackCode(JSContext* cx) {
IC_BASELINE_FALLBACK_CODE_KIND_LIST(EMIT_CODE)
#undef EMIT_CODE
Linker linker(masm, "BaselineICFallback");
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::Other);
if (!code) {
return false;


@ -2697,7 +2697,7 @@ JitCode* JitRealm::generateRegExpMatcherStub(JSContext* cx) {
masm.moveValue(UndefinedValue(), result);
masm.ret();
Linker linker(masm, "RegExpMatcherStub");
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::Other);
if (!code) {
return nullptr;
@ -2878,7 +2878,7 @@ JitCode* JitRealm::generateRegExpSearcherStub(JSContext* cx) {
masm.move32(Imm32(RegExpSearcherResultFailed), result);
masm.ret();
Linker linker(masm, "RegExpSearcherStub");
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::Other);
if (!code) {
return nullptr;
@ -3016,7 +3016,7 @@ JitCode* JitRealm::generateRegExpTesterStub(JSContext* cx) {
masm.freeStack(sizeof(irregexp::InputOutputData));
masm.ret();
Linker linker(masm, "RegExpTesterStub");
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::Other);
if (!code) {
return nullptr;
@ -8868,7 +8868,7 @@ JitCode* JitRealm::generateStringConcatStub(JSContext* cx) {
masm.movePtr(ImmPtr(nullptr), output);
masm.ret();
Linker linker(masm, "StringConcatStub");
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::Other);
#ifdef JS_ION_PERF
@ -10763,7 +10763,7 @@ bool CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints) {
js_free(ionScript);
});
Linker linker(masm, "IonLink");
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::Ion);
if (!code) {
return false;


@ -317,7 +317,7 @@ bool JitRuntime::generateTrampolines(JSContext* cx) {
void* handler = JS_FUNC_TO_DATA_PTR(void*, jit::HandleException);
generateExceptionTailStub(masm, handler, &profilerExitTail);
Linker linker(masm, "Trampolines");
Linker linker(masm);
trampolineCode_ = linker.newCode(cx, CodeKind::Other);
if (!trampolineCode_) {
return false;
@ -2835,160 +2835,6 @@ void jit::ForbidCompilation(JSContext* cx, JSScript* script) {
script->disableIon();
}
AutoFlushICache* JSContext::autoFlushICache() const { return autoFlushICache_; }
void JSContext::setAutoFlushICache(AutoFlushICache* afc) {
autoFlushICache_ = afc;
}
// Set the range for the merging of flushes. The flushing is deferred until the
// end of the AutoFlushICache context. Subsequent flushing within this range
// is also deferred. This is only expected to be defined once for each
// AutoFlushICache context. It assumes the range to be flushed is required to
// be within an AutoFlushICache context.
void AutoFlushICache::setRange(uintptr_t start, size_t len) {
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
AutoFlushICache* afc = TlsContext.get()->autoFlushICache();
MOZ_ASSERT(afc);
MOZ_ASSERT(!afc->start_);
JitSpewCont(JitSpew_CacheFlush, "(%" PRIxPTR " %zx):", start, len);
uintptr_t stop = start + len;
afc->start_ = start;
afc->stop_ = stop;
#endif
}
// Flush the instruction cache.
//
// If called within a dynamic AutoFlushICache context and if the range is
// already pending flushing for this AutoFlushICache context then the request is
// ignored with the understanding that it will be flushed on exit from the
// AutoFlushICache context. Otherwise the range is flushed immediately.
//
// Updates outside the current code object are typically the exception so they
// are flushed immediately rather than attempting to merge them.
//
// For efficiency it is expected that all large ranges will be flushed within an
// AutoFlushICache, so check. If this assertion is hit then it does not
// necessarily indicate a program fault but it might indicate a lost opportunity
// to merge cache flushing. It can be corrected by wrapping the call in an
// AutoFlushICache context.
//
// Note this can be called without TLS JSContext defined so this case needs
// to be guarded against. E.g. when patching instructions from the exception
// handler on MacOS running the ARM simulator.
void AutoFlushICache::flush(uintptr_t start, size_t len) {
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
defined(JS_CODEGEN_NONE)
// Nothing
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
JSContext* cx = TlsContext.get();
AutoFlushICache* afc = cx ? cx->autoFlushICache() : nullptr;
if (!afc) {
JitSpewCont(JitSpew_CacheFlush, "#");
jit::FlushICache((void*)start, len);
MOZ_ASSERT(len <= 32);
return;
}
uintptr_t stop = start + len;
if (start >= afc->start_ && stop <= afc->stop_) {
// Update is within the pending flush range, so defer to the end of the
// context.
JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "-" : "=");
return;
}
JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "x" : "*");
jit::FlushICache((void*)start, len);
#else
MOZ_CRASH("Unresolved porting API - AutoFlushICache::flush");
#endif
}
// Flag the current dynamic AutoFlushICache as inhibiting flushing. Useful in
// error paths where the changes are being abandoned.
void AutoFlushICache::setInhibit() {
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
defined(JS_CODEGEN_NONE)
// Nothing
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
AutoFlushICache* afc = TlsContext.get()->autoFlushICache();
MOZ_ASSERT(afc);
MOZ_ASSERT(afc->start_);
JitSpewCont(JitSpew_CacheFlush, "I");
afc->inhibit_ = true;
#else
MOZ_CRASH("Unresolved porting API - AutoFlushICache::setInhibit");
#endif
}
// The common use case is merging cache flushes when preparing a code object. In
// this case the entire range of the code object is being flushed and, as the
// code is patched, smaller redundant flushes could occur. The design allows an
// AutoFlushICache dynamic thread local context to be declared in which the
// range of the code object can be set which defers flushing until the end of
// this dynamic context. Flushing within this code range is also
// deferred, avoiding repeated flushes. Flushing outside this code range is
// not affected and proceeds immediately.
//
// In some cases flushing is not necessary, such as when compiling a wasm
// module which is flushed again when dynamically linked, and also in error
// paths that abandon the code. Flushing within the set code range can be
// inhibited within the AutoFlushICache dynamic context by setting an inhibit
// flag.
//
// The JS compiler can be re-entered while within an AutoFlushICache dynamic
// context and it is assumed that code being assembled or patched is not
// executed before the exit of the respective AutoFlushICache dynamic context.
//
AutoFlushICache::AutoFlushICache(const char* nonce, bool inhibit)
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
: start_(0),
stop_(0),
# ifdef JS_JITSPEW
name_(nonce),
# endif
inhibit_(inhibit)
#endif
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
JSContext* cx = TlsContext.get();
AutoFlushICache* afc = cx->autoFlushICache();
if (afc) {
JitSpew(JitSpew_CacheFlush, "<%s,%s%s ", nonce, afc->name_,
inhibit ? " I" : "");
} else {
JitSpewCont(JitSpew_CacheFlush, "<%s%s ", nonce, inhibit ? " I" : "");
}
prev_ = afc;
cx->setAutoFlushICache(this);
#endif
}
AutoFlushICache::~AutoFlushICache() {
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
JSContext* cx = TlsContext.get();
MOZ_ASSERT(cx->autoFlushICache() == this);
if (!inhibit_ && start_) {
jit::FlushICache((void*)start_, size_t(stop_ - start_));
}
JitSpewCont(JitSpew_CacheFlush, "%s%s>", name_, start_ ? "" : " U");
JitSpewFin(JitSpew_CacheFlush);
cx->setAutoFlushICache(prev_);
#endif
}
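For context, the usage pattern this class supported looked roughly like the following sketch, pieced together from the removed code above; "MyCode", codeStart, codeSize and patchOffset are placeholders rather than names from the tree:

    {
      // Open a dynamic, thread-local flush context (the nonce is only used
      // for JitSpew output).
      AutoFlushICache afc("MyCode");

      // Register the code object's range; its flush is deferred to the end
      // of this context.
      AutoFlushICache::setRange(uintptr_t(codeStart), codeSize);

      // Patches inside the registered range request flushes that get deferred:
      AutoFlushICache::flush(uintptr_t(codeStart) + patchOffset, 4);

      // Error paths that abandon the code could suppress the pending flush:
      //   AutoFlushICache::setInhibit();
    }
    // ~AutoFlushICache flushes [start_, stop_) once here, unless inhibited.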
size_t jit::SizeOfIonData(JSScript* script,
mozilla::MallocSizeOf mallocSizeOf) {
size_t result = 0;


@ -551,7 +551,7 @@ JitCode* IonCacheIRCompiler::compile() {
}
}
Linker linker(masm, "getStubCode");
Linker linker(masm);
Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Ion));
if (!newStubCode) {
cx_->recoverFromOutOfMemory();


@ -617,27 +617,6 @@ struct IonScriptCounts {
struct VMFunction;
struct AutoFlushICache {
private:
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
uintptr_t start_;
uintptr_t stop_;
# ifdef JS_JITSPEW
const char* name_;
# endif
bool inhibit_;
AutoFlushICache* prev_;
#endif
public:
static void setRange(uintptr_t p, size_t len);
static void flush(uintptr_t p, size_t len);
static void setInhibit();
~AutoFlushICache();
explicit AutoFlushICache(const char* nonce, bool inhibit = false);
};
} // namespace jit
namespace gc {


@ -20,7 +20,6 @@ namespace jit {
class Linker {
MacroAssembler& masm;
mozilla::Maybe<AutoWritableJitCodeFallible> awjcf;
AutoFlushICache afc;
JitCode* fail(JSContext* cx) {
ReportOutOfMemory(cx);
@ -29,10 +28,7 @@ class Linker {
public:
// Construct a linker with a rooted macro assembler.
explicit Linker(MacroAssembler& masm, const char* name)
: masm(masm), afc(name) {
masm.finish();
}
explicit Linker(MacroAssembler& masm) : masm(masm) { masm.finish(); }
// Create a new JitCode object and populate it with the contents of the
// macro assembler buffer.
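A call site under the new constructor keeps the same two-step shape seen throughout the hunks above (CodeKind::Other stands in for whatever kind the caller actually needs):

    Linker linker(masm);  // finish()es the MacroAssembler
    JitCode* code = linker.newCode(cx, CodeKind::Other);
    if (!code) {
      return nullptr;  // newCode() returned nullptr on failure
    }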


@ -521,12 +521,9 @@ bool Assembler::swapBuffer(wasm::Bytes& bytes) {
return true;
}
void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
void Assembler::executableCopy(uint8_t* buffer) {
MOZ_ASSERT(isFinished);
m_buffer.executableCopy(buffer);
if (flushICache) {
AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
}
}
class RelocationIterator {
@ -709,12 +706,6 @@ static void TraceOneDataRelocation(JSTracer* trc,
MacroAssemblerARM::ma_mov_patch(Imm32(int32_t(ptr)), dest,
Assembler::Always, rs, iter);
// L_LDR won't cause any instructions to be updated.
if (rs != Assembler::L_LDR) {
AutoFlushICache::flush(uintptr_t(iter.cur()), 4);
AutoFlushICache::flush(uintptr_t(iter.next()), 4);
}
}
}
@ -2345,8 +2336,6 @@ void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
// 24 << 2 byte bl instruction.
uint8_t* dest = toCall.raw();
new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst), Always);
// Ensure everyone sees the code that was just written into memory.
AutoFlushICache::flush(uintptr_t(inst), 4);
}
void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
@ -2369,13 +2358,6 @@ void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
MacroAssembler::ma_mov_patch(Imm32(int32_t(newValue.value)), dest, Always,
rs, iter);
}
// L_LDR won't cause any instructions to be updated.
if (rs != L_LDR) {
InstructionIterator iter(ptr);
AutoFlushICache::flush(uintptr_t(iter.cur()), 4);
AutoFlushICache::flush(uintptr_t(iter.next()), 4);
}
}
void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
@ -2554,7 +2536,6 @@ void Assembler::ToggleToJmp(CodeLocationLabel inst_) {
// Zero bits 20-27, then set 24-27 to be correct for a branch.
// 20-23 will be part of the B's immediate, and should be 0.
*ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20);
AutoFlushICache::flush(uintptr_t(ptr), 4);
}
void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
@ -2574,8 +2555,6 @@ void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
// Zero out bits 20-27, then set them to be correct for a compare.
*ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20);
AutoFlushICache::flush(uintptr_t(ptr), 4);
}
void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
@ -2604,8 +2583,6 @@ void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
} else {
*inst = InstNOP();
}
AutoFlushICache::flush(uintptr_t(inst), 4);
}
size_t Assembler::ToggledCallSize(uint8_t* code) {


@ -1697,7 +1697,7 @@ class Assembler : public AssemblerShared {
// Copy the assembly code to the given buffer, and perform any pending
// relocations relying on the target address.
void executableCopy(uint8_t* buffer, bool flushICache = true);
void executableCopy(uint8_t* buffer);
// Actual assembly emitting functions.


@ -4305,7 +4305,6 @@ void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
reinterpret_cast<Instruction*>(inst)->is<InstNOP>());
new (inst) InstBLImm(BOffImm(target - inst), Assembler::Always);
AutoFlushICache::flush(uintptr_t(inst), 4);
}
void MacroAssembler::patchCallToNop(uint8_t* call) {
@ -4313,7 +4312,6 @@ void MacroAssembler::patchCallToNop(uint8_t* call) {
MOZ_ASSERT(reinterpret_cast<Instruction*>(inst)->is<InstBLImm>() ||
reinterpret_cast<Instruction*>(inst)->is<InstNOP>());
new (inst) InstNOP();
AutoFlushICache::flush(uintptr_t(inst), 4);
}
void MacroAssembler::pushReturnAddress() { push(lr); }


@ -149,7 +149,7 @@ BufferOffset Assembler::emitExtendedJumpTable() {
return tableOffset;
}
void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
void Assembler::executableCopy(uint8_t* buffer) {
// Copy the code and all constant pools into the output buffer.
armbuffer_.executableCopy(buffer);
@ -183,10 +183,6 @@ void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
// will work.
}
}
if (flushICache) {
AutoFlushICache::setRange(uintptr_t(buffer), armbuffer_.size());
}
}
BufferOffset Assembler::immPool(ARMRegister dest, uint8_t* value,
@ -340,8 +336,6 @@ void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
// printf("patching %p with call to %p\n", start.raw(), toCall.raw());
bl(dest, relTarget00);
AutoFlushICache::flush(uintptr_t(dest), 4);
}
void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
@ -368,8 +362,6 @@ void Assembler::ToggleToJmp(CodeLocationLabel inst_) {
MOZ_ASSERT(vixl::IsInt19(imm19));
b(i, imm19, Always);
AutoFlushICache::flush(uintptr_t(i), 4);
}
void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
@ -393,8 +385,6 @@ void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
Emit(i, vixl::ThirtyTwoBits | vixl::AddSubImmediateFixed | vixl::SUB |
Flags(vixl::SetFlags) | Rd(vixl::xzr) |
(imm19 << vixl::Rn_offset));
AutoFlushICache::flush(uintptr_t(i), 4);
}
void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
@ -446,9 +436,6 @@ void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
ldr(load, ScratchReg2_64, int32_t(offset));
blr(call, ScratchReg2_64);
}
AutoFlushICache::flush(uintptr_t(first), 4);
AutoFlushICache::flush(uintptr_t(call), 8);
}
// Patches loads generated by MacroAssemblerCompat::mov(CodeLabel*, Register).


@ -205,7 +205,7 @@ class Assembler : public vixl::Assembler {
// table.
BufferOffset emitExtendedJumpTable();
BufferOffset ExtendedJumpTable_;
void executableCopy(uint8_t* buffer, bool flushICache = true);
void executableCopy(uint8_t* buffer);
BufferOffset immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op,
const LiteralDoc& doc,


@ -682,7 +682,6 @@ void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
MOZ_RELEASE_ASSERT((relTarget & 0x3) == 0);
MOZ_RELEASE_ASSERT(vixl::IsInt26(relTarget00));
bl(inst, relTarget00);
AutoFlushICache::flush(uintptr_t(inst), 4);
}
CodeOffset MacroAssembler::farJumpWithPatch() {
@ -739,7 +738,6 @@ void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
Instruction* instr = reinterpret_cast<Instruction*>(inst);
MOZ_ASSERT(instr->IsBL() || instr->IsNOP());
bl(instr, (target - inst) >> 2);
AutoFlushICache::flush(uintptr_t(inst), 4);
}
void MacroAssembler::patchCallToNop(uint8_t* call) {
@ -747,7 +745,6 @@ void MacroAssembler::patchCallToNop(uint8_t* call) {
Instruction* instr = reinterpret_cast<Instruction*>(inst);
MOZ_ASSERT(instr->IsBL() || instr->IsNOP());
nop(instr);
AutoFlushICache::flush(uintptr_t(inst), 4);
}
void MacroAssembler::pushReturnAddress() {


@ -1740,8 +1740,6 @@ void AssemblerMIPSShared::ToggleToJmp(CodeLocationLabel inst_) {
MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_andi >> OpcodeShift));
// We converted beq to andi, so now we restore it.
inst->setOpcode(op_beq);
AutoFlushICache::flush(uintptr_t(inst), 4);
}
void AssemblerMIPSShared::ToggleToCmp(CodeLocationLabel inst_) {
@ -1751,8 +1749,6 @@ void AssemblerMIPSShared::ToggleToCmp(CodeLocationLabel inst_) {
MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift));
// Replace "beq $zero, $zero, offset" with "andi $zero, $zero, offset"
inst->setOpcode(op_andi);
AutoFlushICache::flush(uintptr_t(inst), 4);
}
void AssemblerMIPSShared::UpdateLuiOriValue(Instruction* inst0,


@ -859,7 +859,7 @@ class AssemblerMIPSShared : public AssemblerShared {
bool appendRawCode(const uint8_t* code, size_t numBytes);
bool reserve(size_t size);
bool swapBuffer(wasm::Bytes& bytes);
void executableCopy(void* buffer, bool flushICache = true);
void executableCopy(void* buffer);
void copyJumpRelocationTable(uint8_t* dest);
void copyDataRelocationTable(uint8_t* dest);


@ -1500,13 +1500,11 @@ void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
Instruction* inst = (Instruction*)call - 6 /* six nops */;
Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)target);
inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
AutoFlushICache::flush(uintptr_t(inst), 6 * 4);
#else
Instruction* inst = (Instruction*)call - 4 /* four nops */;
Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
(uint32_t)target);
inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
AutoFlushICache::flush(uintptr_t(inst), 4 * 4);
#endif
}
@ -1524,9 +1522,6 @@ void MacroAssembler::patchCallToNop(uint8_t* call) {
#ifdef JS_CODEGEN_MIPS64
inst[4].makeNop();
inst[5].makeNop();
AutoFlushICache::flush(uintptr_t(inst), 6 * 4);
#else
AutoFlushICache::flush(uintptr_t(inst), 4 * 4);
#endif
}


@ -111,13 +111,9 @@ uint32_t js::jit::SA(FloatRegister r) {
return r.id() << SAShift;
}
void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
void Assembler::executableCopy(uint8_t* buffer) {
MOZ_ASSERT(isFinished);
m_buffer.executableCopy(buffer);
if (flushICache) {
AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
}
}
uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
@ -153,7 +149,6 @@ static void TraceOneDataRelocation(JSTracer* trc,
awjc.emplace(code);
}
AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
AutoFlushICache::flush(uintptr_t(inst), 8);
}
}
@ -308,9 +303,6 @@ void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
(uint32_t)dest);
inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
inst[3] = InstNOP();
// Ensure everyone sees the code that was just written into memory.
AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
}
uint32_t Assembler::ExtractLuiOriValue(Instruction* inst0, Instruction* inst1) {
@ -348,8 +340,6 @@ void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
// Replace with new value
AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(),
uint32_t(newValue.value));
AutoFlushICache::flush(uintptr_t(inst), 8);
}
uint32_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
@ -373,6 +363,4 @@ void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
InstNOP nop;
*i2 = nop;
}
AutoFlushICache::flush(uintptr_t(i2), 4);
}


@ -195,7 +195,7 @@ class Assembler : public AssemblerMIPSShared {
// Copy the assembly code to the given buffer, and perform any pending
// relocations relying on the target address.
void executableCopy(uint8_t* buffer, bool flushICache = true);
void executableCopy(uint8_t* buffer);
static uint32_t PatchWrite_NearCallSize();


@ -75,13 +75,9 @@ uint32_t js::jit::SA(FloatRegister r) {
return r.id() << SAShift;
}
void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
void Assembler::executableCopy(uint8_t* buffer) {
MOZ_ASSERT(isFinished);
m_buffer.executableCopy(buffer);
if (flushICache) {
AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
}
}
uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
@ -131,7 +127,6 @@ static void TraceOneDataRelocation(JSTracer* trc,
awjc.emplace(code);
}
Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
}
}
@ -249,9 +244,6 @@ void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
inst[5] = InstNOP();
// Ensure everyone sees the code that was just written into memory.
AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
}
uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
@ -337,8 +329,6 @@ void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
// Replace with new value
Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
}
uint64_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
@ -365,6 +355,4 @@ void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
InstNOP nop;
*i4 = nop;
}
AutoFlushICache::flush(uintptr_t(i4), sizeof(uint32_t));
}


@ -215,7 +215,7 @@ class Assembler : public AssemblerMIPSShared {
// Copy the assembly code to the given buffer, and perform any pending
// relocations relying on the target address.
void executableCopy(uint8_t* buffer, bool flushICache = true);
void executableCopy(uint8_t* buffer);
static uint32_t PatchWrite_NearCallSize();


@ -872,9 +872,8 @@ bool InitializeJittedAtomics() {
// Zero the padding.
memset(code + codeLength, 0, roundedCodeLength - codeLength);
// Copy the code into place but do not flush, as the flush path requires a
// JSContext* we do not have.
masm.executableCopy(code, /* flushICache = */ false);
// Copy the code into place.
masm.executableCopy(code);
// Reprotect the whole region to avoid having separate RW and RX mappings.
if (!ExecutableAllocator::makeExecutableAndFlushICache(code,


@ -198,7 +198,7 @@ void Assembler::finish() {
}
}
void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
void Assembler::executableCopy(uint8_t* buffer) {
AssemblerX86Shared::executableCopy(buffer);
for (size_t i = 0; i < jumps_.length(); i++) {


@ -340,7 +340,7 @@ class Assembler : public AssemblerX86Shared {
// Copy the assembly code to the given buffer, and perform any pending
// relocations relying on the target address.
void executableCopy(uint8_t* buffer, bool flushICache = true);
void executableCopy(uint8_t* buffer);
// Actual assembly emitting functions.


@ -47,7 +47,7 @@ ABIArg ABIArgGenerator::next(MIRType type) {
return current_;
}
void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
void Assembler::executableCopy(uint8_t* buffer) {
AssemblerX86Shared::executableCopy(buffer);
for (RelativePatch& rp : jumps_) {
X86Encoding::SetRel32(buffer + rp.offset, rp.target);


@ -254,7 +254,7 @@ class Assembler : public AssemblerX86Shared {
// Copy the assembly code to the given buffer, and perform any pending
// relocations relying on the target address.
void executableCopy(uint8_t* buffer, bool flushICache = true);
void executableCopy(uint8_t* buffer);
// Actual assembly emitting functions.


@ -44,7 +44,7 @@ static bool Execute(JSContext* cx, MacroAssembler& masm) {
return false;
}
Linker linker(masm, "Test");
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::Other);
if (!code) {
return false;


@ -121,7 +121,7 @@ static js::jit::JitCode* linkAndAllocate(JSContext* cx,
js::jit::MacroAssembler* masm) {
using namespace js;
using namespace js::jit;
Linker l(*masm, "test");
Linker l(*masm);
return l.newCode(cx, CodeKind::Ion);
}
@ -134,7 +134,6 @@ BEGIN_TEST(testJitMoveEmitterCycles_simple) {
LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
TempAllocator alloc(&lifo);
JitContext jc(cx, &alloc);
AutoFlushICache afc("test");
StackMacroAssembler masm;
MoveEmitter mover(masm);
@ -175,7 +174,6 @@ BEGIN_TEST(testJitMoveEmitterCycles_autogen) {
LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
TempAllocator alloc(&lifo);
JitContext jc(cx, &alloc);
AutoFlushICache afc("test");
StackMacroAssembler masm;
MoveEmitter mover(masm);
MoveResolver mr;
@ -268,7 +266,6 @@ BEGIN_TEST(testJitMoveEmitterCycles_autogen2) {
LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
TempAllocator alloc(&lifo);
JitContext jc(cx, &alloc);
AutoFlushICache afc("test");
StackMacroAssembler masm;
MoveEmitter mover(masm);
MoveResolver mr;
@ -374,7 +371,6 @@ BEGIN_TEST(testJitMoveEmitterCycles_autogen3) {
LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
TempAllocator alloc(&lifo);
JitContext jc(cx, &alloc);
AutoFlushICache afc("test");
StackMacroAssembler masm;
MoveEmitter mover(masm);
MoveResolver mr;


@ -57,7 +57,7 @@ static js::jit::JitCode* linkAndAllocate(JSContext* cx,
js::jit::MacroAssembler* masm) {
using namespace js;
using namespace js::jit;
Linker l(*masm, "test");
Linker l(*masm);
return l.newCode(cx, CodeKind::Ion);
}


@ -1244,7 +1244,6 @@ JSContext::JSContext(JSRuntime* runtime, const JS::ContextOptions& options)
#ifdef JS_TRACE_LOGGING
traceLogger(nullptr),
#endif
autoFlushICache_(this, nullptr),
dtoaState(this, nullptr),
suppressGC(this, 0),
gcSweeping(this, false),
@ -1298,8 +1297,7 @@ JSContext::JSContext(JSRuntime* runtime, const JS::ContextOptions& options)
#ifdef JS_STRUCTURED_SPEW
structuredSpewer_(),
#endif
insideDebuggerEvaluationWithOnNativeCallHook(this, nullptr)
{
insideDebuggerEvaluationWithOnNativeCallHook(this, nullptr) {
MOZ_ASSERT(static_cast<JS::RootingContext*>(this) ==
JS::RootingContext::get(this));
}


@ -523,14 +523,7 @@ struct JSContext : public JS::RootingContext,
js::UnprotectedData<js::TraceLoggerThread*> traceLogger;
#endif
private:
/* Pointer to the current AutoFlushICache. */
js::ContextData<js::jit::AutoFlushICache*> autoFlushICache_;
public:
js::jit::AutoFlushICache* autoFlushICache() const;
void setAutoFlushICache(js::jit::AutoFlushICache* afc);
// State used by util/DoubleToString.cpp.
js::ContextData<DtoaState*> dtoaState;
@ -1252,6 +1245,7 @@ class MOZ_RAII AutoKeepAtoms {
class MOZ_RAII AutoNoteDebuggerEvaluationWithOnNativeCallHook {
JSContext* cx;
Debugger* oldValue;
public:
AutoNoteDebuggerEvaluationWithOnNativeCallHook(JSContext* cx, Debugger* dbg)
: cx(cx), oldValue(cx->insideDebuggerEvaluationWithOnNativeCallHook) {


@ -103,7 +103,6 @@ namespace jit {
class JitRuntime;
class JitActivation;
struct PcScriptCache;
struct AutoFlushICache;
class CompileRuntime;
#ifdef JS_SIMULATOR_ARM64


@ -1204,7 +1204,7 @@ bool wasm::EnsureBuiltinThunksInitialized() {
return false;
}
masm.executableCopy(thunks->codeBase, /* flushICache = */ false);
masm.executableCopy(thunks->codeBase);
memset(thunks->codeBase + masm.bytesNeeded(), 0,
allocSize - masm.bytesNeeded());


@ -347,8 +347,7 @@ UniqueModuleSegment ModuleSegment::create(Tier tier, MacroAssembler& masm,
return nullptr;
}
// We'll flush the icache after static linking, in initialize().
masm.executableCopy(codeBytes.get(), /* flushICache = */ false);
masm.executableCopy(codeBytes.get());
return js::MakeUnique<ModuleSegment>(tier, std::move(codeBytes), codeLength,
linkData);
@ -714,7 +713,7 @@ bool LazyStubTier::createMany(const Uint32Vector& funcExportIndices,
&codePtr, &interpRangeIndex))
return false;
masm.executableCopy(codePtr, /* flushICache = */ false);
masm.executableCopy(codePtr);
PatchDebugSymbolicAccesses(codePtr, masm);
memset(codePtr + masm.bytesNeeded(), 0, codeLength - masm.bytesNeeded());


@ -427,7 +427,7 @@ bool wasm::CraneliftCompileFunctions(const ModuleEnvironment& env,
uint32_t totalCodeSize = masm.currentOffset();
uint8_t* codeBuf = (uint8_t*)js_malloc(totalCodeSize);
if (codeBuf) {
masm.executableCopy(codeBuf, totalCodeSize);
masm.executableCopy(codeBuf);
const CodeRangeVector& codeRanges = code->codeRanges;
MOZ_ASSERT(codeRanges.length() >= inputs.length());


@ -112,7 +112,6 @@ bool DebugState::incrementStepperCount(JSContext* cx, uint32_t funcIndex) {
AutoWritableJitCode awjc(
cx->runtime(), code_->segment(Tier::Debug).base() + codeRange.begin(),
codeRange.end() - codeRange.begin());
AutoFlushICache afc("Code::incrementStepperCount");
for (const CallSite& callSite : callSites(Tier::Debug)) {
if (callSite.kind() != CallSite::Breakpoint) {
@ -143,7 +142,6 @@ bool DebugState::decrementStepperCount(JSFreeOp* fop, uint32_t funcIndex) {
AutoWritableJitCode awjc(
fop->runtime(), code_->segment(Tier::Debug).base() + codeRange.begin(),
codeRange.end() - codeRange.begin());
AutoFlushICache afc("Code::decrementStepperCount");
for (const CallSite& callSite : callSites(Tier::Debug)) {
if (callSite.kind() != CallSite::Breakpoint) {
@ -181,9 +179,6 @@ void DebugState::toggleBreakpointTrap(JSRuntime* rt, uint32_t offset,
}
AutoWritableJitCode awjc(rt, codeSegment.base(), codeSegment.length());
AutoFlushICache afc("Code::toggleBreakpointTrap");
AutoFlushICache::setRange(uintptr_t(codeSegment.base()),
codeSegment.length());
toggleDebugTrap(debugTrapOffset, enabled);
}
@ -304,9 +299,6 @@ void DebugState::adjustEnterAndLeaveFrameTrapsState(JSContext* cx,
const ModuleSegment& codeSegment = code_->segment(Tier::Debug);
AutoWritableJitCode awjc(cx->runtime(), codeSegment.base(),
codeSegment.length());
AutoFlushICache afc("Code::adjustEnterAndLeaveFrameTrapsState");
AutoFlushICache::setRange(uintptr_t(codeSegment.base()),
codeSegment.length());
for (const CallSite& callSite : callSites(Tier::Debug)) {
if (callSite.kind() != CallSite::EnterFrame &&
callSite.kind() != CallSite::LeaveFrame) {


@ -1210,7 +1210,7 @@ SharedModule ModuleGenerator::finishModule(
return nullptr;
}
masm_.executableCopy(debugUnlinkedCode->begin(), /* flushICache = */ false);
masm_.executableCopy(debugUnlinkedCode->begin());
debugLinkData = std::move(linkData_);
debugBytecode = &bytecode;