Bug 1569564 - Replace the JIT code counter using the new precise tracking infrastructure r=sfink

This replaces the original JIT code memory counter with one that tracks the allocated JIT code precisely. The trigger now uses a fixed threshold, which was the original intention.

This also removes the INCREMENTAL_MALLOC_TRIGGER GC reason and all GCs triggered by malloc allocations use TOO_MUCH_MALLOC, so whether a GC is incremental is separated from the trigger reason.

Differential Revision: https://phabricator.services.mozilla.com/D39734

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Jon Coppeard 2019-07-31 09:13:19 +00:00
parent 0c1255fb90
commit 9730094120
12 changed files with 125 additions and 81 deletions

View File

@ -456,7 +456,7 @@ namespace JS {
D(PREPARE_FOR_TRACING, 26) \
D(INCREMENTAL_ALLOC_TRIGGER, 27) \
D(FULL_CELL_PTR_STR_BUFFER, 28) \
D(INCREMENTAL_MALLOC_TRIGGER, 29) \
D(TOO_MUCH_JIT_CODE, 29) \
\
/* These are reserved for future use. */ \
D(RESERVED6, 30) \

View File

@ -3444,6 +3444,7 @@ static bool RecordReplayCheckCanGC(JS::GCReason reason) {
case JS::GCReason::ALLOC_TRIGGER:
case JS::GCReason::DELAYED_ATOMS_GC:
case JS::GCReason::TOO_MUCH_WASM_MEMORY:
case JS::GCReason::TOO_MUCH_JIT_CODE:
return false;
default:
@ -3533,37 +3534,54 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {
}
}
void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc) {
rt->gc.maybeMallocTriggerZoneGC(Zone::from(zoneAlloc));
void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
const HeapSize& heap,
const ZoneThreshold& threshold,
JS::GCReason reason) {
rt->gc.maybeMallocTriggerZoneGC(Zone::from(zoneAlloc), heap, threshold,
reason);
}
// Trigger a zone GC if either the malloc heap or the JIT code heap for this
// zone has passed its threshold.  Malloc memory is checked first; the JIT
// code check only runs when no GC was triggered for malloc memory.
void GCRuntime::maybeMallocTriggerZoneGC(Zone* zone) {
  bool triggered =
      maybeMallocTriggerZoneGC(zone, zone->gcMallocBytes,
                               zone->gcMallocThreshold,
                               JS::GCReason::TOO_MUCH_MALLOC);
  if (!triggered) {
    maybeMallocTriggerZoneGC(zone, zone->gcJitBytes, zone->gcJitThreshold,
                             JS::GCReason::TOO_MUCH_JIT_CODE);
  }
}
// Check one (heap, threshold) pair for |zone| and trigger a GC with |reason|
// if a trigger level has been reached.  Returns true if a GC was triggered.
// Two levels are used: at the full threshold the GC is non-incremental; at
// allocThresholdFactor() of the threshold an incremental GC is started or
// continued.  (The stripped diff left pre-patch lines interleaved here --
// duplicate |usedBytes|/|thresholdBytes| declarations and a bare |return;|
// before |return false;| -- this is the reconstructed post-patch body.)
bool GCRuntime::maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
                                         const ZoneThreshold& threshold,
                                         JS::GCReason reason) {
  if (!CurrentThreadCanAccessRuntime(rt)) {
    // Zones in use by a helper thread can't be collected.
    MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone());
    return false;
  }

  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());

  size_t usedBytes = heap.gcBytes();
  size_t thresholdBytes = threshold.gcTriggerBytes();

  if (usedBytes >= thresholdBytes) {
    // The threshold has been surpassed, immediately trigger a GC, which
    // will be done non-incrementally.
    triggerZoneGC(zone, reason, usedBytes, thresholdBytes);
    return true;
  }

  float zoneGCThresholdFactor = tunables.allocThresholdFactor();
  size_t igcThresholdBytes = thresholdBytes * zoneGCThresholdFactor;

  if (usedBytes >= igcThresholdBytes) {
    // Start or continue an in progress incremental GC.
    triggerZoneGC(zone, reason, usedBytes, igcThresholdBytes);
    return true;
  }

  return false;
}
bool GCRuntime::triggerZoneGC(Zone* zone, JS::GCReason reason, size_t used,
@ -6056,7 +6074,6 @@ IncrementalProgress GCRuntime::endSweepingSweepGroup(FreeOp* fop,
AutoLockGC lock(rt);
zone->changeGCState(Zone::Sweep, Zone::Finished);
zone->updateGCThresholds(*this, invocationKind, lock);
zone->updateMemoryCountersOnGCEnd(lock);
zone->arenas.unmarkPreMarkedFreeCells();
}
@ -7510,6 +7527,15 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
}
}
if (zone->gcJitBytes.gcBytes() >= zone->gcJitThreshold.gcTriggerBytes()) {
CheckZoneIsScheduled(zone, reason, "JIT code bytes");
budget.makeUnlimited();
stats().nonincremental(AbortReason::JitCodeBytesTrigger);
if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
resetReason = AbortReason::JitCodeBytesTrigger;
}
}
if (zone->shouldTriggerGCForTooMuchMalloc() == NonIncrementalTrigger) {
CheckZoneIsScheduled(zone, reason, "malloc bytes");
budget.makeUnlimited();
@ -7555,11 +7581,10 @@ static void ScheduleZones(GCRuntime* gc) {
// This is a heuristic to reduce the total number of collections.
bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode();
if (zone->zoneSize.gcBytes() >=
zone->threshold.eagerAllocTrigger(inHighFrequencyMode)) {
zone->scheduleGC();
}
if (zone->gcMallocBytes.gcBytes() >=
zone->gcMallocThreshold.eagerAllocTrigger(inHighFrequencyMode)) {
zone->threshold.eagerAllocTrigger(inHighFrequencyMode) ||
zone->gcMallocBytes.gcBytes() >=
zone->gcMallocThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
zone->gcJitBytes.gcBytes() >= zone->gcJitThreshold.gcTriggerBytes()) {
zone->scheduleGC();
}
@ -7729,6 +7754,7 @@ static bool IsDeterministicGCReason(JS::GCReason reason) {
case JS::GCReason::LAST_DITCH:
case JS::GCReason::TOO_MUCH_MALLOC:
case JS::GCReason::TOO_MUCH_WASM_MEMORY:
case JS::GCReason::TOO_MUCH_JIT_CODE:
case JS::GCReason::ALLOC_TRIGGER:
case JS::GCReason::DEBUG_GC:
case JS::GCReason::CC_FORCED:
@ -7899,14 +7925,6 @@ void GCRuntime::collect(bool nonincrementalByAPI, SliceBudget budget,
}
} while (repeat);
#ifdef DEBUG
if (!isIncrementalGCInProgress()) {
for (ZonesIter zone(rt, WithAtoms); zone.done(); zone.next()) {
MOZ_ASSERT(!zone->jitCodeCounter.triggered());
}
}
#endif
if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
maybeDoCycleCollection();
}

View File

@ -39,18 +39,19 @@ enum class State {
};
// Reasons we reset an ongoing incremental GC or perform a non-incremental GC.
/* The stripped diff left two back-to-back #define GC_ABORT_REASONS blocks
 * (macro redefinition); only the post-patch list, with the new
 * JitCodeBytesTrigger entry, is kept. */
#define GC_ABORT_REASONS(D)      \
  D(None, 0)                     \
  D(NonIncrementalRequested, 1)  \
  D(AbortRequested, 2)           \
  D(Unused1, 3)                  \
  D(IncrementalDisabled, 4)      \
  D(ModeChange, 5)               \
  D(MallocBytesTrigger, 6)       \
  D(GCBytesTrigger, 7)           \
  D(ZoneChange, 8)               \
  D(CompartmentRevived, 9)       \
  D(GrayRootBufferingFailed, 10) \
  D(JitCodeBytesTrigger, 11)
enum class AbortReason {
#define MAKE_REASON(name, num) name = num,
GC_ABORT_REASONS(MAKE_REASON)

View File

@ -266,6 +266,9 @@ class GCRuntime {
void maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes = 0);
// Check whether to trigger a zone GC after malloc memory.
void maybeMallocTriggerZoneGC(Zone* zone);
bool maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
const ZoneThreshold& threshold,
JS::GCReason reason);
// The return value indicates if we were able to do the GC.
bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
size_t thresholdBytes);

View File

@ -755,6 +755,13 @@ class ZoneMallocThreshold : public ZoneThreshold {
const AutoLockGC& lock);
};
// A fixed threshold that determines when we need to do a zone GC based on
// allocated JIT code.
class ZoneFixedThreshold : public ZoneThreshold {
public:
// The trigger value is fixed at construction time and never adjusted
// afterwards by GC scheduling.
explicit ZoneFixedThreshold(size_t bytes) { gcTriggerBytes_ = bytes; }
};
#ifdef DEBUG
// Counts memory associated with GC things in a zone.

View File

@ -32,10 +32,11 @@ Zone* const Zone::NotOnList = reinterpret_cast<Zone*>(1);
// Construct the per-zone allocation accounting.  (De-garbled: the stripped
// diff interleaved the old init-list terminator and the removed
// jitCodeCounter.setMax call; this is the post-patch constructor.)
ZoneAllocator::ZoneAllocator(JSRuntime* rt)
    : JS::shadow::Zone(rt, &rt->gc.marker),
      zoneSize(&rt->gc.heapSize),
      gcMallocBytes(nullptr),
      gcJitBytes(nullptr),
      // Fixed JIT code trigger: 80% of the per-process executable code limit.
      gcJitThreshold(jit::MaxCodeBytesPerProcess * 0.8) {
  AutoLockGC lock(rt);
  updateGCThresholds(rt->gc, GC_NORMAL, lock);
}
ZoneAllocator::~ZoneAllocator() {
@ -44,6 +45,7 @@ ZoneAllocator::~ZoneAllocator() {
gcMallocTracker.checkEmptyOnDestroy();
MOZ_ASSERT(zoneSize.gcBytes() == 0);
MOZ_ASSERT(gcMallocBytes.gcBytes() == 0);
MOZ_ASSERT(gcJitBytes.gcBytes() == 0);
}
#endif
}
@ -57,13 +59,6 @@ void ZoneAllocator::fixupAfterMovingGC() {
// Reset the zone's memory counters at the start of a GC.  The residual
// jitCodeCounter.updateOnGCStart() line is dropped: this patch removes the
// jitCodeCounter member entirely (see the member-declaration hunk), so the
// call would no longer compile.
void js::ZoneAllocator::updateMemoryCountersOnGCStart() {
  zoneSize.updateOnGCStart();
  gcMallocBytes.updateOnGCStart();
}
void js::ZoneAllocator::updateMemoryCountersOnGCEnd(
const js::AutoLockGC& lock) {
auto& gc = runtimeFromAnyThread()->gc;
jitCodeCounter.updateOnGCEnd(gc.tunables, lock);
}
void js::ZoneAllocator::updateGCThresholds(GCRuntime& gc,
@ -78,11 +73,6 @@ void js::ZoneAllocator::updateGCThresholds(GCRuntime& gc,
gc.tunables.mallocGrowthFactor(), lock);
}
js::gc::TriggerKind js::ZoneAllocator::shouldTriggerGCForTooMuchMalloc() {
auto& gc = runtimeFromAnyThread()->gc;
return jitCodeCounter.shouldTriggerGC(gc.tunables);
}
void ZoneAllocPolicy::decMemory(size_t nbytes) {
// Unfortunately we don't have enough context here to know whether we're being
// called on behalf of the collector so we have to do a TLS lookup to find

View File

@ -21,7 +21,10 @@ class Zone;
namespace js {
namespace gc {
void MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc);
void MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
const HeapSize& heap,
const ZoneThreshold& threshold,
JS::GCReason reason);
}
// Base class of JS::Zone that provides malloc memory allocation and accounting.
@ -47,20 +50,18 @@ class ZoneAllocator : public JS::shadow::Zone,
// Transfer |other|'s malloc and JIT code byte counts (and, in debug builds,
// its tracked allocations) into this zone, leaving |other|'s counters empty.
void adoptMallocBytes(ZoneAllocator* other) {
gcMallocBytes.adopt(other->gcMallocBytes);
gcJitBytes.adopt(other->gcJitBytes);
#ifdef DEBUG
gcMallocTracker.adopt(other->gcMallocTracker);
#endif
}
void updateJitCodeMallocBytes(size_t nbytes) {
updateMemoryCounter(jitCodeCounter, nbytes);
}
void updateMemoryCountersOnGCStart();
void updateMemoryCountersOnGCEnd(const js::AutoLockGC& lock);
void updateGCThresholds(gc::GCRuntime& gc, JSGCInvocationKind invocationKind,
const js::AutoLockGC& lock);
js::gc::TriggerKind shouldTriggerGCForTooMuchMalloc();
// Always reports no trigger: the imprecise jitCodeCounter this method used
// to consult was replaced by precise tracking (gcJitBytes/gcJitThreshold),
// which triggers GCs directly via maybeTriggerZoneGC instead.
js::gc::TriggerKind shouldTriggerGCForTooMuchMalloc() {
return gc::NoTrigger;
}
// Memory accounting APIs for malloc memory owned by GC cells.
@ -129,18 +130,36 @@ class ZoneAllocator : public JS::shadow::Zone,
#endif
}
// Record |nbytes| of newly allocated JIT code for this zone and trigger a
// zone GC if the (fixed) JIT code threshold has been reached.
void incJitMemory(size_t nbytes) {
MOZ_ASSERT(nbytes);
gcJitBytes.addBytes(nbytes);
maybeTriggerZoneGC(gcJitBytes, gcJitThreshold,
JS::GCReason::TOO_MUCH_JIT_CODE);
}
// Record that |nbytes| of JIT code was freed.
void decJitMemory(size_t nbytes) {
MOZ_ASSERT(nbytes);
// NOTE(review): the |true| argument's meaning isn't visible here --
// presumably it permits updates off the main thread; confirm against
// HeapSize::removeBytes.
gcJitBytes.removeBytes(nbytes, true);
}
// Check malloc allocation threshold and trigger a zone GC if necessary.
// (De-garbled: the stripped diff interleaved the old inline threshold check
// with the new body that delegates to the shared maybeTriggerZoneGC helper;
// this is the post-patch version.)
void maybeMallocTriggerZoneGC() {
  maybeTriggerZoneGC(gcMallocBytes, gcMallocThreshold,
                     JS::GCReason::TOO_MUCH_MALLOC);
}
private:
// Shared trigger check for the malloc and JIT code heaps: once |heap| has
// grown to allocThresholdFactor() of |threshold| and the runtime heap is
// idle, ask the GC to consider a zone GC for |reason|.
void maybeTriggerZoneGC(const js::gc::HeapSize& heap,
                        const js::gc::ZoneThreshold& threshold,
                        JS::GCReason reason) {
  JSRuntime* runtime = runtimeFromAnyThread();
  float factor = runtime->gc.tunables.allocThresholdFactor();
  size_t triggerBytes = threshold.gcTriggerBytes() * factor;
  if (heap.gcBytes() < triggerBytes) {
    return;
  }
  if (runtime->heapState() != JS::HeapState::Idle) {
    return;
  }
  gc::MaybeMallocTriggerZoneGC(runtime, this, heap, threshold, reason);
}
void updateMemoryCounter(js::gc::MemoryCounter& counter, size_t nbytes) {
JSRuntime* rt = runtimeFromAnyThread();
@ -175,6 +194,12 @@ class ZoneAllocator : public JS::shadow::Zone,
// Thresholds used to trigger GC based on malloc allocations.
js::gc::ZoneMallocThreshold gcMallocThreshold;
// Malloc counter used for JIT code allocation.
js::gc::HeapSize gcJitBytes;
// Thresholds used to trigger GC based on JIT allocations.
js::gc::ZoneFixedThreshold gcJitThreshold;
private:
#ifdef DEBUG
// In debug builds, malloc allocations can be tracked to make debugging easier
@ -182,10 +207,6 @@ class ZoneAllocator : public JS::shadow::Zone,
js::gc::MemoryTracker gcMallocTracker;
#endif
// Counter of JIT code executable memory for GC scheduling. Also imprecise,
// since wasm can generate code that outlives a zone.
js::gc::MemoryCounter jitCodeCounter;
friend class js::gc::GCRuntime;
};

View File

@ -217,8 +217,6 @@ void* ExecutableAllocator::alloc(JSContext* cx, size_t n,
void* result = (*poolp)->alloc(n, type);
MOZ_ASSERT(result);
cx->zone()->updateJitCodeMallocBytes(n);
return result;
}

View File

@ -647,16 +647,20 @@ void JitCodeHeader::init(JitCode* jitCode) {
}
// Allocate a JitCode cell wrapping |code|.  |totalSize| covers the header
// plus the code buffer; on GC-cell allocation failure the whole pool
// reservation is released.  On success the zone's precise JIT code
// accounting is bumped by |totalSize|.  (De-garbled: the stripped diff
// interleaved the old bufferSize-based signature and release call with the
// new totalSize-based ones; this is the post-patch version.)
template <AllowGC allowGC>
JitCode* JitCode::New(JSContext* cx, uint8_t* code, uint32_t totalSize,
                      uint32_t headerSize, ExecutablePool* pool,
                      CodeKind kind) {
  JitCode* codeObj = Allocate<JitCode, allowGC>(cx);
  if (!codeObj) {
    pool->release(totalSize, kind);
    return nullptr;
  }

  uint32_t bufferSize = totalSize - headerSize;
  new (codeObj) JitCode(code, bufferSize, headerSize, pool, kind);
  cx->zone()->incJitMemory(totalSize);

  return codeObj;
}
@ -743,6 +747,8 @@ void JitCode::finalize(FreeOp* fop) {
pool_->release(headerSize_ + bufferSize_, CodeKind(kind_));
}
zone()->decJitMemory(headerSize_ + bufferSize_);
pool_ = nullptr;
}

View File

@ -132,7 +132,7 @@ class JitCode : public gc::TenuredCell {
// object can be allocated, nullptr is returned. On failure, |pool| is
// automatically released, so the code may be freed.
template <AllowGC allowGC>
static JitCode* New(JSContext* cx, uint8_t* code, uint32_t bufferSize,
static JitCode* New(JSContext* cx, uint8_t* code, uint32_t totalSize,
uint32_t headerSize, ExecutablePool* pool, CodeKind kind);
public:

View File

@ -50,8 +50,8 @@ JitCode* Linker::newCode(JSContext* cx, CodeKind kind) {
codeStart = (uint8_t*)AlignBytes((uintptr_t)codeStart, CodeAlignment);
MOZ_ASSERT(codeStart + masm.bytesNeeded() <= result + bytesNeeded);
uint32_t headerSize = codeStart - result;
JitCode* code = JitCode::New<NoGC>(cx, codeStart, bytesNeeded - headerSize,
headerSize, pool, kind);
JitCode* code =
JitCode::New<NoGC>(cx, codeStart, bytesNeeded, headerSize, pool, kind);
if (!code) {
return fail(cx);
}

View File

@ -687,6 +687,7 @@ const JSFunctionSpec WasmModuleObject::static_methods[] = {
/* static */
void WasmModuleObject::finalize(FreeOp* fop, JSObject* obj) {
  // Undo the JIT code accounting recorded when the module object was
  // created, then release the module itself.
  const Module& module = obj->as<WasmModuleObject>().module();
  size_t tierCodeBytes = module.codeLength(module.code().stableTier());
  obj->zone()->decJitMemory(tierCodeBytes);
  fop->release(obj, &module, module.gcMallocBytesExcludingCode(),
               MemoryUse::WasmModule);
}
@ -1043,10 +1044,9 @@ WasmModuleObject* WasmModuleObject::create(JSContext* cx, const Module& module,
module.gcMallocBytesExcludingCode(), MemoryUse::WasmModule);
module.AddRef();
// We account for the first tier here; the second tier, if different, will be
// accounted for separately when it's been compiled.
cx->zone()->updateJitCodeMallocBytes(
module.codeLength(module.code().stableTier()));
// Bug 1569888: We account for the first tier here; the second tier, if
// different, also needs to be accounted for.
cx->zone()->incJitMemory(module.codeLength(module.code().stableTier()));
return obj;
}