Bug 1395509 - Add a separate byte count for malloc allocations r=sfink

Previously I rolled the malloc byte count into a total byte count for each zone but this may adversely affect GC scheduling (e.g. by triggering more non-incremental GCs because allocation volumes appear higher with this change). So that we can land this machinery without disturbing benchmarks too much, this patch splits out the new malloc memory accounting into a separate counter and uses the maxMallocBytes setting as the threshold (default value is 128MB vs 30MB for the GC heap threshold) and a growth factor of 2. This should make the behaviour closer to the original behaviour for now. We can go back and adjust the parameters later to obtain the desired behaviour.

Differential Revision: https://phabricator.services.mozilla.com/D34181
This commit is contained in:
Jon Coppeard 2019-06-07 17:03:08 +01:00
parent 8ade7a6dab
commit 3291e3d4b5
8 changed files with 179 additions and 73 deletions

View File

@ -436,9 +436,9 @@ namespace JS {
D(PREPARE_FOR_TRACING, 26) \
D(INCREMENTAL_ALLOC_TRIGGER, 27) \
D(FULL_CELL_PTR_STR_BUFFER, 28) \
D(INCREMENTAL_MALLOC_TRIGGER, 29) \
\
/* These are reserved for future use. */ \
D(RESERVED5, 29) \
D(RESERVED6, 30) \
D(RESERVED7, 31) \
D(RESERVED8, 32) \

View File

@ -371,8 +371,9 @@ bool GCRuntime::gcIfNeededAtAllocation(JSContext* cx) {
// If we have grown past our GC heap threshold while in the middle of
// an incremental GC, we're growing faster than we're GCing, so stop
// the world and do a full, non-incremental GC right now, if possible.
Zone* zone = cx->zone();
if (isIncrementalGCInProgress() &&
cx->zone()->totalBytes() > cx->zone()->threshold.gcTriggerBytes()) {
zone->zoneSize.gcBytes() > zone->threshold.gcTriggerBytes()) {
PrepareZoneForGC(cx->zone());
gc(GC_NORMAL, JS::GCReason::INCREMENTAL_TOO_SLOW);
}

View File

@ -1460,8 +1460,7 @@ bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value,
return false;
}
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
zone->threshold.updateAfterGC(zone->totalBytes(), GC_NORMAL, tunables,
schedulingState, lock);
zone->updateAllGCThresholds(*this, lock);
}
}
@ -1720,8 +1719,7 @@ void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) {
default:
tunables.resetParameter(key, lock);
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
zone->threshold.updateAfterGC(zone->totalBytes(), GC_NORMAL, tunables,
schedulingState, lock);
zone->updateAllGCThresholds(*this, lock);
}
}
}
@ -2071,7 +2069,7 @@ void GCRuntime::setMaxMallocBytes(size_t value, const AutoLockGC& lock) {
}
}
float ZoneHeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
float ZoneThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
float eagerTriggerFactor = highFrequencyGC
? HighFrequencyEagerAllocTriggerFactor
: LowFrequencyEagerAllocTriggerFactor;
@ -2167,6 +2165,22 @@ void ZoneHeapThreshold::updateForRemovedArena(
gcTriggerBytes_ -= amount;
}
/* static */
size_t ZoneMallocThreshold::computeZoneTriggerBytes(
    float growthFactor, size_t lastBytes, const GCSchedulingTunables& tunables,
    const AutoLockGC& lock) {
  // The trigger never drops below the configured maxMallocBytes setting:
  // grow from whichever is larger, the live malloc bytes or that floor.
  const size_t baseline = Max(lastBytes, tunables.maxMallocBytes());
  return size_t(float(baseline) * growthFactor);
}
void ZoneMallocThreshold::updateAfterGC(size_t lastBytes,
                                        const GCSchedulingTunables& tunables,
                                        const GCSchedulingState& state,
                                        const AutoLockGC& lock) {
  // Use a fixed growth factor of 2 for malloc memory, chosen to keep
  // scheduling behaviour close to the previous malloc-bytes accounting.
  const float growthFactor = 2.0f;
  gcTriggerBytes_ =
      computeZoneTriggerBytes(growthFactor, lastBytes, tunables, lock);
}
// All counters start at zero with no GC trigger having fired yet.
MemoryCounter::MemoryCounter()
    : bytes_(0), maxBytes_(0), triggered_(NoTrigger) {}
@ -3422,10 +3436,6 @@ bool GCRuntime::triggerGC(JS::GCReason reason) {
return true;
}
void js::gc::MaybeAllocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc) {
rt->gc.maybeAllocTriggerZoneGC(Zone::from(zoneAlloc));
}
void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {
if (!CurrentThreadCanAccessRuntime(rt)) {
// Zones in use by a helper thread can't be collected.
@ -3435,9 +3445,9 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {
MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
size_t usedBytes = zone->totalBytes(); // This already includes |nbytes|.
size_t usedBytes =
zone->zoneSize.gcBytes(); // This already includes |nbytes|.
size_t thresholdBytes = zone->threshold.gcTriggerBytes();
if (usedBytes >= thresholdBytes) {
// The threshold has been surpassed, immediately trigger a GC, which
// will be done non-incrementally.
@ -3478,6 +3488,39 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {
}
}
// Free-function wrapper so that code holding only a ZoneAllocator can request
// a malloc-triggered zone GC (see ZoneAllocator::maybeMallocTriggerZoneGC).
void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc) {
  rt->gc.maybeMallocTriggerZoneGC(Zone::from(zoneAlloc));
}
// Check the zone's malloc byte count against its malloc thresholds and
// trigger either a non-incremental or an incremental GC as appropriate.
void GCRuntime::maybeMallocTriggerZoneGC(Zone* zone) {
  if (!CurrentThreadCanAccessRuntime(rt)) {
    // Zones in use by a helper thread can't be collected.
    MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone());
    return;
  }

  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());

  size_t bytes = zone->gcMallocBytes.gcBytes();
  size_t triggerBytes = zone->gcMallocThreshold.gcTriggerBytes();

  if (bytes >= triggerBytes) {
    // Past the threshold: immediately trigger a GC, which will be done
    // non-incrementally.
    triggerZoneGC(zone, JS::GCReason::TOO_MUCH_MALLOC, bytes, triggerBytes);
    return;
  }

  size_t incrementalLimit =
      size_t(triggerBytes * tunables.allocThresholdFactor());
  if (bytes >= incrementalLimit) {
    // Within the incremental window: start or continue an in-progress
    // incremental GC.
    triggerZoneGC(zone, JS::GCReason::INCREMENTAL_MALLOC_TRIGGER, bytes,
                  incrementalLimit);
  }
}
bool GCRuntime::triggerZoneGC(Zone* zone, JS::GCReason reason, size_t used,
size_t threshold) {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
@ -3532,17 +3575,30 @@ void GCRuntime::maybeGC(Zone* zone) {
return;
}
float threshold = zone->threshold.eagerAllocTrigger(
schedulingState.inHighFrequencyGCMode());
float usedBytes = zone->totalBytes();
if (usedBytes > 1024 * 1024 && usedBytes >= threshold &&
!isIncrementalGCInProgress() && !isBackgroundSweeping()) {
stats().recordTrigger(usedBytes, threshold);
if (isIncrementalGCInProgress() || isBackgroundSweeping()) {
return;
}
if (checkEagerAllocTrigger(zone->zoneSize, zone->threshold) ||
checkEagerAllocTrigger(zone->gcMallocBytes, zone->gcMallocThreshold)) {
PrepareZoneForGC(zone);
startGC(GC_NORMAL, JS::GCReason::EAGER_ALLOC_TRIGGER);
}
}
// Returns true (and records the trigger in the stats) if |size| has passed
// the eager-allocation trigger for |threshold|.
bool GCRuntime::checkEagerAllocTrigger(const HeapSize& size,
                                       const ZoneThreshold& threshold) {
  float triggerBytes =
      threshold.eagerAllocTrigger(schedulingState.inHighFrequencyGCMode());
  float bytes = size.gcBytes();
  // Only fire once the zone is past a 1MB floor and over the eager trigger.
  if (bytes > 1024 * 1024 && bytes >= triggerBytes) {
    stats().recordTrigger(bytes, triggerBytes);
    return true;
  }
  return false;
}
void GCRuntime::triggerFullGCForAtoms(JSContext* cx) {
MOZ_ASSERT(fullGCForAtomsRequested_);
MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
@ -5938,8 +5994,7 @@ IncrementalProgress GCRuntime::endSweepingSweepGroup(FreeOp* fop,
for (SweepGroupZonesIter zone(rt); !zone.done(); zone.next()) {
AutoLockGC lock(rt);
zone->changeGCState(Zone::Sweep, Zone::Finished);
zone->threshold.updateAfterGC(zone->totalBytes(), invocationKind, tunables,
schedulingState, lock);
zone->updateAllGCThresholds(*this, lock);
zone->updateAllGCMallocCountersOnGCEnd(lock);
zone->arenas.unmarkPreMarkedFreeCells();
}
@ -7440,7 +7495,7 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
continue;
}
if (zone->totalBytes() >= zone->threshold.gcTriggerBytes()) {
if (zone->zoneSize.gcBytes() >= zone->threshold.gcTriggerBytes()) {
CheckZoneIsScheduled(zone, reason, "GC bytes");
budget.makeUnlimited();
stats().nonincremental(AbortReason::GCBytesTrigger);
@ -7449,6 +7504,16 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
}
}
if (zone->gcMallocBytes.gcBytes() >=
zone->gcMallocThreshold.gcTriggerBytes()) {
CheckZoneIsScheduled(zone, reason, "malloc bytes");
budget.makeUnlimited();
stats().nonincremental(AbortReason::MallocBytesTrigger);
if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
resetReason = AbortReason::MallocBytesTrigger;
}
}
if (zone->shouldTriggerGCForTooMuchMalloc() == NonIncrementalTrigger) {
CheckZoneIsScheduled(zone, reason, "malloc bytes");
budget.makeUnlimited();
@ -7493,10 +7558,14 @@ static void ScheduleZones(GCRuntime* gc) {
// This is a heuristic to reduce the total number of collections.
bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode();
if (zone->totalBytes() >=
if (zone->zoneSize.gcBytes() >=
zone->threshold.eagerAllocTrigger(inHighFrequencyMode)) {
zone->scheduleGC();
}
if (zone->gcMallocBytes.gcBytes() >=
zone->gcMallocThreshold.eagerAllocTrigger(inHighFrequencyMode)) {
zone->scheduleGC();
}
// This ensures we collect zones that have reached the malloc limit.
if (zone->shouldTriggerGCForTooMuchMalloc()) {
@ -8025,6 +8094,7 @@ void GCRuntime::minorGC(JS::GCReason reason, gcstats::PhaseKind phase) {
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
maybeAllocTriggerZoneGC(zone);
maybeMallocTriggerZoneGC(zone);
}
}
@ -8209,6 +8279,7 @@ void gc::MergeRealms(Realm* source, Realm* target) {
JSRuntime* rt = source->runtimeFromMainThread();
rt->gc.mergeRealms(source, target);
rt->gc.maybeAllocTriggerZoneGC(target->zone());
rt->gc.maybeMallocTriggerZoneGC(target->zone());
}
void GCRuntime::mergeRealms(Realm* source, Realm* target) {

View File

@ -258,13 +258,18 @@ class GCRuntime {
uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);
MOZ_MUST_USE bool triggerGC(JS::GCReason reason);
// Check whether to trigger a zone GC. During an incremental GC, optionally
// count |nbytes| towards the threshold for performing the next slice.
// Check whether to trigger a zone GC after allocating GC cells. During an
// incremental GC, optionally count |nbytes| towards the threshold for
// performing the next slice.
void maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes = 0);
// Check whether to trigger a zone GC after allocating malloc memory.
void maybeMallocTriggerZoneGC(Zone* zone);
// The return value indicates if we were able to do the GC.
bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
size_t thresholdBytes);
void maybeGC(Zone* zone);
bool checkEagerAllocTrigger(const HeapSize& size,
const ZoneThreshold& threshold);
// The return value indicates whether a major GC was performed.
bool gcIfRequested();
void gc(JSGCInvocationKind gckind, JS::GCReason reason);

View File

@ -645,17 +645,22 @@ class HeapSize {
size_t gcBytes() const { return gcBytes_; }
void addGCArena() {
gcBytes_ += ArenaSize;
void addGCArena() { addBytes(ArenaSize); }
void removeGCArena() { removeBytes(ArenaSize); }
void addBytes(size_t nbytes) {
mozilla::DebugOnly<size_t> initialBytes(gcBytes_);
MOZ_ASSERT(initialBytes + nbytes > initialBytes);
gcBytes_ += nbytes;
if (parent_) {
parent_->addGCArena();
parent_->addBytes(nbytes);
}
}
void removeGCArena() {
MOZ_ASSERT(gcBytes_ >= ArenaSize);
gcBytes_ -= ArenaSize;
void removeBytes(size_t nbytes) {
MOZ_ASSERT(gcBytes_ >= nbytes);
gcBytes_ -= nbytes;
if (parent_) {
parent_->removeGCArena();
parent_->removeBytes(nbytes);
}
}
@ -666,23 +671,29 @@ class HeapSize {
}
};
// This class encapsulates the data that determines when we need to do a zone
// GC.
class ZoneHeapThreshold {
// The "growth factor" for computing our next thresholds after a GC.
GCLockData<float> gcHeapGrowthFactor_;
// GC trigger threshold for allocations on the GC heap.
// Base class for GC heap and malloc thresholds.
class ZoneThreshold {
 protected:
  // GC trigger threshold.
  mozilla::Atomic<size_t, mozilla::Relaxed,
                  mozilla::recordreplay::Behavior::DontPreserve>
      gcTriggerBytes_;

 public:
  // Zero-initialize the trigger. The pre-refactor ZoneHeapThreshold
  // constructor initialized gcTriggerBytes_(0); restoring it here keeps the
  // guarantee for all subclasses (ZoneMallocThreshold declares no
  // constructor of its own) so the trigger is well-defined before the first
  // updateAfterGC call.
  ZoneThreshold() : gcTriggerBytes_(0) {}

  size_t gcTriggerBytes() const { return gcTriggerBytes_; }
  float eagerAllocTrigger(bool highFrequencyGC) const;
};
// This class encapsulates the data that determines when we need to do a zone GC
// based on GC heap size.
class ZoneHeapThreshold : public ZoneThreshold {
// The "growth factor" for computing our next thresholds after a GC.
GCLockData<float> gcHeapGrowthFactor_;
public:
ZoneHeapThreshold() : gcHeapGrowthFactor_(3.0f) {}
float gcHeapGrowthFactor() const { return gcHeapGrowthFactor_; }
void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
const GCSchedulingTunables& tunables,
@ -699,6 +710,19 @@ class ZoneHeapThreshold {
const AutoLockGC& lock);
};
// This class encapsulates the data that determines when we need to do a zone
// GC based on malloc data.
class ZoneMallocThreshold : public ZoneThreshold {
 public:
  // Recompute the malloc trigger threshold after a GC. |lastBytes| is the
  // number of malloc bytes still accounted to the zone.
  void updateAfterGC(size_t lastBytes, const GCSchedulingTunables& tunables,
                     const GCSchedulingState& state, const AutoLockGC& lock);

 private:
  // Returns max(lastBytes, tunables.maxMallocBytes()) scaled by
  // |growthFactor|.
  static size_t computeZoneTriggerBytes(float growthFactor, size_t lastBytes,
                                        const GCSchedulingTunables& tunables,
                                        const AutoLockGC& lock);
};
#ifdef DEBUG
// Counts memory associated with GC things in a zone.

View File

@ -32,10 +32,14 @@ using namespace js::gc;
Zone* const Zone::NotOnList = reinterpret_cast<Zone*>(1);
ZoneAllocator::ZoneAllocator(JSRuntime* rt)
: JS::shadow::Zone(rt, &rt->gc.marker), zoneSize(&rt->gc.heapSize) {
: JS::shadow::Zone(rt, &rt->gc.marker),
zoneSize(&rt->gc.heapSize),
gcMallocBytes(nullptr) {
AutoLockGC lock(rt);
threshold.updateAfterGC(8192, GC_NORMAL, rt->gc.tunables,
rt->gc.schedulingState, lock);
gcMallocThreshold.updateAfterGC(8192, rt->gc.tunables, rt->gc.schedulingState,
lock);
setGCMaxMallocBytes(rt->gc.tunables.maxMallocBytes(), lock);
jitCodeCounter.setMax(jit::MaxCodeBytesPerProcess * 0.8, lock);
}
@ -44,7 +48,8 @@ ZoneAllocator::~ZoneAllocator() {
#ifdef DEBUG
if (runtimeFromAnyThread()->gc.shutdownCollectedEverything()) {
gcMallocTracker.checkEmptyOnDestroy();
MOZ_ASSERT(gcMallocBytes == 0);
MOZ_ASSERT(zoneSize.gcBytes() == 0);
MOZ_ASSERT(gcMallocBytes.gcBytes() == 0);
}
#endif
}
@ -67,6 +72,14 @@ void js::ZoneAllocator::updateAllGCMallocCountersOnGCEnd(
jitCodeCounter.updateOnGCEnd(gc.tunables, lock);
}
// Recompute both GC trigger thresholds (GC heap and malloc) from the zone's
// current live byte counts. Called when sweeping a zone finishes and when GC
// parameters are set or reset.
void js::ZoneAllocator::updateAllGCThresholds(GCRuntime& gc,
                                              const js::AutoLockGC& lock) {
  threshold.updateAfterGC(zoneSize.gcBytes(), GC_NORMAL, gc.tunables,
                          gc.schedulingState, lock);
  gcMallocThreshold.updateAfterGC(gcMallocBytes.gcBytes(), gc.tunables,
                                  gc.schedulingState, lock);
}
js::gc::TriggerKind js::ZoneAllocator::shouldTriggerGCForTooMuchMalloc() {
auto& gc = runtimeFromAnyThread()->gc;
return std::max(gcMallocCounter.shouldTriggerGC(gc.tunables),

View File

@ -21,7 +21,7 @@ class Zone;
namespace js {
namespace gc {
void MaybeAllocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc);
void MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc);
}
// Base class of JS::Zone that provides malloc memory allocation and accounting.
@ -51,8 +51,7 @@ class ZoneAllocator : public JS::shadow::Zone,
}
void adoptMallocBytes(ZoneAllocator* other) {
gcMallocCounter.adopt(other->gcMallocCounter);
gcMallocBytes += other->gcMallocBytes;
other->gcMallocBytes = 0;
gcMallocBytes.adopt(other->gcMallocBytes);
#ifdef DEBUG
gcMallocTracker.adopt(other->gcMallocTracker);
#endif
@ -66,6 +65,7 @@ class ZoneAllocator : public JS::shadow::Zone,
void updateAllGCMallocCountersOnGCStart();
void updateAllGCMallocCountersOnGCEnd(const js::AutoLockGC& lock);
void updateAllGCThresholds(gc::GCRuntime& gc, const js::AutoLockGC& lock);
js::gc::TriggerKind shouldTriggerGCForTooMuchMalloc();
// Memory accounting APIs for malloc memory owned by GC cells.
@ -73,10 +73,8 @@ class ZoneAllocator : public JS::shadow::Zone,
void addCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
MOZ_ASSERT(cell);
MOZ_ASSERT(nbytes);
mozilla::DebugOnly<size_t> initialBytes(gcMallocBytes);
MOZ_ASSERT(initialBytes + nbytes > initialBytes);
gcMallocBytes.addBytes(nbytes);
gcMallocBytes += nbytes;
// We don't currently check GC triggers here.
#ifdef DEBUG
@ -87,9 +85,7 @@ class ZoneAllocator : public JS::shadow::Zone,
void removeCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
MOZ_ASSERT(cell);
MOZ_ASSERT(nbytes);
MOZ_ASSERT(gcMallocBytes >= nbytes);
gcMallocBytes -= nbytes;
gcMallocBytes.removeBytes(nbytes);
#ifdef DEBUG
gcMallocTracker.untrackMemory(cell, nbytes, use);
@ -113,36 +109,29 @@ class ZoneAllocator : public JS::shadow::Zone,
void incPolicyMemory(js::ZoneAllocPolicy* policy, size_t nbytes) {
MOZ_ASSERT(nbytes);
mozilla::DebugOnly<size_t> initialBytes(gcMallocBytes);
MOZ_ASSERT(initialBytes + nbytes > initialBytes);
gcMallocBytes += nbytes;
gcMallocBytes.addBytes(nbytes);
#ifdef DEBUG
gcMallocTracker.incPolicyMemory(policy, nbytes);
#endif
maybeAllocTriggerZoneGC();
maybeMallocTriggerZoneGC();
}
void decPolicyMemory(js::ZoneAllocPolicy* policy, size_t nbytes) {
MOZ_ASSERT(nbytes);
MOZ_ASSERT(gcMallocBytes >= nbytes);
gcMallocBytes -= nbytes;
gcMallocBytes.removeBytes(nbytes);
#ifdef DEBUG
gcMallocTracker.decPolicyMemory(policy, nbytes);
#endif
}
size_t totalBytes() const { return zoneSize.gcBytes() + gcMallocBytes; }
// Check allocation threshold and trigger a zone GC if necessary.
void maybeAllocTriggerZoneGC() {
// Check malloc allocation threshold and trigger a zone GC if necessary.
void maybeMallocTriggerZoneGC() {
JSRuntime* rt = runtimeFromAnyThread();
if (totalBytes() >= threshold.gcTriggerBytes() &&
if (gcMallocBytes.gcBytes() >= threshold.gcTriggerBytes() &&
rt->heapState() == JS::HeapState::Idle) {
gc::MaybeAllocTriggerZoneGC(rt, this);
gc::MaybeMallocTriggerZoneGC(rt, this);
}
}
@ -164,10 +153,10 @@ class ZoneAllocator : public JS::shadow::Zone,
js::gc::TriggerKind trigger);
public:
// Track heap size under this Zone.
// Track GC heap size under this Zone.
js::gc::HeapSize zoneSize;
// Thresholds used to trigger GC.
// Thresholds used to trigger GC based on heap size.
js::gc::ZoneHeapThreshold threshold;
// Amount of data to allocate before triggering a new incremental slice for
@ -180,12 +169,15 @@ class ZoneAllocator : public JS::shadow::Zone,
// free. Currently this is used for all internal malloc allocations.
js::gc::MemoryCounter gcMallocCounter;
public:
// Malloc counter used for allocations where size information is
// available. Used for some internal and all tracked external allocations.
mozilla::Atomic<size_t, mozilla::Relaxed,
mozilla::recordreplay::Behavior::DontPreserve>
gcMallocBytes;
js::gc::HeapSize gcMallocBytes;
// Thresholds used to trigger GC based on malloc allocations.
js::gc::ZoneMallocThreshold gcMallocThreshold;
private:
#ifdef DEBUG
// In debug builds, malloc allocations can be tracked to make debugging easier
// (possible?) if allocation and free sizes don't balance.

View File

@ -1172,7 +1172,7 @@ JS_PUBLIC_API void JS::AddAssociatedMemory(JSObject* obj, size_t nbytes,
Zone* zone = obj->zone();
zone->addCellMemory(obj, nbytes, js::MemoryUse(use));
zone->maybeAllocTriggerZoneGC();
zone->maybeMallocTriggerZoneGC();
}
JS_PUBLIC_API void JS::RemoveAssociatedMemory(JSObject* obj, size_t nbytes,