Bug 1575175 - Rename memory counter classes now they're used for both GC and malloc heaps r=sfink

This renames:
  HeapSize::gcBytes -> bytes (it's not just for GC heaps any more)
  ZoneThreshold -> HeapThreshold (to go with HeapSize)
  HeapThreshold::triggerBytes -> bytes (what else could it be?)

I renamed the ZoneAllocator members to make them more uniform/consistent so we now have gcHeapSize/gcHeapThreshold, mallocHeapSize/mallocHeapThreshold etc.

I also renamed the heap threshold classes.

Differential Revision: https://phabricator.services.mozilla.com/D42868

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Jon Coppeard 2019-08-21 15:14:31 +00:00
parent 1652ece8e3
commit 266f4c6efb
11 changed files with 166 additions and 165 deletions

View File

@ -145,8 +145,8 @@ typedef enum JSGCParamKey {
* collections. * collections.
* *
* The RHS of the equation above is calculated and sets * The RHS of the equation above is calculated and sets
* zone->threshold.gcTriggerBytes(). When usage.gcBytes() surpasses * zone->gcHeapThreshold.bytes(). When gcHeapSize.bytes() exceeds
* threshold.gcTriggerBytes() for a zone, the zone may be scheduled for a GC. * gcHeapThreshold.bytes() for a zone, the zone may be scheduled for a GC.
*/ */
/** /**

View File

@ -461,7 +461,7 @@ static bool GC(JSContext* cx, unsigned argc, Value* vp) {
} }
#ifndef JS_MORE_DETERMINISTIC #ifndef JS_MORE_DETERMINISTIC
size_t preBytes = cx->runtime()->gc.heapSize.gcBytes(); size_t preBytes = cx->runtime()->gc.heapSize.bytes();
#endif #endif
if (zone) { if (zone) {
@ -476,7 +476,7 @@ static bool GC(JSContext* cx, unsigned argc, Value* vp) {
char buf[256] = {'\0'}; char buf[256] = {'\0'};
#ifndef JS_MORE_DETERMINISTIC #ifndef JS_MORE_DETERMINISTIC
SprintfLiteral(buf, "before %zu, after %zu\n", preBytes, SprintfLiteral(buf, "before %zu, after %zu\n", preBytes,
cx->runtime()->gc.heapSize.gcBytes()); cx->runtime()->gc.heapSize.bytes());
#endif #endif
return ReturnStringCopy(cx, args, buf); return ReturnStringCopy(cx, args, buf);
} }

View File

@ -373,7 +373,7 @@ bool GCRuntime::gcIfNeededAtAllocation(JSContext* cx) {
// the world and do a full, non-incremental GC right now, if possible. // the world and do a full, non-incremental GC right now, if possible.
Zone* zone = cx->zone(); Zone* zone = cx->zone();
if (isIncrementalGCInProgress() && if (isIncrementalGCInProgress() &&
zone->zoneSize.gcBytes() > zone->threshold.gcTriggerBytes()) { zone->gcHeapSize.bytes() > zone->gcHeapThreshold.bytes()) {
PrepareZoneForGC(cx->zone()); PrepareZoneForGC(cx->zone());
gc(GC_NORMAL, JS::GCReason::INCREMENTAL_TOO_SLOW); gc(GC_NORMAL, JS::GCReason::INCREMENTAL_TOO_SLOW);
} }
@ -595,11 +595,11 @@ Arena* GCRuntime::allocateArena(Chunk* chunk, Zone* zone, AllocKind thingKind,
// Fail the allocation if we are over our heap size limits. // Fail the allocation if we are over our heap size limits.
if ((checkThresholds != ShouldCheckThresholds::DontCheckThresholds) && if ((checkThresholds != ShouldCheckThresholds::DontCheckThresholds) &&
(heapSize.gcBytes() >= tunables.gcMaxBytes())) (heapSize.bytes() >= tunables.gcMaxBytes()))
return nullptr; return nullptr;
Arena* arena = chunk->allocateArena(rt, zone, thingKind, lock); Arena* arena = chunk->allocateArena(rt, zone, thingKind, lock);
zone->zoneSize.addGCArena(); zone->gcHeapSize.addGCArena();
// Trigger an incremental slice if needed. // Trigger an incremental slice if needed.
if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) { if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {

View File

@ -957,7 +957,7 @@ void GCRuntime::releaseArena(Arena* arena, const AutoLockGC& lock) {
MOZ_ASSERT(arena->allocated()); MOZ_ASSERT(arena->allocated());
MOZ_ASSERT(!arena->onDelayedMarkingList()); MOZ_ASSERT(!arena->onDelayedMarkingList());
arena->zone->zoneSize.removeGCArena(); arena->zone->gcHeapSize.removeGCArena();
arena->release(lock); arena->release(lock);
arena->chunk()->releaseArena(rt, arena, lock); arena->chunk()->releaseArena(rt, arena, lock);
} }
@ -1797,7 +1797,7 @@ uint32_t GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock) {
MOZ_ASSERT(tunables.gcMaxNurseryBytes() < UINT32_MAX); MOZ_ASSERT(tunables.gcMaxNurseryBytes() < UINT32_MAX);
return uint32_t(tunables.gcMaxNurseryBytes()); return uint32_t(tunables.gcMaxNurseryBytes());
case JSGC_BYTES: case JSGC_BYTES:
return uint32_t(heapSize.gcBytes()); return uint32_t(heapSize.bytes());
case JSGC_NURSERY_BYTES: case JSGC_NURSERY_BYTES:
return nursery().capacity(); return nursery().capacity();
case JSGC_NUMBER: case JSGC_NUMBER:
@ -2054,15 +2054,15 @@ extern JS_FRIEND_API void js::RemoveRawValueRoot(JSContext* cx, Value* vp) {
cx->runtime()->gc.removeRoot(vp); cx->runtime()->gc.removeRoot(vp);
} }
float ZoneThreshold::eagerAllocTrigger(bool highFrequencyGC) const { float HeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
float eagerTriggerFactor = highFrequencyGC float eagerTriggerFactor = highFrequencyGC
? HighFrequencyEagerAllocTriggerFactor ? HighFrequencyEagerAllocTriggerFactor
: LowFrequencyEagerAllocTriggerFactor; : LowFrequencyEagerAllocTriggerFactor;
return eagerTriggerFactor * gcTriggerBytes(); return eagerTriggerFactor * bytes();
} }
/* static */ /* static */
float ZoneHeapThreshold::computeZoneHeapGrowthFactorForHeapSize( float GCHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
size_t lastBytes, const GCSchedulingTunables& tunables, size_t lastBytes, const GCSchedulingTunables& tunables,
const GCSchedulingState& state) { const GCSchedulingState& state) {
if (!tunables.isDynamicHeapGrowthEnabled()) { if (!tunables.isDynamicHeapGrowthEnabled()) {
@ -2114,7 +2114,7 @@ float ZoneHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
} }
/* static */ /* static */
size_t ZoneHeapThreshold::computeZoneTriggerBytes( size_t GCHeapThreshold::computeZoneTriggerBytes(
float growthFactor, size_t lastBytes, JSGCInvocationKind gckind, float growthFactor, size_t lastBytes, JSGCInvocationKind gckind,
const GCSchedulingTunables& tunables, const AutoLockGC& lock) { const GCSchedulingTunables& tunables, const AutoLockGC& lock) {
size_t baseMin = gckind == GC_SHRINK size_t baseMin = gckind == GC_SHRINK
@ -2127,30 +2127,28 @@ size_t ZoneHeapThreshold::computeZoneTriggerBytes(
return size_t(Min(triggerMax, trigger)); return size_t(Min(triggerMax, trigger));
} }
void ZoneHeapThreshold::updateAfterGC(size_t lastBytes, void GCHeapThreshold::updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
JSGCInvocationKind gckind, const GCSchedulingTunables& tunables,
const GCSchedulingTunables& tunables, const GCSchedulingState& state,
const GCSchedulingState& state, const AutoLockGC& lock) {
const AutoLockGC& lock) {
float growthFactor = float growthFactor =
computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state); computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
gcTriggerBytes_ = bytes_ =
computeZoneTriggerBytes(growthFactor, lastBytes, gckind, tunables, lock); computeZoneTriggerBytes(growthFactor, lastBytes, gckind, tunables, lock);
} }
/* static */ /* static */
size_t ZoneMallocThreshold::computeZoneTriggerBytes(float growthFactor, size_t MallocHeapThreshold::computeZoneTriggerBytes(float growthFactor,
size_t lastBytes, size_t lastBytes,
size_t baseBytes, size_t baseBytes,
const AutoLockGC& lock) { const AutoLockGC& lock) {
return size_t(float(Max(lastBytes, baseBytes)) * growthFactor); return size_t(float(Max(lastBytes, baseBytes)) * growthFactor);
} }
void ZoneMallocThreshold::updateAfterGC(size_t lastBytes, size_t baseBytes, void MallocHeapThreshold::updateAfterGC(size_t lastBytes, size_t baseBytes,
float growthFactor, float growthFactor,
const AutoLockGC& lock) { const AutoLockGC& lock) {
gcTriggerBytes_ = bytes_ = computeZoneTriggerBytes(growthFactor, lastBytes, baseBytes, lock);
computeZoneTriggerBytes(growthFactor, lastBytes, baseBytes, lock);
} }
/* Compacting GC */ /* Compacting GC */
@ -3046,7 +3044,7 @@ void GCRuntime::clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
// everything to new arenas, as that will already have allocated a similar // everything to new arenas, as that will already have allocated a similar
// number of arenas. This only happens for collections triggered by GC zeal. // number of arenas. This only happens for collections triggered by GC zeal.
bool allArenasRelocated = ShouldRelocateAllArenas(reason); bool allArenasRelocated = ShouldRelocateAllArenas(reason);
arena->zone->zoneSize.removeBytes(ArenaSize, !allArenasRelocated); arena->zone->gcHeapSize.removeBytes(ArenaSize, !allArenasRelocated);
// Release the arena but don't return it to the chunk yet. // Release the arena but don't return it to the chunk yet.
arena->release(lock); arena->release(lock);
@ -3413,8 +3411,8 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {
MOZ_ASSERT(!JS::RuntimeHeapIsCollecting()); MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
size_t usedBytes = size_t usedBytes =
zone->zoneSize.gcBytes(); // This already includes |nbytes|. zone->gcHeapSize.bytes(); // This already includes |nbytes|.
size_t thresholdBytes = zone->threshold.gcTriggerBytes(); size_t thresholdBytes = zone->gcHeapThreshold.bytes();
if (usedBytes < thresholdBytes) { if (usedBytes < thresholdBytes) {
return; return;
} }
@ -3459,25 +3457,25 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {
void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc, void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
const HeapSize& heap, const HeapSize& heap,
const ZoneThreshold& threshold, const HeapThreshold& threshold,
JS::GCReason reason) { JS::GCReason reason) {
rt->gc.maybeMallocTriggerZoneGC(Zone::from(zoneAlloc), heap, threshold, rt->gc.maybeMallocTriggerZoneGC(Zone::from(zoneAlloc), heap, threshold,
reason); reason);
} }
void GCRuntime::maybeMallocTriggerZoneGC(Zone* zone) { void GCRuntime::maybeMallocTriggerZoneGC(Zone* zone) {
if (maybeMallocTriggerZoneGC(zone, zone->gcMallocBytes, if (maybeMallocTriggerZoneGC(zone, zone->mallocHeapSize,
zone->gcMallocThreshold, zone->mallocHeapThreshold,
JS::GCReason::TOO_MUCH_MALLOC)) { JS::GCReason::TOO_MUCH_MALLOC)) {
return; return;
} }
maybeMallocTriggerZoneGC(zone, zone->gcJitBytes, zone->gcJitThreshold, maybeMallocTriggerZoneGC(zone, zone->jitHeapSize, zone->jitHeapThreshold,
JS::GCReason::TOO_MUCH_JIT_CODE); JS::GCReason::TOO_MUCH_JIT_CODE);
} }
bool GCRuntime::maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap, bool GCRuntime::maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
const ZoneThreshold& threshold, const HeapThreshold& threshold,
JS::GCReason reason) { JS::GCReason reason) {
if (!CurrentThreadCanAccessRuntime(rt)) { if (!CurrentThreadCanAccessRuntime(rt)) {
// Zones in use by a helper thread can't be collected. // Zones in use by a helper thread can't be collected.
@ -3490,8 +3488,8 @@ bool GCRuntime::maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
return false; return false;
} }
size_t usedBytes = heap.gcBytes(); size_t usedBytes = heap.bytes();
size_t thresholdBytes = threshold.gcTriggerBytes(); size_t thresholdBytes = threshold.bytes();
if (usedBytes < thresholdBytes) { if (usedBytes < thresholdBytes) {
return false; return false;
} }
@ -3576,8 +3574,9 @@ void GCRuntime::maybeGC() {
bool scheduledZones = false; bool scheduledZones = false;
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) { for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
if (checkEagerAllocTrigger(zone->zoneSize, zone->threshold) || if (checkEagerAllocTrigger(zone->gcHeapSize, zone->gcHeapThreshold) ||
checkEagerAllocTrigger(zone->gcMallocBytes, zone->gcMallocThreshold)) { checkEagerAllocTrigger(zone->mallocHeapSize,
zone->mallocHeapThreshold)) {
zone->scheduleGC(); zone->scheduleGC();
scheduledZones = true; scheduledZones = true;
} }
@ -3589,10 +3588,10 @@ void GCRuntime::maybeGC() {
} }
bool GCRuntime::checkEagerAllocTrigger(const HeapSize& size, bool GCRuntime::checkEagerAllocTrigger(const HeapSize& size,
const ZoneThreshold& threshold) { const HeapThreshold& threshold) {
float thresholdBytes = float thresholdBytes =
threshold.eagerAllocTrigger(schedulingState.inHighFrequencyGCMode()); threshold.eagerAllocTrigger(schedulingState.inHighFrequencyGCMode());
float usedBytes = size.gcBytes(); float usedBytes = size.bytes();
if (usedBytes <= 1024 * 1024 || usedBytes < thresholdBytes) { if (usedBytes <= 1024 * 1024 || usedBytes < thresholdBytes) {
return false; return false;
} }
@ -7370,8 +7369,8 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
continue; continue;
} }
if (zone->zoneSize.gcBytes() >= if (zone->gcHeapSize.bytes() >=
zone->threshold.nonIncrementalTriggerBytes(tunables)) { zone->gcHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
CheckZoneIsScheduled(zone, reason, "GC bytes"); CheckZoneIsScheduled(zone, reason, "GC bytes");
budget.makeUnlimited(); budget.makeUnlimited();
stats().nonincremental(AbortReason::GCBytesTrigger); stats().nonincremental(AbortReason::GCBytesTrigger);
@ -7380,8 +7379,8 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
} }
} }
if (zone->gcMallocBytes.gcBytes() >= if (zone->mallocHeapSize.bytes() >=
zone->gcMallocThreshold.nonIncrementalTriggerBytes(tunables)) { zone->mallocHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
CheckZoneIsScheduled(zone, reason, "malloc bytes"); CheckZoneIsScheduled(zone, reason, "malloc bytes");
budget.makeUnlimited(); budget.makeUnlimited();
stats().nonincremental(AbortReason::MallocBytesTrigger); stats().nonincremental(AbortReason::MallocBytesTrigger);
@ -7390,8 +7389,8 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
} }
} }
if (zone->gcJitBytes.gcBytes() >= if (zone->jitHeapSize.bytes() >=
zone->gcJitThreshold.nonIncrementalTriggerBytes(tunables)) { zone->jitHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
CheckZoneIsScheduled(zone, reason, "JIT code bytes"); CheckZoneIsScheduled(zone, reason, "JIT code bytes");
budget.makeUnlimited(); budget.makeUnlimited();
stats().nonincremental(AbortReason::JitCodeBytesTrigger); stats().nonincremental(AbortReason::JitCodeBytesTrigger);
@ -7435,11 +7434,11 @@ static void ScheduleZones(GCRuntime* gc) {
// This is a heuristic to reduce the total number of collections. // This is a heuristic to reduce the total number of collections.
bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode(); bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode();
if (zone->zoneSize.gcBytes() >= if (zone->gcHeapSize.bytes() >=
zone->threshold.eagerAllocTrigger(inHighFrequencyMode) || zone->gcHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
zone->gcMallocBytes.gcBytes() >= zone->mallocHeapSize.bytes() >=
zone->gcMallocThreshold.eagerAllocTrigger(inHighFrequencyMode) || zone->mallocHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
zone->gcJitBytes.gcBytes() >= zone->gcJitThreshold.gcTriggerBytes()) { zone->jitHeapSize.bytes() >= zone->jitHeapThreshold.bytes()) {
zone->scheduleGC(); zone->scheduleGC();
} }
} }
@ -8257,7 +8256,7 @@ void GCRuntime::mergeRealms(Realm* source, Realm* target) {
targetZoneIsCollecting); targetZoneIsCollecting);
target->zone()->addTenuredAllocsSinceMinorGC( target->zone()->addTenuredAllocsSinceMinorGC(
source->zone()->getAndResetTenuredAllocsSinceMinorGC()); source->zone()->getAndResetTenuredAllocsSinceMinorGC());
target->zone()->zoneSize.adopt(source->zone()->zoneSize); target->zone()->gcHeapSize.adopt(source->zone()->gcHeapSize);
target->zone()->adoptUniqueIds(source->zone()); target->zone()->adoptUniqueIds(source->zone());
target->zone()->adoptMallocBytes(source->zone()); target->zone()->adoptMallocBytes(source->zone());
@ -8861,7 +8860,7 @@ namespace MemInfo {
static bool GCBytesGetter(JSContext* cx, unsigned argc, Value* vp) { static bool GCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp); CallArgs args = CallArgsFromVp(argc, vp);
args.rval().setNumber(double(cx->runtime()->gc.heapSize.gcBytes())); args.rval().setNumber(double(cx->runtime()->gc.heapSize.bytes()));
return true; return true;
} }
@ -8904,13 +8903,13 @@ static bool GCSliceCountGetter(JSContext* cx, unsigned argc, Value* vp) {
static bool ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp) { static bool ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp); CallArgs args = CallArgsFromVp(argc, vp);
args.rval().setNumber(double(cx->zone()->zoneSize.gcBytes())); args.rval().setNumber(double(cx->zone()->gcHeapSize.bytes()));
return true; return true;
} }
static bool ZoneGCTriggerBytesGetter(JSContext* cx, unsigned argc, Value* vp) { static bool ZoneGCTriggerBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp); CallArgs args = CallArgsFromVp(argc, vp);
args.rval().setNumber(double(cx->zone()->threshold.gcTriggerBytes())); args.rval().setNumber(double(cx->zone()->gcHeapThreshold.bytes()));
return true; return true;
} }
@ -8919,20 +8918,20 @@ static bool ZoneGCAllocTriggerGetter(JSContext* cx, unsigned argc, Value* vp) {
bool highFrequency = bool highFrequency =
cx->runtime()->gc.schedulingState.inHighFrequencyGCMode(); cx->runtime()->gc.schedulingState.inHighFrequencyGCMode();
args.rval().setNumber( args.rval().setNumber(
double(cx->zone()->threshold.eagerAllocTrigger(highFrequency))); double(cx->zone()->gcHeapThreshold.eagerAllocTrigger(highFrequency)));
return true; return true;
} }
static bool ZoneMallocBytesGetter(JSContext* cx, unsigned argc, Value* vp) { static bool ZoneMallocBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp); CallArgs args = CallArgsFromVp(argc, vp);
args.rval().setNumber(double(cx->zone()->gcMallocBytes.gcBytes())); args.rval().setNumber(double(cx->zone()->mallocHeapSize.bytes()));
return true; return true;
} }
static bool ZoneMallocTriggerBytesGetter(JSContext* cx, unsigned argc, static bool ZoneMallocTriggerBytesGetter(JSContext* cx, unsigned argc,
Value* vp) { Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp); CallArgs args = CallArgsFromVp(argc, vp);
args.rval().setNumber(double(cx->zone()->gcMallocThreshold.gcTriggerBytes())); args.rval().setNumber(double(cx->zone()->mallocHeapThreshold.bytes()));
return true; return true;
} }

View File

@ -267,14 +267,14 @@ class GCRuntime {
// Check whether to trigger a zone GC after malloc memory. // Check whether to trigger a zone GC after malloc memory.
void maybeMallocTriggerZoneGC(Zone* zone); void maybeMallocTriggerZoneGC(Zone* zone);
bool maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap, bool maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
const ZoneThreshold& threshold, const HeapThreshold& threshold,
JS::GCReason reason); JS::GCReason reason);
// The return value indicates if we were able to do the GC. // The return value indicates if we were able to do the GC.
bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes, bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
size_t thresholdBytes); size_t thresholdBytes);
void maybeGC(); void maybeGC();
bool checkEagerAllocTrigger(const HeapSize& size, bool checkEagerAllocTrigger(const HeapSize& size,
const ZoneThreshold& threshold); const HeapThreshold& threshold);
// The return value indicates whether a major GC was performed. // The return value indicates whether a major GC was performed.
bool gcIfRequested(); bool gcIfRequested();
void gc(JSGCInvocationKind gckind, JS::GCReason reason); void gc(JSGCInvocationKind gckind, JS::GCReason reason);

View File

@ -957,8 +957,8 @@ void js::Nursery::collect(JS::GCReason reason) {
// We ignore gcMaxBytes when allocating for minor collection. However, if we // We ignore gcMaxBytes when allocating for minor collection. However, if we
// overflowed, we disable the nursery. The next time we allocate, we'll fail // overflowed, we disable the nursery. The next time we allocate, we'll fail
// because gcBytes >= gcMaxBytes. // because bytes >= gcMaxBytes.
if (rt->gc.heapSize.gcBytes() >= tunables().gcMaxBytes()) { if (rt->gc.heapSize.bytes() >= tunables().gcMaxBytes()) {
disable(); disable();
} }

View File

@ -260,7 +260,7 @@
* *
* Assumptions: * Assumptions:
* -> Common web scripts will return to the event loop before using * -> Common web scripts will return to the event loop before using
* 10% of the current gcTriggerBytes worth of GC memory. * 10% of the current triggerBytes worth of GC memory.
* *
* ALLOC_TRIGGER (incremental) * ALLOC_TRIGGER (incremental)
* --------------------------- * ---------------------------
@ -348,30 +348,30 @@ class GCSchedulingTunables {
/* /*
* JSGC_ALLOCATION_THRESHOLD * JSGC_ALLOCATION_THRESHOLD
* *
* The base value used to compute zone->threshold.gcTriggerBytes(). When * The base value used to compute zone->threshold.bytes(). When
* usage.gcBytes() surpasses threshold.gcTriggerBytes() for a zone, the * gcHeapSize.bytes() exceeds threshold.bytes() for a zone, the zone may be
* zone may be scheduled for a GC, depending on the exact circumstances. * scheduled for a GC, depending on the exact circumstances.
*/ */
MainThreadOrGCTaskData<size_t> gcZoneAllocThresholdBase_; MainThreadOrGCTaskData<size_t> gcZoneAllocThresholdBase_;
/* /*
* JSGC_NON_INCREMENTAL_FACTOR * JSGC_NON_INCREMENTAL_FACTOR
* *
* Multiple of threshold.gcBytes() which triggers a non-incremental GC. * Multiple of threshold.bytes() which triggers a non-incremental GC.
*/ */
UnprotectedData<float> nonIncrementalFactor_; UnprotectedData<float> nonIncrementalFactor_;
/* /*
* JSGC_AVOID_INTERRUPT_FACTOR * JSGC_AVOID_INTERRUPT_FACTOR
* *
* Multiple of threshold.gcBytes() which triggers a new incremental GC when * Multiple of threshold.bytes() which triggers a new incremental GC when
* doing so would interrupt an ongoing incremental GC. * doing so would interrupt an ongoing incremental GC.
*/ */
UnprotectedData<float> avoidInterruptFactor_; UnprotectedData<float> avoidInterruptFactor_;
/* /*
* Number of bytes to allocate between incremental slices in GCs triggered * Number of bytes to allocate between incremental slices in GCs triggered by
* by the zone allocation threshold. * the zone allocation threshold.
* *
* This value does not have a JSGCParamKey parameter yet. * This value does not have a JSGCParamKey parameter yet.
*/ */
@ -569,44 +569,43 @@ class GCSchedulingState {
} }
}; };
using AtomicByteCount =
mozilla::Atomic<size_t, mozilla::ReleaseAcquire,
mozilla::recordreplay::Behavior::DontPreserve>;
/* /*
* Tracks the used sizes for owned heap data and automatically maintains the * Tracks the size of allocated data. This is used for both GC and malloc data.
* memory usage relationship between GCRuntime and Zones. * It automatically maintains the memory usage relationship between parent and
* child instances, i.e. between those in a GCRuntime and its Zones.
*/ */
class HeapSize { class HeapSize {
/* /*
* A heap usage that contains our parent's heap usage, or null if this is * An instance that contains our parent's heap usage, or null if this is the
* the top-level usage container. * top-level usage container.
*/ */
HeapSize* const parent_; HeapSize* const parent_;
/* /*
* The approximate number of bytes in use on the GC heap, to the nearest * The number of bytes in use. For GC heaps this is approximate to the nearest
* ArenaSize. This does not include any malloc data. It also does not * ArenaSize. It is atomic because it is updated by both the active and GC
* include not-actively-used addresses that are still reserved at the OS * helper threads.
* level for GC usage. It is atomic because it is updated by both the active
* and GC helper threads.
*/ */
mozilla::Atomic<size_t, mozilla::ReleaseAcquire, AtomicByteCount bytes_;
mozilla::recordreplay::Behavior::DontPreserve>
gcBytes_;
/* /*
* The number of bytes retained after the last collection. This is updated * The number of bytes retained after the last collection. This is updated
* dynamically during incremental GC. It does not include allocations that * dynamically during incremental GC. It does not include allocations that
* happen during a GC. * happen during a GC.
*/ */
mozilla::Atomic<size_t, mozilla::ReleaseAcquire, AtomicByteCount retainedBytes_;
mozilla::recordreplay::Behavior::DontPreserve>
retainedBytes_;
public: public:
explicit HeapSize(HeapSize* parent) : parent_(parent), gcBytes_(0) {} explicit HeapSize(HeapSize* parent) : parent_(parent), bytes_(0) {}
size_t gcBytes() const { return gcBytes_; } size_t bytes() const { return bytes_; }
size_t retainedBytes() const { return retainedBytes_; } size_t retainedBytes() const { return retainedBytes_; }
void updateOnGCStart() { retainedBytes_ = size_t(gcBytes_); } void updateOnGCStart() { retainedBytes_ = size_t(bytes_); }
void addGCArena() { addBytes(ArenaSize); } void addGCArena() { addBytes(ArenaSize); }
void removeGCArena() { void removeGCArena() {
@ -615,9 +614,9 @@ class HeapSize {
} }
void addBytes(size_t nbytes) { void addBytes(size_t nbytes) {
mozilla::DebugOnly<size_t> initialBytes(gcBytes_); mozilla::DebugOnly<size_t> initialBytes(bytes_);
MOZ_ASSERT(initialBytes + nbytes > initialBytes); MOZ_ASSERT(initialBytes + nbytes > initialBytes);
gcBytes_ += nbytes; bytes_ += nbytes;
if (parent_) { if (parent_) {
parent_->addBytes(nbytes); parent_->addBytes(nbytes);
} }
@ -628,8 +627,8 @@ class HeapSize {
// we can't do that yet, so clamp the result to zero. // we can't do that yet, so clamp the result to zero.
retainedBytes_ = nbytes <= retainedBytes_ ? retainedBytes_ - nbytes : 0; retainedBytes_ = nbytes <= retainedBytes_ ? retainedBytes_ - nbytes : 0;
} }
MOZ_ASSERT(gcBytes_ >= nbytes); MOZ_ASSERT(bytes_ >= nbytes);
gcBytes_ -= nbytes; bytes_ -= nbytes;
if (parent_) { if (parent_) {
parent_->removeBytes(nbytes, wasSwept); parent_->removeBytes(nbytes, wasSwept);
} }
@ -639,31 +638,33 @@ class HeapSize {
void adopt(HeapSize& source) { void adopt(HeapSize& source) {
// Skip retainedBytes_: we never adopt zones that are currently being // Skip retainedBytes_: we never adopt zones that are currently being
// collected. // collected.
gcBytes_ += source.gcBytes_; bytes_ += source.bytes_;
source.retainedBytes_ = 0; source.retainedBytes_ = 0;
source.gcBytes_ = 0; source.bytes_ = 0;
} }
}; };
// Base class for GC heap and malloc thresholds. // A heap size threshold used to trigger GC. This is an abstract base class for
class ZoneThreshold { // GC heap and malloc thresholds defined below.
class HeapThreshold {
protected: protected:
HeapThreshold() = default;
// GC trigger threshold. // GC trigger threshold.
mozilla::Atomic<size_t, mozilla::Relaxed, AtomicByteCount bytes_;
mozilla::recordreplay::Behavior::DontPreserve>
gcTriggerBytes_;
public: public:
size_t gcTriggerBytes() const { return gcTriggerBytes_; } size_t bytes() const { return bytes_; }
size_t nonIncrementalTriggerBytes(GCSchedulingTunables& tunables) const { size_t nonIncrementalTriggerBytes(GCSchedulingTunables& tunables) const {
return gcTriggerBytes_ * tunables.nonIncrementalFactor(); return bytes_ * tunables.nonIncrementalFactor();
} }
float eagerAllocTrigger(bool highFrequencyGC) const; float eagerAllocTrigger(bool highFrequencyGC) const;
}; };
// This class encapsulates the data that determines when we need to do a zone GC // A heap threshold that is based on a multiple of the retained size after the
// base on GC heap size. // last collection adjusted based on collection frequency and retained
class ZoneHeapThreshold : public ZoneThreshold { // size. This is used to determine when to do a zone GC based on GC heap size.
class GCHeapThreshold : public HeapThreshold {
public: public:
void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind, void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
const GCSchedulingTunables& tunables, const GCSchedulingTunables& tunables,
@ -679,9 +680,10 @@ class ZoneHeapThreshold : public ZoneThreshold {
const AutoLockGC& lock); const AutoLockGC& lock);
}; };
// This class encapsulates the data that determines when we need to do a zone // A heap threshold that is calculated as a constant multiple of the retained
// size after the last collection. This is used to determine when to do a zone
// GC based on malloc data. // GC based on malloc data.
class ZoneMallocThreshold : public ZoneThreshold { class MallocHeapThreshold : public HeapThreshold {
public: public:
void updateAfterGC(size_t lastBytes, size_t baseBytes, float growthFactor, void updateAfterGC(size_t lastBytes, size_t baseBytes, float growthFactor,
const AutoLockGC& lock); const AutoLockGC& lock);
@ -692,11 +694,11 @@ class ZoneMallocThreshold : public ZoneThreshold {
const AutoLockGC& lock); const AutoLockGC& lock);
}; };
// A fixed threshold that determines when we need to do a zone GC based on // A fixed threshold that's used to determine when we need to do a zone GC based
// allocated JIT code. // on allocated JIT code.
class ZoneFixedThreshold : public ZoneThreshold { class JitHeapThreshold : public HeapThreshold {
public: public:
explicit ZoneFixedThreshold(size_t bytes) { gcTriggerBytes_ = bytes; } explicit JitHeapThreshold(size_t bytes) { bytes_ = bytes; }
}; };
#ifdef DEBUG #ifdef DEBUG

View File

@ -990,7 +990,7 @@ void Statistics::beginGC(JSGCInvocationKind kind,
nonincrementalReason_ = gc::AbortReason::None; nonincrementalReason_ = gc::AbortReason::None;
GCRuntime& gc = runtime->gc; GCRuntime& gc = runtime->gc;
preTotalHeapBytes = gc.heapSize.gcBytes(); preTotalHeapBytes = gc.heapSize.bytes();
preCollectedHeapBytes = 0; preCollectedHeapBytes = 0;
@ -1005,7 +1005,7 @@ void Statistics::beginGC(JSGCInvocationKind kind,
void Statistics::measureInitialHeapSize() { void Statistics::measureInitialHeapSize() {
MOZ_ASSERT(preCollectedHeapBytes == 0); MOZ_ASSERT(preCollectedHeapBytes == 0);
for (GCZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) { for (GCZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
preCollectedHeapBytes += zone->zoneSize.gcBytes(); preCollectedHeapBytes += zone->gcHeapSize.bytes();
} }
} }
@ -1013,11 +1013,11 @@ void Statistics::adoptHeapSizeDuringIncrementalGC(Zone* mergedZone) {
// A zone is being merged into a zone that's currently being collected so we // A zone is being merged into a zone that's currently being collected so we
// need to adjust our record of the total size of heap for collected zones. // need to adjust our record of the total size of heap for collected zones.
MOZ_ASSERT(runtime->gc.isIncrementalGCInProgress()); MOZ_ASSERT(runtime->gc.isIncrementalGCInProgress());
preCollectedHeapBytes += mergedZone->zoneSize.gcBytes(); preCollectedHeapBytes += mergedZone->gcHeapSize.bytes();
} }
void Statistics::endGC() { void Statistics::endGC() {
postTotalHeapBytes = runtime->gc.heapSize.gcBytes(); postTotalHeapBytes = runtime->gc.heapSize.bytes();
sendGCTelemetry(); sendGCTelemetry();
@ -1091,7 +1091,7 @@ void Statistics::sendGCTelemetry() {
size_t bytesSurvived = 0; size_t bytesSurvived = 0;
for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) { for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
if (zone->wasCollected()) { if (zone->wasCollected()) {
bytesSurvived += zone->zoneSize.retainedBytes(); bytesSurvived += zone->gcHeapSize.retainedBytes();
} }
} }

View File

@ -31,32 +31,32 @@ Zone* const Zone::NotOnList = reinterpret_cast<Zone*>(1);
ZoneAllocator::ZoneAllocator(JSRuntime* rt) ZoneAllocator::ZoneAllocator(JSRuntime* rt)
: JS::shadow::Zone(rt, &rt->gc.marker), : JS::shadow::Zone(rt, &rt->gc.marker),
zoneSize(&rt->gc.heapSize), gcHeapSize(&rt->gc.heapSize),
gcMallocBytes(nullptr), mallocHeapSize(nullptr),
gcJitBytes(nullptr), jitHeapSize(nullptr),
gcJitThreshold(jit::MaxCodeBytesPerProcess * 0.8) { jitHeapThreshold(jit::MaxCodeBytesPerProcess * 0.8) {
AutoLockGC lock(rt); AutoLockGC lock(rt);
updateGCThresholds(rt->gc, GC_NORMAL, lock); updateGCThresholds(rt->gc, GC_NORMAL, lock);
} }
ZoneAllocator::~ZoneAllocator() { ZoneAllocator::~ZoneAllocator() {
#ifdef DEBUG #ifdef DEBUG
gcMallocTracker.checkEmptyOnDestroy(); mallocTracker.checkEmptyOnDestroy();
MOZ_ASSERT(zoneSize.gcBytes() == 0); MOZ_ASSERT(gcHeapSize.bytes() == 0);
MOZ_ASSERT(gcMallocBytes.gcBytes() == 0); MOZ_ASSERT(mallocHeapSize.bytes() == 0);
MOZ_ASSERT(gcJitBytes.gcBytes() == 0); MOZ_ASSERT(jitHeapSize.bytes() == 0);
#endif #endif
} }
void ZoneAllocator::fixupAfterMovingGC() { void ZoneAllocator::fixupAfterMovingGC() {
#ifdef DEBUG #ifdef DEBUG
gcMallocTracker.fixupAfterMovingGC(); mallocTracker.fixupAfterMovingGC();
#endif #endif
} }
void js::ZoneAllocator::updateMemoryCountersOnGCStart() { void js::ZoneAllocator::updateMemoryCountersOnGCStart() {
zoneSize.updateOnGCStart(); gcHeapSize.updateOnGCStart();
gcMallocBytes.updateOnGCStart(); mallocHeapSize.updateOnGCStart();
} }
void js::ZoneAllocator::updateGCThresholds(GCRuntime& gc, void js::ZoneAllocator::updateGCThresholds(GCRuntime& gc,
@ -64,11 +64,11 @@ void js::ZoneAllocator::updateGCThresholds(GCRuntime& gc,
const js::AutoLockGC& lock) { const js::AutoLockGC& lock) {
// This is called repeatedly during a GC to update thresholds as memory is // This is called repeatedly during a GC to update thresholds as memory is
// freed. // freed.
threshold.updateAfterGC(zoneSize.retainedBytes(), invocationKind, gc.tunables, gcHeapThreshold.updateAfterGC(gcHeapSize.retainedBytes(), invocationKind,
gc.schedulingState, lock); gc.tunables, gc.schedulingState, lock);
gcMallocThreshold.updateAfterGC(gcMallocBytes.retainedBytes(), mallocHeapThreshold.updateAfterGC(mallocHeapSize.retainedBytes(),
gc.tunables.mallocThresholdBase(), gc.tunables.mallocThresholdBase(),
gc.tunables.mallocGrowthFactor(), lock); gc.tunables.mallocGrowthFactor(), lock);
} }
void ZoneAllocPolicy::decMemory(size_t nbytes) { void ZoneAllocPolicy::decMemory(size_t nbytes) {

View File

@ -30,7 +30,7 @@ bool CurrentThreadIsGCSweeping();
namespace gc { namespace gc {
void MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc, void MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
const HeapSize& heap, const HeapSize& heap,
const ZoneThreshold& threshold, const HeapThreshold& threshold,
JS::GCReason reason); JS::GCReason reason);
} }
@ -54,10 +54,10 @@ class ZoneAllocator : public JS::shadow::Zone,
void reportAllocationOverflow() const; void reportAllocationOverflow() const;
void adoptMallocBytes(ZoneAllocator* other) { void adoptMallocBytes(ZoneAllocator* other) {
gcMallocBytes.adopt(other->gcMallocBytes); mallocHeapSize.adopt(other->mallocHeapSize);
gcJitBytes.adopt(other->gcJitBytes); jitHeapSize.adopt(other->jitHeapSize);
#ifdef DEBUG #ifdef DEBUG
gcMallocTracker.adopt(other->gcMallocTracker); mallocTracker.adopt(other->mallocTracker);
#endif #endif
} }
@ -70,12 +70,12 @@ class ZoneAllocator : public JS::shadow::Zone,
void addCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) { void addCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
MOZ_ASSERT(cell); MOZ_ASSERT(cell);
MOZ_ASSERT(nbytes); MOZ_ASSERT(nbytes);
gcMallocBytes.addBytes(nbytes); mallocHeapSize.addBytes(nbytes);
// We don't currently check GC triggers here. // We don't currently check GC triggers here.
#ifdef DEBUG #ifdef DEBUG
gcMallocTracker.trackMemory(cell, nbytes, use); mallocTracker.trackMemory(cell, nbytes, use);
#endif #endif
} }
@ -85,37 +85,37 @@ class ZoneAllocator : public JS::shadow::Zone,
MOZ_ASSERT(nbytes); MOZ_ASSERT(nbytes);
MOZ_ASSERT_IF(CurrentThreadIsGCSweeping(), wasSwept); MOZ_ASSERT_IF(CurrentThreadIsGCSweeping(), wasSwept);
gcMallocBytes.removeBytes(nbytes, wasSwept); mallocHeapSize.removeBytes(nbytes, wasSwept);
#ifdef DEBUG #ifdef DEBUG
gcMallocTracker.untrackMemory(cell, nbytes, use); mallocTracker.untrackMemory(cell, nbytes, use);
#endif #endif
} }
void swapCellMemory(js::gc::Cell* a, js::gc::Cell* b, js::MemoryUse use) { void swapCellMemory(js::gc::Cell* a, js::gc::Cell* b, js::MemoryUse use) {
#ifdef DEBUG #ifdef DEBUG
gcMallocTracker.swapMemory(a, b, use); mallocTracker.swapMemory(a, b, use);
#endif #endif
} }
#ifdef DEBUG #ifdef DEBUG
void registerPolicy(js::ZoneAllocPolicy* policy) { void registerPolicy(js::ZoneAllocPolicy* policy) {
return gcMallocTracker.registerPolicy(policy); return mallocTracker.registerPolicy(policy);
} }
void unregisterPolicy(js::ZoneAllocPolicy* policy) { void unregisterPolicy(js::ZoneAllocPolicy* policy) {
return gcMallocTracker.unregisterPolicy(policy); return mallocTracker.unregisterPolicy(policy);
} }
void movePolicy(js::ZoneAllocPolicy* dst, js::ZoneAllocPolicy* src) { void movePolicy(js::ZoneAllocPolicy* dst, js::ZoneAllocPolicy* src) {
return gcMallocTracker.movePolicy(dst, src); return mallocTracker.movePolicy(dst, src);
} }
#endif #endif
void incPolicyMemory(js::ZoneAllocPolicy* policy, size_t nbytes) { void incPolicyMemory(js::ZoneAllocPolicy* policy, size_t nbytes) {
MOZ_ASSERT(nbytes); MOZ_ASSERT(nbytes);
gcMallocBytes.addBytes(nbytes); mallocHeapSize.addBytes(nbytes);
#ifdef DEBUG #ifdef DEBUG
gcMallocTracker.incPolicyMemory(policy, nbytes); mallocTracker.incPolicyMemory(policy, nbytes);
#endif #endif
maybeMallocTriggerZoneGC(); maybeMallocTriggerZoneGC();
@ -125,69 +125,69 @@ class ZoneAllocator : public JS::shadow::Zone,
MOZ_ASSERT(nbytes); MOZ_ASSERT(nbytes);
MOZ_ASSERT_IF(CurrentThreadIsGCSweeping(), wasSwept); MOZ_ASSERT_IF(CurrentThreadIsGCSweeping(), wasSwept);
gcMallocBytes.removeBytes(nbytes, wasSwept); mallocHeapSize.removeBytes(nbytes, wasSwept);
#ifdef DEBUG #ifdef DEBUG
gcMallocTracker.decPolicyMemory(policy, nbytes); mallocTracker.decPolicyMemory(policy, nbytes);
#endif #endif
} }
void incJitMemory(size_t nbytes) { void incJitMemory(size_t nbytes) {
MOZ_ASSERT(nbytes); MOZ_ASSERT(nbytes);
gcJitBytes.addBytes(nbytes); jitHeapSize.addBytes(nbytes);
maybeTriggerZoneGC(gcJitBytes, gcJitThreshold, maybeTriggerZoneGC(jitHeapSize, jitHeapThreshold,
JS::GCReason::TOO_MUCH_JIT_CODE); JS::GCReason::TOO_MUCH_JIT_CODE);
} }
void decJitMemory(size_t nbytes) { void decJitMemory(size_t nbytes) {
MOZ_ASSERT(nbytes); MOZ_ASSERT(nbytes);
gcJitBytes.removeBytes(nbytes, true); jitHeapSize.removeBytes(nbytes, true);
} }
// Check malloc allocation threshold and trigger a zone GC if necessary. // Check malloc allocation threshold and trigger a zone GC if necessary.
void maybeMallocTriggerZoneGC() { void maybeMallocTriggerZoneGC() {
maybeTriggerZoneGC(gcMallocBytes, gcMallocThreshold, maybeTriggerZoneGC(mallocHeapSize, mallocHeapThreshold,
JS::GCReason::TOO_MUCH_MALLOC); JS::GCReason::TOO_MUCH_MALLOC);
} }
private: private:
void maybeTriggerZoneGC(const js::gc::HeapSize& heap, void maybeTriggerZoneGC(const js::gc::HeapSize& heap,
const js::gc::ZoneThreshold& threshold, const js::gc::HeapThreshold& threshold,
JS::GCReason reason) { JS::GCReason reason) {
if (heap.gcBytes() >= threshold.gcTriggerBytes()) { if (heap.bytes() >= threshold.bytes()) {
gc::MaybeMallocTriggerZoneGC(runtimeFromAnyThread(), this, heap, gc::MaybeMallocTriggerZoneGC(runtimeFromAnyThread(), this, heap,
threshold, reason); threshold, reason);
} }
} }
public: public:
// Track GC heap size under this Zone. // The size of allocated GC arenas in this zone.
js::gc::HeapSize zoneSize; js::gc::HeapSize gcHeapSize;
// Thresholds used to trigger GC based on heap size. // Threshold used to trigger GC based on GC heap size.
js::gc::ZoneHeapThreshold threshold; js::gc::GCHeapThreshold gcHeapThreshold;
// Amount of data to allocate before triggering a new incremental slice for // Amount of data to allocate before triggering a new incremental slice for
// the current GC. // the current GC.
js::MainThreadData<size_t> gcDelayBytes; js::MainThreadData<size_t> gcDelayBytes;
// Malloc counter used for allocations where size information is // Amount of malloc data owned by GC things in this zone, including external
// available. Used for some internal and all tracked external allocations. // allocations supplied by JS::AddAssociatedMemory.
js::gc::HeapSize gcMallocBytes; js::gc::HeapSize mallocHeapSize;
// Thresholds used to trigger GC based on malloc allocations. // Threshold used to trigger GC based on malloc allocations.
js::gc::ZoneMallocThreshold gcMallocThreshold; js::gc::MallocHeapThreshold mallocHeapThreshold;
// Malloc counter used for JIT code allocation. // Amount of exectuable JIT code owned by GC things in this zone.
js::gc::HeapSize gcJitBytes; js::gc::HeapSize jitHeapSize;
// Thresholds used to trigger GC based on JIT allocations. // Threshold used to trigger GC based on JIT allocations.
js::gc::ZoneFixedThreshold gcJitThreshold; js::gc::JitHeapThreshold jitHeapThreshold;
private: private:
#ifdef DEBUG #ifdef DEBUG
// In debug builds, malloc allocations can be tracked to make debugging easier // In debug builds, malloc allocations can be tracked to make debugging easier
// (possible?) if allocation and free sizes don't balance. // (possible?) if allocation and free sizes don't balance.
js::gc::MemoryTracker gcMallocTracker; js::gc::MemoryTracker mallocTracker;
#endif #endif
friend class js::gc::GCRuntime; friend class js::gc::GCRuntime;

View File

@ -1401,7 +1401,7 @@ JS_FRIEND_API JS::Value js::MaybeGetScriptPrivate(JSObject* object) {
} }
JS_FRIEND_API uint64_t js::GetGCHeapUsageForObjectZone(JSObject* obj) { JS_FRIEND_API uint64_t js::GetGCHeapUsageForObjectZone(JSObject* obj) {
return obj->zone()->zoneSize.gcBytes(); return obj->zone()->gcHeapSize.bytes();
} }
#ifdef DEBUG #ifdef DEBUG