Bug 1520366 - Update LifoAlloc growth heuristics to track smallAllocsSize. r=nbp

- Replace oversizeSize with smallAllocsSize. This tracks the sizes of
  non-transferred small-allocation chunks; it excludes unused chunks,
  oversize chunks, and chunks transferred from other LifoAllocs. The new
  counter drives the chunk-size growth heuristic, with the aim of reducing
  the memory spikes caused by the transferFrom allocation patterns we see
  in the wild.
- Also fix a pre-existing typo in LifoAlloc::reset
Ted Campbell 2019-01-22 00:54:03 -05:00
parent 817e76e410
commit ec8ee04a23
2 changed files with 47 additions and 18 deletions
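
To illustrate the heuristic change described in the commit message, here is a standalone sketch (NextChunkSizeSketch and the sizes are invented for illustration; this is not SpiderMonkey's NextSize). It shows how the old input, curSize_ - oversizeSize_, still includes transferred chunks and therefore inflates the next chunk size, while the new smallAllocsSize_ input keeps growth proportional to the small allocations this LifoAlloc made itself.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Toy growth policy: the next chunk grows with the bytes already devoted to
// small allocations, clamped to [defaultChunkSize, cap].
static size_t NextChunkSizeSketch(size_t defaultChunkSize, size_t growthInput) {
  const size_t cap = size_t(1) << 20;  // illustrative 1 MiB ceiling
  return std::clamp(growthInput, defaultChunkSize, cap);
}

int main() {
  const size_t defaultChunkSize = 4 * 1024;
  // Suppose 512 KiB of chunks were just transferFrom'd into this LifoAlloc,
  // while it has bump-allocated only 8 KiB of small data itself.
  const size_t transferredSize = 512 * 1024;
  const size_t smallAllocsSize = 8 * 1024;
  std::printf("old input (includes transfers): next chunk = %zu bytes\n",
              NextChunkSizeSketch(defaultChunkSize,
                                  transferredSize + smallAllocsSize));
  std::printf("new input (smallAllocsSize_):   next chunk = %zu bytes\n",
              NextChunkSizeSketch(defaultChunkSize, smallAllocsSize));
  return 0;
}

With the old input, the very next small chunk would already be over half a megabyte; with the new counter it stays near the default size.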

js/src/ds/LifoAlloc.cpp

@@ -104,7 +104,7 @@ void LifoAlloc::reset(size_t defaultChunkSize) {
     chunks_.popFirst();
   }
   while (!oversize_.empty()) {
-    chunks_.popFirst();
+    oversize_.popFirst();
   }
   while (!unused_.empty()) {
     unused_.popFirst();
@@ -113,10 +113,15 @@ void LifoAlloc::reset(size_t defaultChunkSize) {
   oversizeThreshold_ = defaultChunkSize;
   markCount = 0;
   curSize_ = 0;
-  oversizeSize_ = 0;
+  smallAllocsSize_ = 0;
 }
 void LifoAlloc::freeAll() {
+  // When free-ing all chunks, we can no longer determine which chunks were
+  // transferred and which were not, so simply clear the heuristic to zero
+  // right away.
+  smallAllocsSize_ = 0;
   while (!chunks_.empty()) {
     UniqueBumpChunk bc = chunks_.popFirst();
     decrementCurSize(bc->computedSizeOfIncludingThis());
@@ -124,7 +129,6 @@ void LifoAlloc::freeAll() {
   while (!oversize_.empty()) {
     UniqueBumpChunk bc = oversize_.popFirst();
     decrementCurSize(bc->computedSizeOfIncludingThis());
-    oversizeSize_ -= bc->computedSizeOfIncludingThis();
   }
   while (!unused_.empty()) {
     UniqueBumpChunk bc = unused_.popFirst();
@@ -134,7 +138,6 @@ void LifoAlloc::freeAll() {
   // Nb: maintaining curSize_ correctly isn't easy. Fortunately, this is an
   // excellent sanity check.
   MOZ_ASSERT(curSize_ == 0);
-  MOZ_ASSERT(oversizeSize_ == 0);
 }
 // Round at the same page granularity used by malloc.
@@ -176,11 +179,14 @@ LifoAlloc::UniqueBumpChunk LifoAlloc::newChunkWithCapacity(size_t n,
     return nullptr;
   }
-  MOZ_ASSERT(curSize_ >= oversizeSize_);
+  // Note: When computing chunkSize growth, we only are interested in chunks
+  // used for small allocations. This excludes unused chunks, oversized chunks,
+  // and chunks transferred in from another LifoAlloc.
+  MOZ_ASSERT(curSize_ >= smallAllocsSize_);
   const size_t chunkSize =
       (oversize || minSize > defaultChunkSize_)
           ? MallocGoodSize(minSize)
-          : NextSize(defaultChunkSize_, curSize_ - oversizeSize_);
+          : NextSize(defaultChunkSize_, smallAllocsSize_);
   // Create a new BumpChunk, and allocate space for it.
   UniqueBumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize);
@@ -219,8 +225,7 @@ LifoAlloc::UniqueBumpChunk LifoAlloc::getOrCreateChunk(size_t n) {
   if (!newChunk) {
     return newChunk;
   }
-  size_t size = newChunk->computedSizeOfIncludingThis();
-  incrementCurSize(size);
+  incrementCurSize(newChunk->computedSizeOfIncludingThis());
   return newChunk;
 }
@@ -231,6 +236,9 @@ void* LifoAlloc::allocImplColdPath(size_t n) {
     return nullptr;
   }
+  // This new chunk is about to be used for small allocations.
+  smallAllocsSize_ += newChunk->computedSizeOfIncludingThis();
   // Since we just created a large enough chunk, this can't fail.
   chunks_.append(std::move(newChunk));
   result = chunks_.last()->tryAlloc(n);
@@ -245,7 +253,6 @@ void* LifoAlloc::allocImplOversize(size_t n) {
     return nullptr;
   }
   incrementCurSize(newChunk->computedSizeOfIncludingThis());
-  oversizeSize_ += newChunk->computedSizeOfIncludingThis();
   // Since we just created a large enough chunk, this can't fail.
   oversize_.append(std::move(newChunk));
@@ -266,9 +273,8 @@ bool LifoAlloc::ensureUnusedApproximateColdPath(size_t n, size_t total) {
   if (!newChunk) {
     return false;
   }
-  size_t size = newChunk->computedSizeOfIncludingThis();
+  incrementCurSize(newChunk->computedSizeOfIncludingThis());
   unused_.pushFront(std::move(newChunk));
-  incrementCurSize(size);
   return true;
 }
@@ -325,6 +331,10 @@ void LifoAlloc::release(Mark mark) {
       cutAtMark(mark.chunk, chunks_);
   for (detail::BumpChunk& bc : released) {
     bc.release();
+    // Chunks moved from (after a mark) in chunks_ to unused_ are no longer
+    // considered small allocations.
+    smallAllocsSize_ -= bc.computedSizeOfIncludingThis();
   }
   unused_.appendAll(std::move(released));
@@ -333,7 +343,6 @@ void LifoAlloc::release(Mark mark) {
   while (!released.empty()) {
     UniqueBumpChunk bc = released.popFirst();
     decrementCurSize(bc->computedSizeOfIncludingThis());
-    oversizeSize_ -= bc->computedSizeOfIncludingThis();
   }
 }
@@ -353,7 +362,7 @@ void LifoAlloc::steal(LifoAlloc* other) {
   oversizeThreshold_ = other->oversizeThreshold_;
   curSize_ = other->curSize_;
   peakSize_ = Max(peakSize_, other->peakSize_);
-  oversizeSize_ = other->oversizeSize_;
+  smallAllocsSize_ = other->smallAllocsSize_;
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
   fallibleScope_ = other->fallibleScope_;
 #endif
@@ -365,13 +374,19 @@ void LifoAlloc::transferFrom(LifoAlloc* other) {
   MOZ_ASSERT(!markCount);
   MOZ_ASSERT(!other->markCount);
+  // Transferred chunks are not counted as part of |smallAllocsSize| as this
+  // could introduce bias in the |NextSize| heuristics, leading to
+  // over-allocations in *this* LifoAlloc. As well, to avoid interference with
+  // small allocations made with |this|, the last chunk of the |chunks_| list
+  // should remain the last chunk. Therefore, the transferred chunks are
+  // prepended to the |chunks_| list.
   incrementCurSize(other->curSize_);
-  oversizeSize_ += other->oversizeSize_;
   appendUnused(std::move(other->unused_));
   chunks_.prependAll(std::move(other->chunks_));
   oversize_.prependAll(std::move(other->oversize_));
   other->curSize_ = 0;
-  other->oversizeSize_ = 0;
+  other->smallAllocsSize_ = 0;
 }
 void LifoAlloc::transferUnusedFrom(LifoAlloc* other) {
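
Condensing the LifoAlloc.cpp changes above, the counter moves as follows. The class below is an illustrative model of the bookkeeping only, not the real LifoAlloc: it omits the chunk lists, marks, and error handling, and the method names merely echo the functions touched by the diff.

#include <cassert>
#include <cstddef>

class SmallAllocsCounterModel {
  size_t curSize_ = 0;          // bytes in all chunk lists
  size_t smallAllocsSize_ = 0;  // bytes in chunks serving local small allocs

 public:
  // allocImplColdPath: a fresh chunk starts serving small allocations.
  void newSmallChunk(size_t chunkSize) {
    curSize_ += chunkSize;
    smallAllocsSize_ += chunkSize;
  }
  // allocImplOversize / ensureUnusedApproximateColdPath: counted in curSize_
  // only, never in the growth heuristic.
  void newOtherChunk(size_t chunkSize) { curSize_ += chunkSize; }
  // transferFrom: transferred bytes raise curSize_ but not the heuristic.
  void transferFrom(size_t otherCurSize) { curSize_ += otherCurSize; }
  // release(mark): chunks moved back to the unused list stop counting.
  void chunkReleasedToUnused(size_t chunkSize) {
    smallAllocsSize_ -= chunkSize;
  }
  // freeAll / releaseAll: chunk provenance is lost, so reset the heuristic.
  void freeAll() {
    smallAllocsSize_ = 0;
    curSize_ = 0;
  }
  // Invariant the patch asserts whenever curSize_ shrinks.
  void check() const { assert(curSize_ >= smallAllocsSize_); }
};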

js/src/ds/LifoAlloc.h

@@ -516,7 +516,7 @@ class LifoAlloc {
   // List of chunks containing allocated data where each allocation is larger
   // than the oversize threshold. Each chunk contains exactly one allocation.
-  // This reduces wasted space in the normal chunk list.
+  // This reduces wasted space in the chunk list.
   //
   // Oversize chunks are allocated on demand and freed as soon as they are
   // released, instead of being pushed to the unused list.
@@ -528,9 +528,16 @@
   size_t markCount;
   size_t defaultChunkSize_;
   size_t oversizeThreshold_;
+  // Size of all chunks in chunks_, oversize_, unused_ lists.
   size_t curSize_;
   size_t peakSize_;
-  size_t oversizeSize_;
+  // Size of all chunks containing small bump allocations. This heuristic is
+  // used to compute growth rate while ignoring chunks such as oversized,
+  // now-unused, or transferred (which followed their own growth patterns).
+  size_t smallAllocsSize_;
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
   bool fallibleScope_;
 #endif
@@ -573,6 +580,7 @@ class LifoAlloc {
   void decrementCurSize(size_t size) {
     MOZ_ASSERT(curSize_ >= size);
     curSize_ -= size;
+    MOZ_ASSERT(curSize_ >= smallAllocsSize_);
   }
   void* allocImplColdPath(size_t n);
@@ -771,16 +779,22 @@
  public:
   void releaseAll() {
     MOZ_ASSERT(!markCount);
+    // When releasing all chunks, we can no longer determine which chunks were
+    // transferred and which were not, so simply clear the heuristic to zero
+    // right away.
+    smallAllocsSize_ = 0;
     for (detail::BumpChunk& bc : chunks_) {
       bc.release();
     }
     unused_.appendAll(std::move(chunks_));
     // On release, we free any oversize allocations instead of keeping them
     // in unused chunks.
     while (!oversize_.empty()) {
       UniqueBumpChunk bc = oversize_.popFirst();
       decrementCurSize(bc->computedSizeOfIncludingThis());
-      oversizeSize_ -= bc->computedSizeOfIncludingThis();
     }
   }
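
Finally, the new assertion in decrementCurSize documents the invariant that every byte counted by smallAllocsSize_ is also counted by curSize_. A minimal sketch of just that check, assuming nothing but the two counters (the struct is illustrative, not the real class):

#include <cassert>
#include <cstddef>

struct CounterInvariantSketch {
  size_t curSize_ = 0;
  size_t smallAllocsSize_ = 0;

  void incrementCurSize(size_t size) { curSize_ += size; }

  void decrementCurSize(size_t size) {
    assert(curSize_ >= size);
    curSize_ -= size;
    // A chunk leaving the allocator must already have been removed from the
    // small-allocation count (or the counter cleared); otherwise the growth
    // heuristic would keep sizing new chunks against memory that is gone.
    assert(curSize_ >= smallAllocsSize_);
  }
};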