Bug 1483182 - Don't allocate in HashTable::lookupForAdd(). r=luke

Currently lookupForAdd() will allocate if the table has no storage. But it
doesn't report an error if the allocation fails, which can cause problems.

This patch changes things so that lookupForAdd() doesn't allocate when the table
has no storage. Instead, it returns an AddPtr that is not *valid* (its mEntry
is null) but it is *live*, and can be used in add(), whereupon the allocation
will occur.

The patch also makes Ptr::isValid() and AddPtr::isValid() non-public, because
the valid vs. live distinction is non-obvious and best kept hidden within the
classes.

--HG--
extra : rebase_source : 95d58725d92cc83332e27a61f98fa61185440e26
This commit is contained in:
Nicholas Nethercote 2018-08-21 13:49:17 +10:00
parent abd415337c
commit d4f517f3e2
4 changed files with 99 additions and 19 deletions

View File

@ -28,7 +28,7 @@ SparseBitmap::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf)
SparseBitmap::BitBlock&
SparseBitmap::createBlock(Data::AddPtr p, size_t blockId, AutoEnterOOMUnsafeRegion& oomUnsafe)
{
MOZ_ASSERT(!p && p.isValid());
MOZ_ASSERT(!p);
BitBlock* block = js_new<BitBlock>();
if (!block || !data.add(p, blockId, block))
oomUnsafe.crash("Bitmap OOM");

View File

@ -0,0 +1,15 @@
// Regression test for bug 1483182: lookupForAdd() must not allocate table
// storage; the allocation (and therefore any OOM failure) is deferred to
// add(). The snippet below is eval'd under simulated OOM to exercise that
// path without crashing the shell.
// NOTE: the template literal is runtime data (it is eval'd) — do not edit it.
var lfLogBuffer = `
function testOuterForInVar() {
return eval("for (var x in {}); (function() { return delete x; })");
}
testOuterForInVar();
`;
// Evaluate the snippet twice under simulated OOM; the test passes as long as
// the shell does not crash.
loadFile(lfLogBuffer);
loadFile(lfLogBuffer);
// Runs |lfVarx| inside oomTest() — a js-shell testing function that
// (presumably) injects simulated allocation failures at successive
// allocation points; any resulting exception is deliberately swallowed.
function loadFile(lfVarx) {
try {
oomTest(function() {
eval(lfVarx);
});
} catch (lfVare) {}
}

View File

@ -448,8 +448,34 @@ BEGIN_TEST(testHashLazyStorage)
set.compact();
CHECK(set.capacity() == 0);
// lookupForAdd() instantiates, even if not followed by add().
set.lookupForAdd(1);
auto p = set.lookupForAdd(1);
CHECK(set.capacity() == 0);
CHECK(set.add(p, 1));
CHECK(set.capacity() == minCap);
CHECK(set.has(1));
set.clear();
set.compact();
CHECK(set.capacity() == 0);
p = set.lookupForAdd(1);
CHECK(set.putNew(2));
CHECK(set.capacity() == minCap);
CHECK(set.relookupOrAdd(p, 1, 1));
CHECK(set.capacity() == minCap);
CHECK(set.has(1));
set.clear();
set.compact();
CHECK(set.capacity() == 0);
CHECK(set.putNew(1));
p = set.lookupForAdd(1);
set.clear();
set.compact();
CHECK(set.count() == 0);
CHECK(set.relookupOrAdd(p, 1, 1));
CHECK(set.count() == 1);
CHECK(set.capacity() == minCap);
set.clear();

View File

@ -1209,6 +1209,18 @@ public:
{
}
// This constructor is used only by AddPtr() within lookupForAdd().
explicit Ptr(const HashTable& aTable)
: mEntry(nullptr)
#ifdef DEBUG
, mTable(&aTable)
, mGeneration(aTable.generation())
#endif
{
}
bool isValid() const { return !!mEntry; }
public:
Ptr()
: mEntry(nullptr)
@ -1219,8 +1231,6 @@ public:
{
}
bool isValid() const { return !!mEntry; }
bool found() const
{
if (!isValid()) {
@ -1286,6 +1296,21 @@ public:
{
}
// This constructor is used when lookupForAdd() is performed on a table
// lacking entry storage; it leaves mEntry null but initializes everything
// else.
AddPtr(const HashTable& aTable, HashNumber aHashNumber)
: Ptr(aTable)
, mKeyHash(aHashNumber)
#ifdef DEBUG
, mMutationCount(aTable.mMutationCount)
#endif
{
MOZ_ASSERT(isLive());
}
bool isLive() const { return isLiveHash(mKeyHash); }
public:
AddPtr()
: mKeyHash(0)
@ -2107,16 +2132,12 @@ public:
return AddPtr();
}
HashNumber keyHash = prepareHash(aLookup);
if (!mTable) {
uint32_t newCapacity = rawCapacity();
RebuildStatus status = changeTableSize(newCapacity, ReportFailure);
MOZ_ASSERT(status != NotOverloaded);
if (status == RehashFailed) {
return AddPtr();
}
return AddPtr(*this, keyHash);
}
HashNumber keyHash = prepareHash(aLookup);
// Directly call the constructor in the return statement to avoid
// excess copying when building with Visual Studio 2017.
// See bug 1385181.
@ -2133,7 +2154,7 @@ public:
MOZ_ASSERT(!(aPtr.mKeyHash & sCollisionBit));
// Check for error from ensureHash() here.
if (!aPtr.isValid()) {
if (!aPtr.isLive()) {
return false;
}
@ -2142,14 +2163,25 @@ public:
MOZ_ASSERT(aPtr.mMutationCount == mMutationCount);
#endif
// Changing an entry from removed to live does not affect whether we
// are overloaded and can be handled separately.
if (aPtr.mEntry->isRemoved()) {
if (!aPtr.isValid()) {
MOZ_ASSERT(!mTable && mEntryCount == 0);
uint32_t newCapacity = rawCapacity();
RebuildStatus status = changeTableSize(newCapacity, ReportFailure);
MOZ_ASSERT(status != NotOverloaded);
if (status == RehashFailed) {
return false;
}
aPtr.mEntry = &findNonLiveEntry(aPtr.mKeyHash);
} else if (aPtr.mEntry->isRemoved()) {
// Changing an entry from removed to live does not affect whether we are
// overloaded and can be handled separately.
if (!this->checkSimulatedOOM()) {
return false;
}
mRemovedCount--;
aPtr.mKeyHash |= sCollisionBit;
} else {
// Preserve the validity of |aPtr.mEntry|.
RebuildStatus status = rehashIfOverloaded();
@ -2210,20 +2242,27 @@ public:
Args&&... aArgs)
{
// Check for error from ensureHash() here.
if (!aPtr.isValid()) {
if (!aPtr.isLive()) {
return false;
}
#ifdef DEBUG
aPtr.mGeneration = generation();
aPtr.mMutationCount = mMutationCount;
#endif
{
if (mTable) {
ReentrancyGuard g(*this);
// Check that aLookup has not been destroyed.
MOZ_ASSERT(prepareHash(aLookup) == aPtr.mKeyHash);
aPtr.mEntry = &lookup<ForAdd>(aLookup, aPtr.mKeyHash);
if (aPtr.found()) {
return true;
}
} else {
// Clear aPtr so it's invalid; add() will allocate storage and redo the
// lookup.
aPtr.mEntry = nullptr;
}
return aPtr.found() || add(aPtr, std::forward<Args>(aArgs)...);
return add(aPtr, std::forward<Args>(aArgs)...);
}
void remove(Ptr aPtr)