diff --git a/js/src/ds/Bitmap.cpp b/js/src/ds/Bitmap.cpp
index 79610d3e2f04..8aedd333c069 100644
--- a/js/src/ds/Bitmap.cpp
+++ b/js/src/ds/Bitmap.cpp
@@ -28,7 +28,7 @@ SparseBitmap::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf)
 SparseBitmap::BitBlock&
 SparseBitmap::createBlock(Data::AddPtr p, size_t blockId, AutoEnterOOMUnsafeRegion& oomUnsafe)
 {
-    MOZ_ASSERT(!p && p.isValid());
+    MOZ_ASSERT(!p);
     BitBlock* block = js_new<BitBlock>();
     if (!block || !data.add(p, blockId, block))
         oomUnsafe.crash("Bitmap OOM");
diff --git a/js/src/jit-test/tests/basic/bug1483182.js b/js/src/jit-test/tests/basic/bug1483182.js
new file mode 100644
index 000000000000..75e7f2e8730b
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug1483182.js
@@ -0,0 +1,15 @@
+var lfLogBuffer = `
+    function testOuterForInVar() {
+        return eval("for (var x in {}); (function() { return delete x; })");
+    }
+    testOuterForInVar();
+`;
+loadFile(lfLogBuffer);
+loadFile(lfLogBuffer);
+function loadFile(lfVarx) {
+    try {
+        oomTest(function() {
+            eval(lfVarx);
+        });
+    } catch (lfVare) {}
+}
diff --git a/js/src/jsapi-tests/testHashTable.cpp b/js/src/jsapi-tests/testHashTable.cpp
index 50708610804d..eac829b950e4 100644
--- a/js/src/jsapi-tests/testHashTable.cpp
+++ b/js/src/jsapi-tests/testHashTable.cpp
@@ -448,8 +448,34 @@ BEGIN_TEST(testHashLazyStorage)
     set.compact();
     CHECK(set.capacity() == 0);
 
-    // lookupForAdd() instantiates, even if not followed by add().
-    set.lookupForAdd(1);
+    auto p = set.lookupForAdd(1);
+    CHECK(set.capacity() == 0);
+    CHECK(set.add(p, 1));
+    CHECK(set.capacity() == minCap);
+    CHECK(set.has(1));
+
+    set.clear();
+    set.compact();
+    CHECK(set.capacity() == 0);
+
+    p = set.lookupForAdd(1);
+    CHECK(set.putNew(2));
+    CHECK(set.capacity() == minCap);
+    CHECK(set.relookupOrAdd(p, 1, 1));
+    CHECK(set.capacity() == minCap);
+    CHECK(set.has(1));
+
+    set.clear();
+    set.compact();
+    CHECK(set.capacity() == 0);
+
+    CHECK(set.putNew(1));
+    p = set.lookupForAdd(1);
+    set.clear();
+    set.compact();
+    CHECK(set.count() == 0);
+    CHECK(set.relookupOrAdd(p, 1, 1));
+    CHECK(set.count() == 1);
     CHECK(set.capacity() == minCap);
 
     set.clear();
diff --git a/mfbt/HashTable.h b/mfbt/HashTable.h
index f81e06895875..95aca11efa4f 100644
--- a/mfbt/HashTable.h
+++ b/mfbt/HashTable.h
@@ -1209,6 +1209,18 @@ public:
     {
     }
 
+    // This constructor is used only by AddPtr() within lookupForAdd().
+    explicit Ptr(const HashTable& aTable)
+      : mEntry(nullptr)
+#ifdef DEBUG
+      , mTable(&aTable)
+      , mGeneration(aTable.generation())
+#endif
+    {
+    }
+
+    bool isValid() const { return !!mEntry; }
+
   public:
     Ptr()
      : mEntry(nullptr)
@@ -1219,8 +1231,6 @@
     {
     }
 
-    bool isValid() const { return !!mEntry; }
-
     bool found() const
     {
       if (!isValid()) {
@@ -1286,6 +1296,21 @@
     {
     }
 
+    // This constructor is used when lookupForAdd() is performed on a table
+    // lacking entry storage; it leaves mEntry null but initializes everything
+    // else.
+    AddPtr(const HashTable& aTable, HashNumber aHashNumber)
+      : Ptr(aTable)
+      , mKeyHash(aHashNumber)
+#ifdef DEBUG
+      , mMutationCount(aTable.mMutationCount)
+#endif
+    {
+      MOZ_ASSERT(isLive());
+    }
+
+    bool isLive() const { return isLiveHash(mKeyHash); }
+
   public:
     AddPtr()
       : mKeyHash(0)
@@ -2107,16 +2132,12 @@ public:
       return AddPtr();
     }
 
+    HashNumber keyHash = prepareHash(aLookup);
+
     if (!mTable) {
-      uint32_t newCapacity = rawCapacity();
-      RebuildStatus status = changeTableSize(newCapacity, ReportFailure);
-      MOZ_ASSERT(status != NotOverloaded);
-      if (status == RehashFailed) {
-        return AddPtr();
-      }
+      return AddPtr(*this, keyHash);
     }
 
-    HashNumber keyHash = prepareHash(aLookup);
     // Directly call the constructor in the return statement to avoid
     // excess copying when building with Visual Studio 2017.
     // See bug 1385181.
@@ -2133,7 +2154,7 @@ public:
     MOZ_ASSERT(!(aPtr.mKeyHash & sCollisionBit));
 
     // Check for error from ensureHash() here.
-    if (!aPtr.isValid()) {
+    if (!aPtr.isLive()) {
       return false;
     }
 
@@ -2142,14 +2163,25 @@
     MOZ_ASSERT(aPtr.mMutationCount == mMutationCount);
 #endif
 
-    // Changing an entry from removed to live does not affect whether we
-    // are overloaded and can be handled separately.
-    if (aPtr.mEntry->isRemoved()) {
+    if (!aPtr.isValid()) {
+      MOZ_ASSERT(!mTable && mEntryCount == 0);
+      uint32_t newCapacity = rawCapacity();
+      RebuildStatus status = changeTableSize(newCapacity, ReportFailure);
+      MOZ_ASSERT(status != NotOverloaded);
+      if (status == RehashFailed) {
+        return false;
+      }
+      aPtr.mEntry = &findNonLiveEntry(aPtr.mKeyHash);
+
+    } else if (aPtr.mEntry->isRemoved()) {
+      // Changing an entry from removed to live does not affect whether we are
+      // overloaded and can be handled separately.
       if (!this->checkSimulatedOOM()) {
         return false;
       }
       mRemovedCount--;
       aPtr.mKeyHash |= sCollisionBit;
+
     } else {
       // Preserve the validity of |aPtr.mEntry|.
       RebuildStatus status = rehashIfOverloaded();
@@ -2210,20 +2242,27 @@
                                 Args&&... aArgs)
   {
     // Check for error from ensureHash() here.
-    if (!aPtr.isValid()) {
+    if (!aPtr.isLive()) {
       return false;
     }
 #ifdef DEBUG
     aPtr.mGeneration = generation();
     aPtr.mMutationCount = mMutationCount;
 #endif
-    {
+    if (mTable) {
       ReentrancyGuard g(*this);
       // Check that aLookup has not been destroyed.
       MOZ_ASSERT(prepareHash(aLookup) == aPtr.mKeyHash);
       aPtr.mEntry = &lookup(aLookup, aPtr.mKeyHash);
+      if (aPtr.found()) {
+        return true;
+      }
+    } else {
+      // Clear aPtr so it's invalid; add() will allocate storage and redo the
+      // lookup.
+      aPtr.mEntry = nullptr;
     }
-    return aPtr.found() || add(aPtr, std::forward<Args>(aArgs)...);
+    return add(aPtr, std::forward<Args>(aArgs)...);
   }
 
   void remove(Ptr aPtr)
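
A note on the semantics, since the hunks above are dense: after this change,
lookupForAdd() on a table with no entry storage allocates nothing and returns
an AddPtr that is live (isLive(): the key's hash is cached in mKeyHash) but
not valid (isValid(): mEntry is null). add() recognizes that state, allocates
the minimum-capacity table, and locates a slot via findNonLiveEntry();
relookupOrAdd() either redoes the lookup when storage exists, or resets
aPtr.mEntry so add() takes that same allocation path when it does not. A
minimal caller-side sketch of the resulting pattern, assuming mozilla::HashSet
from this header; noteValue and aAlreadyPresent are illustrative names, not
part of the patch:

    #include <stdint.h>

    #include "mozilla/HashTable.h"

    // Record |aValue| in |aSeen|. Returns false on OOM; otherwise
    // |*aAlreadyPresent| reports whether |aValue| was in the set before.
    static bool
    noteValue(mozilla::HashSet<uint32_t>& aSeen, uint32_t aValue,
              bool* aAlreadyPresent)
    {
      // On an empty set this no longer allocates entry storage: the returned
      // AddPtr is live (hash cached) but not valid (no entry pointer yet).
      auto p = aSeen.lookupForAdd(aValue);
      *aAlreadyPresent = !!p;
      if (p) {
        return true;
      }
      // add() sees the live-but-invalid AddPtr, allocates storage on demand,
      // and re-derives the target slot from the cached hash.
      return aSeen.add(p, aValue);
    }

The new testHashLazyStorage blocks exercise exactly these paths: add() on an
invalid AddPtr, relookupOrAdd() after an intervening putNew() has created
storage, and relookupOrAdd() after clear()/compact() have destroyed it again.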