merge mozilla-inbound to mozilla-central a=merge

Carsten "Tomcat" Book 2017-03-31 14:45:33 +02:00
commit 095969b8ee
105 changed files with 48672 additions and 60778 deletions

View File

@ -2242,18 +2242,21 @@ DocAccessible::MoveChild(Accessible* aChild, Accessible* aNewParent,
return false;
}
MOZ_ASSERT(aIdxInParent <= static_cast<int32_t>(aNewParent->ChildCount()),
"Wrong insertion point for a moving child");
// If the child cannot be re-inserted into the tree, then make sure to remove
// it from its present parent and then shut it down.
bool hasInsertionPoint = (aIdxInParent != -1) &&
(aIdxInParent <= static_cast<int32_t>(aNewParent->ChildCount()));
TreeMutation rmut(curParent);
rmut.BeforeRemoval(aChild, TreeMutation::kNoShutdown);
rmut.BeforeRemoval(aChild, hasInsertionPoint && TreeMutation::kNoShutdown);
curParent->RemoveChild(aChild);
rmut.Done();
// No insertion point for the child.
if (aIdxInParent == -1) {
return true;
}
if (aIdxInParent > static_cast<int32_t>(aNewParent->ChildCount())) {
MOZ_ASSERT_UNREACHABLE("Wrong insertion point for a moving child");
if (!hasInsertionPoint) {
return true;
}

View File

@ -1472,9 +1472,10 @@ pref("browser.tabs.crashReporting.email", "");
pref("extensions.interposition.enabled", true);
pref("extensions.interposition.prefetching", true);
// Enable blocking of e10s for add-on users on beta/release.
// Enable blocking of e10s and e10s-multi for add-on users on beta/release.
#ifdef RELEASE_OR_BETA
pref("extensions.e10sBlocksEnabling", true);
pref("extensions.e10sMultiBlocksEnabling", true);
#endif
// How often to check for CPOW timeouts. CPOWs are only timed out by

View File

@ -1,3 +1,3 @@
This is the pdf.js project output, https://github.com/mozilla/pdf.js
Current extension version is: 1.7.381
Current extension version is: 1.7.401

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -502,7 +502,7 @@ var Stats = (function Stats() {
})();
// Manages all the debugging tools.
var PDFBug = (function PDFBugClosure() {
window.PDFBug = (function PDFBugClosure() {
var panelWidth = 300;
var buttons = [];
var activePanel = null;

File diff suppressed because it is too large

View File

@ -10,7 +10,7 @@ const {
PropTypes,
} = require("devtools/client/shared/vendor/react");
const { L10N } = require("../utils/l10n");
const { getUrlQuery, parseQueryString } = require("../utils/request-utils");
const { getUrlQuery, parseQueryString, parseFormData } = require("../utils/request-utils");
// Components
const PropertiesView = createFactory(require("./properties-view"));
@ -61,7 +61,7 @@ function ParamsPanel({ request }) {
// Form Data section
if (formDataSections && formDataSections.length > 0) {
let sections = formDataSections.filter((str) => /\S/.test(str)).join("&");
object[PARAMS_FORM_DATA] = getProperties(parseQueryString(sections));
object[PARAMS_FORM_DATA] = getProperties(parseFormData(sections));
}
// Request payload section

View File

@ -220,6 +220,26 @@ function parseQueryString(query) {
});
}
/**
* Parse a string of formdata sections into its components
*
* @param {string} sections - sections of formdata joined by &
* @return {array} array of formdata params { name, value }
*/
function parseFormData(sections) {
if (!sections) {
return null;
}
return sections.replace(/^&/, "").split("&").map(e => {
let param = e.split("=");
return {
name: param[0] ? decodeUnicodeUrl(param[0]) : "",
value: param[1] ? decodeUnicodeUrl(param[1]) : "",
};
});
}
module.exports = {
getFormDataSections,
fetchHeaders,
@ -234,4 +254,5 @@ module.exports = {
getUrlHost,
getUrlDetails,
parseQueryString,
parseFormData,
};
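As a quick worked example of the new helper (assuming decodeUnicodeUrl passes plain ASCII through unchanged): parseFormData("&a=1&b=2") yields [{ name: "a", value: "1" }, { name: "b", value: "2" }], and a key with no value, as in "a=1&b", yields value: "" for the second entry.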

View File

@ -42,7 +42,7 @@ add_task(function* () {
EventUtils.sendMouseEvent({ type: "mousedown" },
document.querySelectorAll(".request-list-item")[2]);
yield wait;
testParamsTab1("a", '"b"', "foo", '"bar"');
testParamsTab1("a", '"b"', "?foo", '"bar"');
wait = waitForDOM(document, "#params-panel tr:not(.tree-section).treeRow", 2);
EventUtils.sendMouseEvent({ type: "mousedown" },

View File

@ -748,7 +748,7 @@ CustomElementRegistry::Define(const nsAString& aName,
// here.
JS::RootedValue rootedv(cx, JS::ObjectValue(*constructorProtoUnwrapped));
if (!JS_WrapValue(cx, &rootedv) || !callbacksHolder->Init(cx, rootedv)) {
aRv.Throw(NS_ERROR_FAILURE);
aRv.StealExceptionFromJSContext(cx);
return;
}
} // Leave constructorProtoUnwrapped's compartment.

View File

@ -10,14 +10,6 @@ Cu.import('resource://gre/modules/Services.jsm');
const BASE_PREF = "dom.ipc.processCount"
const PREF_BRANCH = BASE_PREF + ".";
// Utilities:
function getMaxContentParents(processType) {
// If the pref doesn't exist, get the default number of processes.
// If there's no pref, use only one process.
return Services.prefs.getIntPref(PREF_BRANCH + processType,
Services.prefs.getIntPref(BASE_PREF, 1));
}
// Fills up aProcesses until max and then selects randomly from the available
// ones.
function RandomSelector() {
@ -27,13 +19,12 @@ RandomSelector.prototype = {
classID: Components.ID("{c616fcfd-9737-41f1-aa74-cee72a38f91b}"),
QueryInterface: XPCOMUtils.generateQI([Ci.nsIContentProcessProvider]),
provideProcess(aType, aOpener, aProcesses, aCount) {
let maxContentParents = getMaxContentParents(aType);
if (aCount < maxContentParents) {
provideProcess(aType, aOpener, aProcesses, aCount, aMaxCount) {
if (aCount < aMaxCount) {
return Ci.nsIContentProcessProvider.NEW_PROCESS;
}
let startIdx = Math.floor(Math.random() * maxContentParents);
let startIdx = Math.floor(Math.random() * aMaxCount);
let curIdx = startIdx;
do {
@ -41,7 +32,7 @@ RandomSelector.prototype = {
return curIdx;
}
curIdx = (curIdx + 1) % maxContentParents;
curIdx = (curIdx + 1) % aMaxCount;
} while (curIdx !== startIdx);
return Ci.nsIContentProcessProvider.NEW_PROCESS;
@ -57,16 +48,20 @@ MinTabSelector.prototype = {
classID: Components.ID("{2dc08eaf-6eef-4394-b1df-a3a927c1290b}"),
QueryInterface: XPCOMUtils.generateQI([Ci.nsIContentProcessProvider]),
provideProcess(aType, aOpener, aProcesses, aCount) {
let maxContentParents = getMaxContentParents(aType);
if (aCount < maxContentParents) {
provideProcess(aType, aOpener, aProcesses, aCount, aMaxCount) {
if (aCount < aMaxCount) {
return Ci.nsIContentProcessProvider.NEW_PROCESS;
}
let min = Number.MAX_VALUE;
let candidate = Ci.nsIContentProcessProvider.NEW_PROCESS;
for (let i = 0; i < maxContentParents; i++) {
// Note that at this point aMaxCount is in the valid range. The reason for
// not using aCount here is that if we keep processes alive for testing but
// want a test to use only a single content process, we can keep relying on
// dom.ipc.processCount = 1.
for (let i = 0; i < aMaxCount; i++) {
let process = aProcesses[i];
let tabCount = process.tabCount;
if (process.opener === aOpener && tabCount < min) {

View File

@ -14,6 +14,8 @@
<script type="application/javascript">
SimpleTest.requestLongerTimeout(2);
const SJS = "://example.com/tests/dom/base/test/referrer_testserver.sjs?";
const PARAMS = ["ATTRIBUTE_POLICY", "NEW_ATTRIBUTE_POLICY", "META_POLICY", "REL", "SCHEME_FROM", "SCHEME_TO"];

View File

@ -54,5 +54,5 @@ interface nsIContentProcessProvider : nsISupports
*/
int32_t provideProcess(in AString aType, in nsIContentProcessInfo aOpener,
[array, size_is(aCount)] in nsIContentProcessInfo aAliveProcesses,
in uint32_t aCount);
in uint32_t aCount, in uint32_t aMaxCount);
};

View File

@ -712,9 +712,20 @@ ContentParent::GetOrCreatePool(const nsAString& aContentProcessType)
/*static*/ uint32_t
ContentParent::GetMaxProcessCount(const nsAString& aContentProcessType)
{
int32_t maxContentParents;
nsAutoCString processCountPref("dom.ipc.processCount.");
processCountPref.Append(NS_ConvertUTF16toUTF8(aContentProcessType));
bool hasUserValue = Preferences::HasUserValue(processCountPref.get()) ||
Preferences::HasUserValue("dom.ipc.processCount");
// Let's respect the user's decision to enable multiple content processes
// despite some installed add-ons that might perform poorly.
if (!hasUserValue &&
Preferences::GetBool("extensions.e10sMultiBlocksEnabling", false) &&
Preferences::GetBool("extensions.e10sMultiBlockedByAddons", false)) {
return 1;
}
int32_t maxContentParents;
if (NS_FAILED(Preferences::GetInt(processCountPref.get(), &maxContentParents))) {
maxContentParents = Preferences::GetInt("dom.ipc.processCount", 1);
}
@ -811,7 +822,7 @@ ContentParent::GetNewOrUsedBrowserProcess(const nsAString& aRemoteType,
if (cpp &&
NS_SUCCEEDED(cpp->ProvideProcess(aRemoteType, openerInfo,
infos.Elements(), infos.Length(),
&index))) {
maxContentParents, &index))) {
// If the provider returned an existing ContentParent, use that one.
if (0 <= index && static_cast<uint32_t>(index) <= maxContentParents) {
RefPtr<ContentParent> retval = contentParents[index];
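To make the new pref interplay concrete: on a release or beta profile where extensions.e10sMultiBlocksEnabling and extensions.e10sMultiBlockedByAddons are both true and the user has not set dom.ipc.processCount (or a per-type dom.ipc.processCount.* pref) themselves, GetMaxProcessCount() now returns 1, keeping e10s-multi off; an explicit user-set process-count pref takes precedence and the add-on block is ignored.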

View File

@ -204,7 +204,6 @@ const char* mozilla::dom::ContentPrefs::gInitPrefs[] = {
"security.sandbox.content.tempDirSuffix",
"security.sandbox.logging.enabled",
"security.sandbox.mac.track.violations",
"security.sandbox.windows.log",
"security.sandbox.windows.log.stackTraceDepth",
"shutdown.watchdog.timeoutSecs",
"signed.applets.codebase_principal_support",
@ -244,4 +243,3 @@ const char* mozilla::dom::ContentPrefs::GetContentPref(size_t aIndex)
MOZ_ASSERT(aIndex < ArrayLength(ContentPrefs::gInitPrefs));
return gInitPrefs[aIndex];
}

View File

@ -5,6 +5,7 @@
#include "OpusTrackEncoder.h"
#include "nsString.h"
#include "GeckoProfiler.h"
#include "mozilla/CheckedInt.h"
#include <opus/opus.h>
@ -334,24 +335,42 @@ OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
AudioChunk chunk = *iter;
// Chunk to the required frame size.
int frameToCopy = chunk.GetDuration();
if (frameCopied + frameToCopy > framesToFetch) {
StreamTime frameToCopy = chunk.GetDuration();
if (frameToCopy > framesToFetch - frameCopied) {
frameToCopy = framesToFetch - frameCopied;
}
// Possible greatest value of framesToFetch = 3844: see
// https://bugzilla.mozilla.org/show_bug.cgi?id=1349421#c8. frameToCopy
// should not be able to exceed this value.
MOZ_ASSERT(frameToCopy <= 3844, "frameToCopy exceeded expected range");
if (!chunk.IsNull()) {
// Append the interleaved data to the end of pcm buffer.
AudioTrackEncoder::InterleaveTrackData(chunk, frameToCopy, mChannels,
pcm.Elements() + frameCopied * mChannels);
} else {
CheckedInt<int> memsetLength = CheckedInt<int>(frameToCopy) *
mChannels *
sizeof(AudioDataValue);
if (!memsetLength.isValid()) {
// This should never happen, but we use a defensive check because
// we really don't want a bad memset
MOZ_ASSERT_UNREACHABLE("memsetLength invalid!");
return NS_ERROR_FAILURE;
}
memset(pcm.Elements() + frameCopied * mChannels, 0,
frameToCopy * mChannels * sizeof(AudioDataValue));
memsetLength.value());
}
frameCopied += frameToCopy;
iter.Next();
}
// Possible greatest value of framesToFetch = 3844: see
// https://bugzilla.mozilla.org/show_bug.cgi?id=1349421#c8. frameCopied
// should not be able to exceed this value.
MOZ_ASSERT(frameCopied <= 3844, "frameCopied exceeded expected range");
RefPtr<EncodedFrame> audiodata = new EncodedFrame();
audiodata->SetFrameType(EncodedFrame::OPUS_AUDIO_FRAME);
int framesInPCM = frameCopied;
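The overflow guard above follows the usual mozilla::CheckedInt idiom; here is a minimal self-contained sketch of that pattern (ZeroFillPcm, the float sample type and the buffer layout are invented for illustration, not part of this patch):

#include "mozilla/CheckedInt.h"
#include <string.h>

static bool
ZeroFillPcm(float* aBuffer, int aFrames, int aChannels)
{
  // Compute aFrames * aChannels * sizeof(float) with overflow checking.
  mozilla::CheckedInt<int> length =
    mozilla::CheckedInt<int>(aFrames) * aChannels * int(sizeof(float));
  if (!length.isValid()) {
    // The multiplication overflowed; refuse to memset a bogus size.
    return false;
  }
  memset(aBuffer, 0, length.value());
  return true;
}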

View File

@ -64,8 +64,8 @@ AnimationState::UpdateStateInternal(LookupResult& aResult,
if (mHasBeenDecoded) {
Maybe<uint32_t> frameCount = FrameCount();
MOZ_ASSERT(frameCount.isSome());
aResult.Surface().Seek(*frameCount - 1);
if (aResult.Surface() && aResult.Surface()->IsFinished()) {
if (NS_SUCCEEDED(aResult.Surface().Seek(*frameCount - 1)) &&
aResult.Surface()->IsFinished()) {
mIsCurrentlyDecoded = true;
} else {
mIsCurrentlyDecoded = false;

View File

@ -103,11 +103,13 @@ class HashMap
//
// Also see the definition of Ptr in HashTable above (with T = Entry).
typedef typename Impl::Ptr Ptr;
Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
// Like lookup, but does not assert if two threads call lookup at the same
// time. Only use this method when none of the threads will modify the map.
Ptr readonlyThreadsafeLookup(const Lookup& l) const { return impl.readonlyThreadsafeLookup(l); }
MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const {
return impl.readonlyThreadsafeLookup(l);
}
// Assuming |p.found()|, remove |*p|.
void remove(Ptr p) { impl.remove(p); }
@ -146,7 +148,7 @@ class HashMap
// assert(p->key == 3);
// char val = p->value;
typedef typename Impl::AddPtr AddPtr;
AddPtr lookupForAdd(const Lookup& l) const {
MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const {
return impl.lookupForAdd(l);
}
@ -197,6 +199,10 @@ class HashMap
// using the finish() method.
void clear() { impl.clear(); }
// Remove all entries. Unlike clear() this method tries to shrink the table.
// Unlike finish() it does not require the map to be initialized again.
void clearAndShrink() { impl.clearAndShrink(); }
// Remove all the entries and release all internal buffers. The map must
// be initialized again before any use.
void finish() { impl.finish(); }
@ -354,11 +360,13 @@ class HashSet
//
// Also see the definition of Ptr in HashTable above.
typedef typename Impl::Ptr Ptr;
Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
// Like lookup, but does not assert if two threads call lookup at the same
// time. Only use this method when none of the threads will modify the map.
Ptr readonlyThreadsafeLookup(const Lookup& l) const { return impl.readonlyThreadsafeLookup(l); }
MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const {
return impl.readonlyThreadsafeLookup(l);
}
// Assuming |p.found()|, remove |*p|.
void remove(Ptr p) { impl.remove(p); }
@ -396,7 +404,9 @@ class HashSet
// Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
// entry |t|, where the caller ensures match(l,t).
typedef typename Impl::AddPtr AddPtr;
AddPtr lookupForAdd(const Lookup& l) const { return impl.lookupForAdd(l); }
MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const {
return impl.lookupForAdd(l);
}
template <typename U>
MOZ_MUST_USE bool add(AddPtr& p, U&& u) {
@ -436,6 +446,10 @@ class HashSet
// using the finish() method.
void clear() { impl.clear(); }
// Remove all entries. Unlike clear() this method tries to shrink the table.
// Unlike finish() it does not require the set to be initialized again.
void clearAndShrink() { impl.clearAndShrink(); }
// Remove all the entries and release all internal buffers. The set must
// be initialized again before any use.
void finish() { impl.finish(); }
@ -1359,7 +1373,7 @@ class HashTable : private AllocPolicy
return wouldBeUnderloaded(capacity(), entryCount);
}
static bool match(Entry& e, const Lookup& l)
static MOZ_ALWAYS_INLINE bool match(Entry& e, const Lookup& l)
{
return HashPolicy::match(HashPolicy::getKey(e.get()), l);
}
@ -1369,7 +1383,8 @@ class HashTable : private AllocPolicy
// (The use of the METER() macro to increment stats violates this
// restriction but we will live with that for now because it's enabled so
// rarely.)
Entry& lookup(const Lookup& l, HashNumber keyHash, unsigned collisionBit) const
MOZ_ALWAYS_INLINE Entry&
lookup(const Lookup& l, HashNumber keyHash, unsigned collisionBit) const
{
MOZ_ASSERT(isLiveHash(keyHash));
MOZ_ASSERT(!(keyHash & sCollisionBit));
@ -1669,6 +1684,12 @@ class HashTable : private AllocPolicy
#endif
}
void clearAndShrink()
{
clear();
compactIfUnderloaded();
}
void finish()
{
#ifdef JS_DEBUG
@ -1727,7 +1748,7 @@ class HashTable : private AllocPolicy
return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
}
Ptr lookup(const Lookup& l) const
MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const
{
mozilla::ReentrancyGuard g(*this);
if (!HasHash<HashPolicy>(l))
@ -1736,7 +1757,7 @@ class HashTable : private AllocPolicy
return Ptr(lookup(l, keyHash, 0), *this);
}
Ptr readonlyThreadsafeLookup(const Lookup& l) const
MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
{
if (!HasHash<HashPolicy>(l))
return Ptr();
@ -1744,7 +1765,7 @@ class HashTable : private AllocPolicy
return Ptr(lookup(l, keyHash, 0), *this);
}
AddPtr lookupForAdd(const Lookup& l) const
MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
{
mozilla::ReentrancyGuard g(*this);
if (!EnsureHash<HashPolicy>(l))
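A minimal usage sketch of the new clearAndShrink() entry point (ExampleCache and PurgeExampleCache are invented names; includes abbreviated):

#include "js/HashTable.h"

using ExampleCache = js::HashMap<uint32_t, uint32_t,
                                 js::DefaultHasher<uint32_t>,
                                 js::SystemAllocPolicy>;

void
PurgeExampleCache(ExampleCache& cache)
{
  // Unlike clear(), this also tries to shrink the table; unlike finish(),
  // the map stays initialized and can keep being used afterwards.
  cache.clearAndShrink();
}

This mirrors how the patch itself uses it for the new per-Zone atom cache in GCRuntime::purgeRuntime() further down.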

View File

@ -2944,7 +2944,7 @@ js::SharedIntlData::ensureTimeZones(JSContext* cx)
bool
js::SharedIntlData::validateTimeZoneName(JSContext* cx, HandleString timeZone,
MutableHandleString result)
MutableHandleAtom result)
{
if (!ensureTimeZones(cx))
return false;
@ -3033,7 +3033,7 @@ js::intl_IsValidTimeZoneName(JSContext* cx, unsigned argc, Value* vp)
SharedIntlData& sharedIntlData = cx->runtime()->sharedIntlData.ref();
RootedString timeZone(cx, args[0].toString());
RootedString validatedTimeZone(cx);
RootedAtom validatedTimeZone(cx);
if (!sharedIntlData.validateTimeZoneName(cx, timeZone, &validatedTimeZone))
return false;

View File

@ -170,7 +170,7 @@ class SharedIntlData
* isn't a valid IANA time zone name, |result| remains unchanged.
*/
bool validateTimeZoneName(JSContext* cx, JS::HandleString timeZone,
JS::MutableHandleString result);
MutableHandleAtom result);
/**
* Returns the canonical time zone name in |result|. If no canonical name

View File

@ -26,33 +26,15 @@ SparseBitmap::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf)
}
SparseBitmap::BitBlock&
SparseBitmap::getOrCreateBlock(size_t blockId)
SparseBitmap::createBlock(Data::AddPtr p, size_t blockId)
{
Data::AddPtr p = data.lookupForAdd(blockId);
if (!p) {
AutoEnterOOMUnsafeRegion oomUnsafe;
BitBlock* block = js_new<BitBlock>();
if (!block || !data.add(p, blockId, block))
oomUnsafe.crash("Bitmap OOM");
PodZero(block);
}
return *p->value();
}
void
SparseBitmap::setBit(size_t bit)
{
size_t word = bit / JS_BITS_PER_WORD;
size_t blockWord = blockStartWord(word);
BitBlock& block = getOrCreateBlock(blockWord / WordsInBlock);
block[word - blockWord] |= uintptr_t(1) << (bit % JS_BITS_PER_WORD);
}
SparseBitmap::BitBlock*
SparseBitmap::getBlock(size_t blockId) const
{
Data::Ptr p = data.lookup(blockId);
return p ? p->value() : nullptr;
MOZ_ASSERT(!p);
AutoEnterOOMUnsafeRegion oomUnsafe;
BitBlock* block = js_new<BitBlock>();
if (!block || !data.add(p, blockId, block))
oomUnsafe.crash("Bitmap OOM");
PodZero(block);
return *block;
}
bool

View File

@ -78,8 +78,19 @@ class SparseBitmap
return std::min<size_t>((size_t)WordsInBlock, std::max<long>(count, 0));
}
BitBlock* getBlock(size_t blockId) const;
BitBlock& getOrCreateBlock(size_t blockId);
BitBlock& createBlock(Data::AddPtr p, size_t blockId);
MOZ_ALWAYS_INLINE BitBlock* getBlock(size_t blockId) const {
Data::Ptr p = data.lookup(blockId);
return p ? p->value() : nullptr;
}
MOZ_ALWAYS_INLINE BitBlock& getOrCreateBlock(size_t blockId) {
Data::AddPtr p = data.lookupForAdd(blockId);
if (p)
return *p->value();
return createBlock(p, blockId);
}
public:
bool init() { return data.init(); }
@ -87,7 +98,13 @@ class SparseBitmap
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
void setBit(size_t bit);
MOZ_ALWAYS_INLINE void setBit(size_t bit) {
size_t word = bit / JS_BITS_PER_WORD;
size_t blockWord = blockStartWord(word);
BitBlock& block = getOrCreateBlock(blockWord / WordsInBlock);
block[word - blockWord] |= uintptr_t(1) << (bit % JS_BITS_PER_WORD);
}
bool getBit(size_t bit) const;
void bitwiseAndWith(const DenseBitmap& other);

View File

@ -0,0 +1,76 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "gc/AtomMarking.h"
#include "jscompartment.h"
#include "gc/Heap-inl.h"
namespace js {
namespace gc {
inline size_t
GetAtomBit(TenuredCell* thing)
{
MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
Arena* arena = thing->arena();
size_t arenaBit = (reinterpret_cast<uintptr_t>(thing) - arena->address()) / CellSize;
return arena->atomBitmapStart() * JS_BITS_PER_WORD + arenaBit;
}
inline bool
ThingIsPermanent(JSAtom* atom)
{
return atom->isPermanentAtom();
}
inline bool
ThingIsPermanent(JS::Symbol* symbol)
{
return symbol->isWellKnownSymbol();
}
template <typename T>
MOZ_ALWAYS_INLINE void
AtomMarkingRuntime::inlinedMarkAtom(JSContext* cx, T* thing)
{
static_assert(mozilla::IsSame<T, JSAtom>::value ||
mozilla::IsSame<T, JS::Symbol>::value,
"Should only be called with JSAtom* or JS::Symbol* argument");
MOZ_ASSERT(thing);
MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
// The context's zone will be null during initialization of the runtime.
if (!cx->zone())
return;
MOZ_ASSERT(!cx->zone()->isAtomsZone());
if (ThingIsPermanent(thing))
return;
size_t bit = GetAtomBit(thing);
MOZ_ASSERT(bit / JS_BITS_PER_WORD < allocatedWords);
cx->zone()->markedAtoms().setBit(bit);
if (!cx->helperThread()) {
// Trigger a read barrier on the atom, in case there is an incremental
// GC in progress. This is necessary if the atom is being marked
// because a reference to it was obtained from another zone which is
// not being collected by the incremental GC.
T::readBarrier(thing);
}
// Children of the thing also need to be marked in the context's zone.
// We don't have a JSTracer for this so manually handle the cases in which
// an atom can reference other atoms.
markChildren(cx, thing);
}
} // namespace gc
} // namespace js

View File

@ -4,7 +4,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "gc/AtomMarking.h"
#include "gc/AtomMarking-inl.h"
#include "jscompartment.h"
@ -152,74 +152,43 @@ AtomMarkingRuntime::updateChunkMarkBits(JSRuntime* runtime)
}
}
static inline size_t
GetAtomBit(TenuredCell* thing)
{
MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
Arena* arena = thing->arena();
size_t arenaBit = (reinterpret_cast<uintptr_t>(thing) - arena->address()) / CellSize;
return arena->atomBitmapStart() * JS_BITS_PER_WORD + arenaBit;
}
static bool
ThingIsPermanent(TenuredCell* thing)
{
JS::TraceKind kind = thing->getTraceKind();
if (kind == JS::TraceKind::String && static_cast<JSString*>(thing)->isPermanentAtom())
return true;
if (kind == JS::TraceKind::Symbol && static_cast<JS::Symbol*>(thing)->isWellKnownSymbol())
return true;
return false;
}
template <typename T>
void
AtomMarkingRuntime::markAtom(JSContext* cx, TenuredCell* thing)
AtomMarkingRuntime::markAtom(JSContext* cx, T* thing)
{
// The context's zone will be null during initialization of the runtime.
if (!thing || !cx->zone())
return;
MOZ_ASSERT(!cx->zone()->isAtomsZone());
if (ThingIsPermanent(thing) || !thing->zoneFromAnyThread()->isAtomsZone())
return;
size_t bit = GetAtomBit(thing);
MOZ_ASSERT(bit / JS_BITS_PER_WORD < allocatedWords);
cx->zone()->markedAtoms().setBit(bit);
if (!cx->helperThread()) {
// Trigger a read barrier on the atom, in case there is an incremental
// GC in progress. This is necessary if the atom is being marked
// because a reference to it was obtained from another zone which is
// not being collected by the incremental GC.
TenuredCell::readBarrier(thing);
}
// Children of the thing also need to be marked in the context's zone.
// We don't have a JSTracer for this so manually handle the cases in which
// an atom can reference other atoms.
if (thing->getTraceKind() == JS::TraceKind::Symbol) {
JSAtom* description = static_cast<JS::Symbol*>(thing)->description();
markAtom(cx, description);
}
return inlinedMarkAtom(cx, thing);
}
template void AtomMarkingRuntime::markAtom(JSContext* cx, JSAtom* thing);
template void AtomMarkingRuntime::markAtom(JSContext* cx, JS::Symbol* thing);
void
AtomMarkingRuntime::markId(JSContext* cx, jsid id)
{
if (JSID_IS_GCTHING(id))
markAtom(cx, &JSID_TO_GCTHING(id).asCell()->asTenured());
if (JSID_IS_ATOM(id)) {
markAtom(cx, JSID_TO_ATOM(id));
return;
}
if (JSID_IS_SYMBOL(id)) {
markAtom(cx, JSID_TO_SYMBOL(id));
return;
}
MOZ_ASSERT(!JSID_IS_GCTHING(id));
}
void
AtomMarkingRuntime::markAtomValue(JSContext* cx, const Value& value)
{
if (value.isGCThing()) {
Cell* thing = value.toGCThing();
if (thing && !IsInsideNursery(thing))
markAtom(cx, &thing->asTenured());
if (value.isString()) {
if (value.toString()->isAtom())
markAtom(cx, &value.toString()->asAtom());
return;
}
if (value.isSymbol()) {
markAtom(cx, value.toSymbol());
return;
}
MOZ_ASSERT_IF(value.isGCThing(), value.isObject() || value.isPrivateGCThing());
}
void
@ -230,23 +199,26 @@ AtomMarkingRuntime::adoptMarkedAtoms(Zone* target, Zone* source)
}
#ifdef DEBUG
template <typename T>
bool
AtomMarkingRuntime::atomIsMarked(Zone* zone, Cell* thingArg)
AtomMarkingRuntime::atomIsMarked(Zone* zone, T* thing)
{
if (!thingArg || IsInsideNursery(thingArg))
return true;
TenuredCell* thing = &thingArg->asTenured();
static_assert(mozilla::IsSame<T, JSAtom>::value ||
mozilla::IsSame<T, JS::Symbol>::value,
"Should only be called with JSAtom* or JS::Symbol* argument");
MOZ_ASSERT(thing);
MOZ_ASSERT(!IsInsideNursery(thing));
MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
if (!zone->runtimeFromAnyThread()->permanentAtoms)
return true;
if (ThingIsPermanent(thing) || !thing->zoneFromAnyThread()->isAtomsZone())
if (ThingIsPermanent(thing))
return true;
JS::TraceKind kind = thing->getTraceKind();
if (kind == JS::TraceKind::String) {
JSAtom* atom = static_cast<JSAtom*>(thing);
if (mozilla::IsSame<T, JSAtom>::value) {
JSAtom* atom = reinterpret_cast<JSAtom*>(thing);
if (AtomIsPinnedInRuntime(zone->runtimeFromAnyThread(), atom))
return true;
}
@ -255,19 +227,54 @@ AtomMarkingRuntime::atomIsMarked(Zone* zone, Cell* thingArg)
return zone->markedAtoms().getBit(bit);
}
template bool AtomMarkingRuntime::atomIsMarked(Zone* zone, JSAtom* thing);
template bool AtomMarkingRuntime::atomIsMarked(Zone* zone, JS::Symbol* thing);
template<>
bool
AtomMarkingRuntime::atomIsMarked(Zone* zone, TenuredCell* thing)
{
if (!thing)
return true;
JS::TraceKind kind = thing->getTraceKind();
if (kind == JS::TraceKind::String) {
JSString* str = static_cast<JSString*>(thing);
if (str->isAtom())
return atomIsMarked(zone, &str->asAtom());
return true;
}
if (kind == JS::TraceKind::Symbol)
return atomIsMarked(zone, static_cast<JS::Symbol*>(thing));
return true;
}
bool
AtomMarkingRuntime::idIsMarked(Zone* zone, jsid id)
{
if (JSID_IS_GCTHING(id))
return atomIsMarked(zone, JSID_TO_GCTHING(id).asCell());
if (JSID_IS_ATOM(id))
return atomIsMarked(zone, JSID_TO_ATOM(id));
if (JSID_IS_SYMBOL(id))
return atomIsMarked(zone, JSID_TO_SYMBOL(id));
MOZ_ASSERT(!JSID_IS_GCTHING(id));
return true;
}
bool
AtomMarkingRuntime::valueIsMarked(Zone* zone, const Value& value)
{
if (value.isGCThing())
return atomIsMarked(zone, value.toGCThing());
if (value.isString()) {
if (value.toString()->isAtom())
return atomIsMarked(zone, &value.toString()->asAtom());
return true;
}
if (value.isSymbol())
return atomIsMarked(zone, value.toSymbol());
MOZ_ASSERT_IF(value.isGCThing(), value.isObject() || value.isPrivateGCThing());
return true;
}

View File

@ -11,6 +11,7 @@
#include "ds/Bitmap.h"
#include "gc/Heap.h"
#include "threading/ProtectedData.h"
#include "vm/Symbol.h"
namespace js {
namespace gc {
@ -22,6 +23,13 @@ class AtomMarkingRuntime
// Unused arena atom bitmap indexes. Protected by the GC lock.
js::ExclusiveAccessLockOrGCTaskData<Vector<size_t, 0, SystemAllocPolicy>> freeArenaIndexes;
void markChildren(JSContext* cx, JSAtom*) {}
void markChildren(JSContext* cx, JS::Symbol* symbol) {
if (JSAtom* description = symbol->description())
markAtom(cx, description);
}
public:
// The extent of all allocated and free words in atom mark bitmaps.
// This monotonically increases and may be read from without locking.
@ -51,7 +59,12 @@ class AtomMarkingRuntime
void updateChunkMarkBits(JSRuntime* runtime);
// Mark an atom or id as being newly reachable by the context's zone.
void markAtom(JSContext* cx, TenuredCell* thing);
template <typename T> void markAtom(JSContext* cx, T* thing);
// Version of markAtom that's always inlined, for performance-sensitive
// callers.
template <typename T> MOZ_ALWAYS_INLINE void inlinedMarkAtom(JSContext* cx, T* thing);
void markId(JSContext* cx, jsid id);
void markAtomValue(JSContext* cx, const Value& value);
@ -60,7 +73,7 @@ class AtomMarkingRuntime
#ifdef DEBUG
// Return whether |thing/id| is in the atom marking bitmap for |zone|.
bool atomIsMarked(Zone* zone, Cell* thing);
template <typename T> bool atomIsMarked(Zone* zone, T* thing);
bool idIsMarked(Zone* zone, jsid id);
bool valueIsMarked(Zone* zone, const Value& value);
#endif

View File

@ -41,6 +41,7 @@ JS::Zone::Zone(JSRuntime* rt, ZoneGroup* group)
hasDeadProxies_(group),
typeDescrObjects_(group, this, SystemAllocPolicy()),
markedAtoms_(group),
atomCache_(group),
usage(&rt->gc.usage),
threshold(),
gcDelayBytes(0),
@ -93,7 +94,8 @@ bool Zone::init(bool isSystemArg)
gcSweepGroupEdges().init() &&
gcWeakKeys().init() &&
typeDescrObjects().init() &&
markedAtoms().init();
markedAtoms().init() &&
atomCache().init();
}
void

View File

@ -434,9 +434,15 @@ struct Zone : public JS::shadow::Zone,
private:
// Bitmap of atoms marked by this zone.
js::ZoneGroupOrGCTaskData<js::SparseBitmap> markedAtoms_;
// Set of atoms recently used by this Zone. Purged on GC.
js::ZoneGroupOrGCTaskData<js::AtomSet> atomCache_;
public:
js::SparseBitmap& markedAtoms() { return markedAtoms_.ref(); }
js::AtomSet& atomCache() { return atomCache_.ref(); }
// Track heap usage under this Zone.
js::gc::HeapUsage usage;

View File

@ -3376,6 +3376,7 @@ ICCall_Native::Compiler::generateStubCode(MacroAssembler& masm)
EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size());
masm.push(scratch);
masm.push(ICTailCallReg);
masm.loadJSContext(scratch);
masm.enterFakeExitFrameForNative(scratch, isConstructing_);
// Execute call.
@ -3473,6 +3474,7 @@ ICCall_ClassHook::Compiler::generateStubCode(MacroAssembler& masm)
EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size());
masm.push(scratch);
masm.push(ICTailCallReg);
masm.loadJSContext(scratch);
masm.enterFakeExitFrameForNative(scratch, isConstructing_);
// Execute call.

View File

@ -3921,7 +3921,7 @@ CodeGenerator::visitCallNative(LCallNative* call)
// Construct native exit frame.
uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
masm.enterFakeExitFrameForNative(tempReg, call->mir()->isConstructing());
masm.enterFakeExitFrameForNative(argContextReg, call->mir()->isConstructing());
markSafepointAt(safepointOffset, call);
@ -4049,15 +4049,14 @@ CodeGenerator::visitCallDOMNative(LCallDOMNative* call)
// Construct native exit frame.
uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
masm.loadJSContext(argJSContext);
masm.enterFakeExitFrame(argJSContext, IonDOMMethodExitFrameLayoutToken);
markSafepointAt(safepointOffset, call);
// Construct and execute call.
masm.setupUnalignedABICall(argJSContext);
masm.loadJSContext(argJSContext);
masm.passABIArg(argJSContext);
masm.passABIArg(argObj);
masm.passABIArg(argPrivate);
@ -7884,6 +7883,7 @@ JitRuntime::generateLazyLinkStub(JSContext* cx)
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
Register temp0 = regs.takeAny();
masm.loadJSContext(temp0);
masm.enterFakeExitFrame(temp0, LazyLinkExitFrameLayoutToken);
masm.PushStubCode();
@ -11435,14 +11435,13 @@ CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins)
masm.moveStackPtrTo(ObjectReg);
uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
masm.loadJSContext(JSContextReg);
masm.enterFakeExitFrame(JSContextReg, IonDOMExitFrameLayoutGetterToken);
markSafepointAt(safepointOffset, ins);
masm.setupUnalignedABICall(JSContextReg);
masm.loadJSContext(JSContextReg);
masm.passABIArg(JSContextReg);
masm.passABIArg(ObjectReg);
masm.passABIArg(PrivateReg);
@ -11524,14 +11523,13 @@ CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins)
masm.moveStackPtrTo(ObjectReg);
uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
masm.loadJSContext(JSContextReg);
masm.enterFakeExitFrame(JSContextReg, IonDOMExitFrameLayoutSetterToken);
markSafepointAt(safepointOffset, ins);
masm.setupUnalignedABICall(JSContextReg);
masm.loadJSContext(JSContextReg);
masm.passABIArg(JSContextReg);
masm.passABIArg(ObjectReg);
masm.passABIArg(PrivateReg);

View File

@ -875,7 +875,7 @@ IonCacheIRCompiler::emitCallNativeGetterResult()
if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save))
return false;
masm.enterFakeExitFrame(scratch, IonOOLNativeExitFrameLayoutToken);
masm.enterFakeExitFrame(argJSContext, IonOOLNativeExitFrameLayoutToken);
// Construct and execute call.
masm.setupUnalignedABICall(scratch);
@ -932,7 +932,7 @@ IonCacheIRCompiler::emitCallProxyGetResult()
if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save))
return false;
masm.enterFakeExitFrame(scratch, IonOOLProxyExitFrameLayoutToken);
masm.enterFakeExitFrame(argJSContext, IonOOLProxyExitFrameLayoutToken);
// Make the call.
masm.setupUnalignedABICall(scratch);
@ -1631,7 +1631,7 @@ IonCacheIRCompiler::emitCallNativeSetter()
if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save))
return false;
masm.enterFakeExitFrame(scratch, IonOOLNativeExitFrameLayoutToken);
masm.enterFakeExitFrame(argJSContext, IonOOLNativeExitFrameLayoutToken);
// Make the call.
masm.setupUnalignedABICall(scratch);

View File

@ -830,7 +830,7 @@ EmitGetterCall(JSContext* cx, MacroAssembler& masm,
if (!masm.icBuildOOLFakeExitFrame(returnAddr, aic))
return false;
masm.enterFakeExitFrame(scratchReg, IonOOLNativeExitFrameLayoutToken);
masm.enterFakeExitFrame(argJSContextReg, IonOOLNativeExitFrameLayoutToken);
// Construct and execute call.
masm.setupUnalignedABICall(scratchReg);
@ -888,7 +888,7 @@ EmitGetterCall(JSContext* cx, MacroAssembler& masm,
if (!masm.icBuildOOLFakeExitFrame(returnAddr, aic))
return false;
masm.enterFakeExitFrame(scratchReg, IonOOLPropertyOpExitFrameLayoutToken);
masm.enterFakeExitFrame(argJSContextReg, IonOOLPropertyOpExitFrameLayoutToken);
// Make the call.
masm.setupUnalignedABICall(scratchReg);

View File

@ -281,9 +281,9 @@ MacroAssembler::PushStubCode()
}
void
MacroAssembler::enterExitFrame(Register temp, const VMFunction* f)
MacroAssembler::enterExitFrame(Register cxreg, const VMFunction* f)
{
linkExitFrame(temp);
linkExitFrame(cxreg);
// Push the JitCode pointer. (Keep the code alive, when on the stack)
PushStubCode();
// Push VMFunction pointer, to mark arguments.
@ -291,18 +291,18 @@ MacroAssembler::enterExitFrame(Register temp, const VMFunction* f)
}
void
MacroAssembler::enterFakeExitFrame(Register temp, enum ExitFrameTokenValues token)
MacroAssembler::enterFakeExitFrame(Register cxreg, enum ExitFrameTokenValues token)
{
linkExitFrame(temp);
linkExitFrame(cxreg);
Push(Imm32(token));
Push(ImmPtr(nullptr));
}
void
MacroAssembler::enterFakeExitFrameForNative(Register temp, bool isConstructing)
MacroAssembler::enterFakeExitFrameForNative(Register cxreg, bool isConstructing)
{
enterFakeExitFrame(temp, isConstructing ? ConstructNativeExitFrameLayoutToken
: CallNativeExitFrameLayoutToken);
enterFakeExitFrame(cxreg, isConstructing ? ConstructNativeExitFrameLayoutToken
: CallNativeExitFrameLayoutToken);
}
void

View File

@ -1449,6 +1449,7 @@ BailoutReportOverRecursed(JSContext* cx)
void
MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
{
loadJSContext(scratch);
enterExitFrame(scratch);
Label baseline;
@ -1510,6 +1511,7 @@ MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
push(temp);
push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
// No GC things to mark on the stack, push a bare token.
loadJSContext(scratch);
enterFakeExitFrame(scratch, ExitFrameLayoutBareToken);
// If monitorStub is non-null, handle resumeAddr appropriately.
@ -2729,10 +2731,9 @@ MacroAssembler::callWithABINoProfiler(wasm::SymbolicAddress imm, MoveOp::Type re
// Exit frame footer.
void
MacroAssembler::linkExitFrame(Register temp)
MacroAssembler::linkExitFrame(Register cxreg)
{
loadJSContext(temp);
storeStackPtr(Address(temp, offsetof(JSContext, jitTop)));
storeStackPtr(Address(cxreg, offsetof(JSContext, jitTop)));
}
void

View File

@ -688,14 +688,14 @@ class MacroAssembler : public MacroAssemblerSpecific
inline bool hasSelfReference() const;
// Push stub code and the VMFunction pointer.
inline void enterExitFrame(Register temp, const VMFunction* f = nullptr);
inline void enterExitFrame(Register cxreg, const VMFunction* f = nullptr);
// Push an exit frame token to identify which fake exit frame this footer
// corresponds to.
inline void enterFakeExitFrame(Register temp, enum ExitFrameTokenValues token);
inline void enterFakeExitFrame(Register cxreg, enum ExitFrameTokenValues token);
// Push an exit frame token for a native call.
inline void enterFakeExitFrameForNative(Register temp, bool isConstructing);
inline void enterFakeExitFrameForNative(Register cxreg, bool isConstructing);
// Pop ExitFrame footer in addition to the extra frame.
inline void leaveExitFrame(size_t extraFrame = 0);
@ -703,7 +703,7 @@ class MacroAssembler : public MacroAssemblerSpecific
private:
// Save the top of the stack into JSContext::jitTop of the current thread,
// which should be the location of the latest exit frame.
void linkExitFrame(Register temp);
void linkExitFrame(Register cxreg);
// Patch the value of PushStubCode with the pointer to the finalized code.
void linkSelfReference(JitCode* code);

View File

@ -287,6 +287,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.push(scratch);
masm.push(Imm32(0)); // Fake return address.
// No GC things to mark on the stack, push a bare token.
masm.loadJSContext(scratch);
masm.enterFakeExitFrame(scratch, ExitFrameLayoutBareToken);
masm.push(framePtr); // BaselineFrame
@ -793,8 +794,8 @@ JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
if (f.expectTailCall == NonTailCall)
masm.pushReturnAddress();
masm.enterExitFrame(cxreg, &f);
masm.loadJSContext(cxreg);
masm.enterExitFrame(cxreg, &f);
// Save the base of the argument set stored on the stack.
Register argsBase = InvalidReg;

View File

@ -189,6 +189,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.makeFrameDescriptor(r19, JitFrame_BaselineJS, ExitFrameLayout::Size());
masm.asVIXL().Push(x19, xzr); // Push xzr for a fake return address.
// No GC things to mark: push a bare token.
masm.loadJSContext(r19);
masm.enterFakeExitFrame(r19, ExitFrameLayoutBareToken);
masm.push(BaselineFrameReg, reg_code);
@ -586,8 +587,8 @@ JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
// +0 returnAddress (pushed by this function, caller sets as lr)
//
// We're aligned to an exit frame, so link it up.
masm.enterExitFrame(reg_cx, &f);
masm.loadJSContext(reg_cx);
masm.enterExitFrame(reg_cx, &f);
// Save the current stack pointer as the base for copying arguments.
Register argsBase = InvalidReg;

View File

@ -254,6 +254,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.storePtr(zero, Address(StackPointer, 0)); // fake return address
// No GC things to mark, push a bare token.
masm.loadJSContext(scratch);
masm.enterFakeExitFrame(scratch, ExitFrameLayoutBareToken);
masm.reserveStack(2 * sizeof(uintptr_t));
@ -736,8 +737,8 @@ JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
masm.pushReturnAddress();
// We're aligned to an exit frame, so link it up.
masm.enterExitFrame(cxreg, &f);
masm.loadJSContext(cxreg);
masm.enterExitFrame(cxreg, &f);
// Save the base of the argument set stored on the stack.
Register argsBase = InvalidReg;

View File

@ -271,6 +271,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.storePtr(zero, Address(StackPointer, 0)); // fake return address
// No GC things to mark, push a bare token.
masm.loadJSContext(scratch);
masm.enterFakeExitFrame(scratch, ExitFrameLayoutBareToken);
masm.reserveStack(2 * sizeof(uintptr_t));
@ -706,8 +707,8 @@ JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
masm.pushReturnAddress();
// We're aligned to an exit frame, so link it up.
masm.enterExitFrame(cxreg, &f);
masm.loadJSContext(cxreg);
masm.enterExitFrame(cxreg, &f);
// Save the base of the argument set stored on the stack.
Register argsBase = InvalidReg;

View File

@ -230,6 +230,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.push(valuesSize);
masm.push(Imm32(0)); // Fake return address.
// No GC things to mark, push a bare token.
masm.loadJSContext(scratch);
masm.enterFakeExitFrame(scratch, ExitFrameLayoutBareToken);
regs.add(valuesSize);
@ -680,8 +681,8 @@ JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
// +0 returnAddress
//
// We're aligned to an exit frame, so link it up.
masm.enterExitFrame(cxreg, &f);
masm.loadJSContext(cxreg);
masm.enterExitFrame(cxreg, &f);
// Save the current stack pointer as the base for copying arguments.
Register argsBase = InvalidReg;

View File

@ -225,6 +225,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.push(scratch); // Fake return address.
masm.push(Imm32(0));
// No GC things to mark on the stack, push a bare token.
masm.loadJSContext(scratch);
masm.enterFakeExitFrame(scratch, ExitFrameLayoutBareToken);
masm.push(framePtr);
@ -709,8 +710,8 @@ JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
// +0 returnAddress
//
// We're aligned to an exit frame, so link it up.
masm.enterExitFrame(cxreg, &f);
masm.loadJSContext(cxreg);
masm.enterExitFrame(cxreg, &f);
// Save the current stack pointer as the base for copying arguments.
Register argsBase = InvalidReg;

View File

@ -224,7 +224,12 @@ class JSAPITest
const char* filename = "-",
int lineno = 0)
{
JSAPITestString message = msg;
char location[256];
snprintf(location, mozilla::ArrayLength(location), "%s:%d:", filename, lineno);
JSAPITestString message(location);
message += msg;
if (JS_IsExceptionPending(cx)) {
js::gc::AutoSuppressGC gcoff(cx);
JS::RootedValue v(cx);
@ -237,8 +242,11 @@ class JSAPITest
message += bytes.ptr();
}
}
fprintf(stderr, "%s:%d:%.*s\n",
filename, lineno, int(message.length()), message.begin());
fprintf(stderr, "%.*s\n", int(message.length()), message.begin());
if (msgs.length() != 0)
msgs += " | ";
msgs += message;
return false;
}

View File

@ -12,6 +12,7 @@
#include "mozilla/ArrayUtils.h"
#include "mozilla/RangedPtr.h"
#include "mozilla/Unused.h"
#include <string.h>
@ -27,6 +28,7 @@
#include "jscompartmentinlines.h"
#include "jsobjinlines.h"
#include "gc/AtomMarking-inl.h"
#include "vm/String-inl.h"
using namespace js;
@ -62,7 +64,9 @@ const char js_setter_str[] = "setter";
// which create a small number of atoms.
static const uint32_t JS_STRING_HASH_COUNT = 64;
AtomSet::Ptr js::FrozenAtomSet::readonlyThreadsafeLookup(const AtomSet::Lookup& l) const {
MOZ_ALWAYS_INLINE AtomSet::Ptr
js::FrozenAtomSet::readonlyThreadsafeLookup(const AtomSet::Lookup& l) const
{
return mSet->readonlyThreadsafeLookup(l);
}
@ -301,10 +305,28 @@ static JSAtom*
AtomizeAndCopyChars(JSContext* cx, const CharT* tbchars, size_t length, PinningBehavior pin)
{
if (JSAtom* s = cx->staticStrings().lookup(tbchars, length))
return s;
return s;
AtomHasher::Lookup lookup(tbchars, length);
// Try the per-Zone cache first. If we find the atom there we can avoid the
// atoms lock, the markAtom call, and the multiple HashSet lookups below.
// We don't use the per-Zone cache if we want a pinned atom: handling that
// is more complicated and pinning atoms is relatively uncommon.
Zone* zone = cx->zone();
Maybe<AtomSet::AddPtr> zonePtr;
if (MOZ_LIKELY(zone && pin == DoNotPinAtom)) {
zonePtr.emplace(zone->atomCache().lookupForAdd(lookup));
if (zonePtr.ref()) {
// The cache is purged on GC so if we're in the middle of an
// incremental GC we should have barriered the atom when we put
// it in the cache.
JSAtom* atom = zonePtr.ref()->asPtrUnbarriered();
MOZ_ASSERT(AtomIsMarked(zone, atom));
return atom;
}
}
// Note: when this function is called while the permanent atoms table is
// being initialized (in initializeAtoms()), |permanentAtoms| is not yet
// initialized so this lookup is always skipped. Only once
@ -312,8 +334,12 @@ AtomizeAndCopyChars(JSContext* cx, const CharT* tbchars, size_t length, PinningB
// initialized and then this lookup will go ahead.
if (cx->isPermanentAtomsInitialized()) {
AtomSet::Ptr pp = cx->permanentAtoms().readonlyThreadsafeLookup(lookup);
if (pp)
return pp->asPtr(cx);
if (pp) {
JSAtom* atom = pp->asPtr(cx);
if (zonePtr)
mozilla::Unused << zone->atomCache().add(*zonePtr, AtomStateEntry(atom, false));
return atom;
}
}
AutoLockForExclusiveAccess lock(cx);
@ -323,10 +349,15 @@ AtomizeAndCopyChars(JSContext* cx, const CharT* tbchars, size_t length, PinningB
if (p) {
JSAtom* atom = p->asPtr(cx);
p->setPinned(bool(pin));
cx->markAtom(atom);
cx->atomMarking().inlinedMarkAtom(cx, atom);
if (zonePtr)
mozilla::Unused << zone->atomCache().add(*zonePtr, AtomStateEntry(atom, false));
return atom;
}
if (!JSString::validateLength(cx, length))
return nullptr;
JSAtom* atom;
{
AutoAtomsCompartment ac(cx, lock);
@ -352,7 +383,9 @@ AtomizeAndCopyChars(JSContext* cx, const CharT* tbchars, size_t length, PinningB
}
}
cx->markAtom(atom);
cx->atomMarking().inlinedMarkAtom(cx, atom);
if (zonePtr)
mozilla::Unused << zone->atomCache().add(*zonePtr, AtomStateEntry(atom, false));
return atom;
}
@ -405,9 +438,6 @@ js::Atomize(JSContext* cx, const char* bytes, size_t length, PinningBehavior pin
{
CHECK_REQUEST(cx);
if (!JSString::validateLength(cx, length))
return nullptr;
const Latin1Char* chars = reinterpret_cast<const Latin1Char*>(bytes);
return AtomizeAndCopyChars(cx, chars, length, pin);
}
@ -417,10 +447,6 @@ JSAtom*
js::AtomizeChars(JSContext* cx, const CharT* chars, size_t length, PinningBehavior pin)
{
CHECK_REQUEST(cx);
if (!JSString::validateLength(cx, length))
return nullptr;
return AtomizeAndCopyChars(cx, chars, length, pin);
}
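The net effect: atomizing characters that were recently atomized in the same Zone now hits zone->atomCache() and returns before taking the exclusive-access lock or consulting the runtime-wide atoms table, and the cache is emptied via the atomCache().clearAndShrink() call added to GCRuntime::purgeRuntime() below, so cached entries never survive a GC.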

View File

@ -81,12 +81,12 @@ struct AtomHasher
HashNumber hash;
Lookup(const char16_t* chars, size_t length)
MOZ_ALWAYS_INLINE Lookup(const char16_t* chars, size_t length)
: twoByteChars(chars), isLatin1(false), length(length), atom(nullptr)
{
hash = mozilla::HashString(chars, length);
}
Lookup(const JS::Latin1Char* chars, size_t length)
MOZ_ALWAYS_INLINE Lookup(const JS::Latin1Char* chars, size_t length)
: latin1Chars(chars), isLatin1(true), length(length), atom(nullptr)
{
hash = mozilla::HashString(chars, length);
@ -95,7 +95,7 @@ struct AtomHasher
};
static HashNumber hash(const Lookup& l) { return l.hash; }
static inline bool match(const AtomStateEntry& entry, const Lookup& lookup);
static MOZ_ALWAYS_INLINE bool match(const AtomStateEntry& entry, const Lookup& lookup);
static void rekey(AtomStateEntry& k, const AtomStateEntry& newKey) { k = newKey; }
};
@ -114,7 +114,7 @@ public:
~FrozenAtomSet() { js_delete(mSet); }
AtomSet::Ptr readonlyThreadsafeLookup(const AtomSet::Lookup& l) const;
MOZ_ALWAYS_INLINE AtomSet::Ptr readonlyThreadsafeLookup(const AtomSet::Lookup& l) const;
size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
return mSet->sizeOfIncludingThis(mallocSizeOf);

View File

@ -166,7 +166,7 @@ AtomHasher::Lookup::Lookup(const JSAtom* atom)
}
}
inline bool
MOZ_ALWAYS_INLINE bool
AtomHasher::match(const AtomStateEntry& entry, const Lookup& lookup)
{
JSAtom* key = entry.asPtrUnbarriered();

View File

@ -267,9 +267,12 @@ struct JSContext : public JS::RootingContext,
js::gc::AtomMarkingRuntime& atomMarking() {
return runtime_->gc.atomMarking;
}
void markAtom(js::gc::TenuredCell* atom) {
void markAtom(JSAtom* atom) {
atomMarking().markAtom(this, atom);
}
void markAtom(JS::Symbol* symbol) {
atomMarking().markAtom(this, symbol);
}
void markId(jsid id) {
atomMarking().markId(this, id);
}

View File

@ -88,13 +88,18 @@ class CompartmentChecker
check(handle.get());
}
void checkAtom(gc::Cell* cell) {
template <typename T>
void checkAtom(T* thing) {
static_assert(mozilla::IsSame<T, JSAtom>::value ||
mozilla::IsSame<T, JS::Symbol>::value,
"Should only be called with JSAtom* or JS::Symbol* argument");
#ifdef DEBUG
// Atoms which move across zone boundaries need to be marked in the new
// zone, see JS_MarkCrossZoneId.
if (compartment) {
JSRuntime* rt = compartment->runtimeFromAnyThread();
MOZ_ASSERT(rt->gc.atomMarking.atomIsMarked(compartment->zone(), cell));
MOZ_ASSERT(rt->gc.atomMarking.atomIsMarked(compartment->zone(), thing));
}
#endif
}
@ -102,7 +107,7 @@ class CompartmentChecker
void check(JSString* str) {
MOZ_ASSERT(JS::CellIsNotGray(str));
if (str->isAtom()) {
checkAtom(str);
checkAtom(&str->asAtom());
} else {
checkZone(str->zone());
}
@ -156,8 +161,12 @@ class CompartmentChecker
}
void check(jsid id) {
if (JSID_IS_GCTHING(id))
checkAtom(JSID_TO_GCTHING(id).asCell());
if (JSID_IS_ATOM(id))
checkAtom(JSID_TO_ATOM(id));
else if (JSID_IS_SYMBOL(id))
checkAtom(JSID_TO_SYMBOL(id));
else
MOZ_ASSERT(!JSID_IS_GCTHING(id));
}
void check(JSScript* script) {

View File

@ -338,7 +338,7 @@ JSCompartment::wrap(JSContext* cx, MutableHandleString strp)
* the atom as being in use by the new zone.
*/
if (str->isAtom()) {
cx->markAtom(str);
cx->markAtom(&str->asAtom());
return true;
}

View File

@ -3635,6 +3635,9 @@ GCRuntime::purgeRuntime(AutoLockForExclusiveAccess& lock)
for (GCCompartmentsIter comp(rt); !comp.done(); comp.next())
comp->purge();
for (GCZonesIter zone(rt); !zone.done(); zone.next())
zone->atomCache().clearAndShrink();
for (const CooperatingContext& target : rt->cooperatingContexts()) {
freeUnusedLifoBlocksAfterSweeping(&target.context()->tempLifoAlloc());
target.context()->interpreterStack().purge(rt);
@ -4508,7 +4511,10 @@ JSCompartment::findDeadProxyZoneEdges(bool* foundAny)
if (IsDeadProxyObject(&value.toObject())) {
*foundAny = true;
CrossCompartmentKey& key = e.front().mutableKey();
if (!key.as<JSObject*>()->zone()->gcSweepGroupEdges().put(zone()))
Zone* wrapperZone = key.as<JSObject*>()->zone();
if (!wrapperZone->isGCMarking())
continue;
if (!wrapperZone->gcSweepGroupEdges().put(zone()))
return false;
}
}

View File

@ -2301,7 +2301,8 @@ Debugger::appendAllocationSite(JSContext* cx, HandleObject obj, HandleSavedFrame
if (!JSObject::constructorDisplayAtom(cx, obj, &ctorName))
return false;
}
cx->markAtom(ctorName);
if (ctorName)
cx->markAtom(ctorName);
auto className = obj->getClass()->name;
auto size = JS::ubi::Node(obj.get()).size(cx->runtime()->debuggerMallocSizeOf);
@ -10132,7 +10133,8 @@ DebuggerObject::name(JSContext* cx) const
MOZ_ASSERT(isFunction());
JSAtom* atom = referent()->as<JSFunction>().explicitName();
cx->markAtom(atom);
if (atom)
cx->markAtom(atom);
return atom;
}
@ -10142,7 +10144,8 @@ DebuggerObject::displayName(JSContext* cx) const
MOZ_ASSERT(isFunction());
JSAtom* atom = referent()->as<JSFunction>().displayAtom();
cx->markAtom(atom);
if (atom)
cx->markAtom(atom);
return atom;
}
@ -10188,7 +10191,8 @@ DebuggerObject::getParameterNames(JSContext* cx, HandleDebuggerObject object,
for (size_t i = 0; i < referent->nargs(); i++, fi++) {
MOZ_ASSERT(fi.argumentSlot() == i);
JSAtom* atom = fi.name();
cx->markAtom(atom);
if (atom)
cx->markAtom(atom);
result[i].set(atom);
}
}

View File

@ -497,7 +497,8 @@ SavedFrame::initFromLookup(JSContext* cx, SavedFrame::HandleLookup lookup)
// points where the context moves between compartments, but Lookups live on
// the stack (where the atoms are kept alive regardless) and this is a
// more convenient pinchpoint.
cx->markAtom(lookup->source);
if (lookup->source)
cx->markAtom(lookup->source);
if (lookup->functionDisplayName)
cx->markAtom(lookup->functionDisplayName);
if (lookup->asyncCause)
@ -751,7 +752,8 @@ GetSavedFrameSource(JSContext* cx, HandleObject savedFrame, MutableHandleString
}
sourcep.set(frame->getSource());
}
cx->markAtom(sourcep);
if (sourcep->isAtom())
cx->markAtom(&sourcep->asAtom());
return SavedFrameResult::Ok;
}
@ -813,8 +815,8 @@ GetSavedFrameFunctionDisplayName(JSContext* cx, HandleObject savedFrame, Mutable
}
namep.set(frame->getFunctionDisplayName());
}
if (namep)
cx->markAtom(namep);
if (namep && namep->isAtom())
cx->markAtom(&namep->asAtom());
return SavedFrameResult::Ok;
}
@ -844,8 +846,8 @@ GetSavedFrameAsyncCause(JSContext* cx, HandleObject savedFrame, MutableHandleStr
if (!asyncCausep && skippedAsync)
asyncCausep.set(cx->names().Async);
}
if (asyncCausep)
cx->markAtom(asyncCausep);
if (asyncCausep && asyncCausep->isAtom())
cx->markAtom(&asyncCausep->asAtom());
return SavedFrameResult::Ok;
}

View File

@ -349,15 +349,6 @@ js::StaticStrings::getUnitStringForElement(JSContext* cx, JSString* str, size_t
return NewDependentString(cx, str, index, 1);
}
inline JSAtom*
js::StaticStrings::getLength2(char16_t c1, char16_t c2)
{
MOZ_ASSERT(fitsInSmallChar(c1));
MOZ_ASSERT(fitsInSmallChar(c2));
size_t index = (((size_t)toSmallChar[c1]) << 6) + toSmallChar[c2];
return length2StaticTable[index];
}
MOZ_ALWAYS_INLINE void
JSString::finalize(js::FreeOp* fop)
{

View File

@ -1142,7 +1142,7 @@ class StaticStrings
/* Return null if no static atom exists for the given (chars, length). */
template <typename CharT>
JSAtom* lookup(const CharT* chars, size_t length) {
MOZ_ALWAYS_INLINE JSAtom* lookup(const CharT* chars, size_t length) {
switch (length) {
case 1: {
char16_t c = chars[0];
@ -1190,7 +1190,12 @@ class StaticStrings
static const SmallChar toSmallChar[];
JSAtom* getLength2(char16_t c1, char16_t c2);
MOZ_ALWAYS_INLINE JSAtom* getLength2(char16_t c1, char16_t c2) {
MOZ_ASSERT(fitsInSmallChar(c1));
MOZ_ASSERT(fitsInSmallChar(c2));
size_t index = (size_t(toSmallChar[c1]) << 6) + toSmallChar[c2];
return length2StaticTable[index];
}
JSAtom* getLength2(uint32_t u) {
MOZ_ASSERT(u < 100);
return getLength2('0' + u / 10, '0' + u % 10);

View File

@ -1368,13 +1368,15 @@ JSStructuredCloneWriter::traverseSavedFrame(HandleObject obj)
return false;
auto name = savedFrame->getFunctionDisplayName();
context()->markAtom(name);
if (name)
context()->markAtom(name);
val = name ? StringValue(name) : NullValue();
if (!startWrite(val))
return false;
auto cause = savedFrame->getAsyncCause();
context()->markAtom(cause);
if (cause)
context()->markAtom(cause);
val = cause ? StringValue(cause) : NullValue();
if (!startWrite(val))
return false;

View File

@ -52,7 +52,8 @@ Symbol::new_(JSContext* cx, JS::SymbolCode code, JSString* description)
AutoAtomsCompartment ac(cx, lock);
sym = newInternal(cx, code, cx->compartment()->randomHashCode(), atom, lock);
}
cx->markAtom(sym);
if (sym)
cx->markAtom(sym);
return sym;
}

View File

@ -1305,10 +1305,10 @@ nsIFrame::GetMarginRectRelativeToSelf() const
}
bool
nsIFrame::IsTransformed() const
nsIFrame::IsTransformed(const nsStyleDisplay* aStyleDisplay) const
{
return ((mState & NS_FRAME_MAY_BE_TRANSFORMED) &&
(StyleDisplay()->HasTransform(this) ||
(StyleDisplayWithOptionalParam(aStyleDisplay)->HasTransform(this) ||
IsSVGTransformed() ||
HasAnimationOfTransform()));
}
@ -1365,12 +1365,12 @@ nsIFrame::Extend3DContext() const
}
bool
nsIFrame::Combines3DTransformWithAncestors() const
nsIFrame::Combines3DTransformWithAncestors(const nsStyleDisplay* aStyleDisplay) const
{
if (!GetParent() || !GetParent()->Extend3DContext()) {
return false;
}
return IsTransformed() || BackfaceIsHidden();
return IsTransformed(aStyleDisplay) || BackfaceIsHidden(aStyleDisplay);
}
bool
@ -2386,7 +2386,7 @@ nsIFrame::BuildDisplayListForStackingContext(nsDisplayListBuilder* aBuilder,
nsRect dirtyRect = aDirtyRect;
bool inTransform = aBuilder->IsInTransform();
bool isTransformed = IsTransformed();
bool isTransformed = IsTransformed(disp);
bool hasPerspective = HasPerspective();
// reset blend mode so we can keep track if this stacking context needs have
// a nsDisplayBlendContainer. Set the blend mode back when the routine exits
@ -3005,7 +3005,7 @@ nsIFrame::BuildDisplayListForChild(nsDisplayListBuilder* aBuilder,
const nsStyleEffects* effects = child->StyleEffects();
const nsStylePosition* pos = child->StylePosition();
bool isVisuallyAtomic = child->HasOpacity()
|| child->IsTransformed()
|| child->IsTransformed(disp)
// strictly speaking, 'perspective' doesn't require visual atomicity,
// but the spec says it acts like the rest of these
|| disp->mChildPerspective.GetUnit() == eStyleUnit_Coord
@ -8907,10 +8907,13 @@ nsIFrame::FinishAndStoreOverflow(nsOverflowAreas& aOverflowAreas,
MOZ_ASSERT(FrameMaintainsOverflow(),
"Don't call - overflow rects not maintained on these SVG frames");
const nsStyleDisplay* disp = StyleDisplay();
bool hasTransform = IsTransformed(disp);
nsRect bounds(nsPoint(0, 0), aNewSize);
// Store the passed in overflow area if we are a preserve-3d frame or we have
// a transform, and it's not just the frame bounds.
if (Combines3DTransformWithAncestors() || IsTransformed()) {
if (hasTransform || Combines3DTransformWithAncestors(disp)) {
if (!aOverflowAreas.VisualOverflow().IsEqualEdges(bounds) ||
!aOverflowAreas.ScrollableOverflow().IsEqualEdges(bounds)) {
nsOverflowAreas* initial =
@ -8954,7 +8957,6 @@ nsIFrame::FinishAndStoreOverflow(nsOverflowAreas& aOverflowAreas,
// children are actually clipped to the padding-box, but since the
// overflow area should include the entire border-box, just set it to
// the border-box here.
const nsStyleDisplay* disp = StyleDisplay();
NS_ASSERTION((disp->mOverflowY == NS_STYLE_OVERFLOW_CLIP) ==
(disp->mOverflowX == NS_STYLE_OVERFLOW_CLIP),
"If one overflow is clip, the other should be too");
@ -9008,7 +9010,6 @@ nsIFrame::FinishAndStoreOverflow(nsOverflowAreas& aOverflowAreas,
}
/* If we're transformed, transform the overflow rect by the current transformation. */
bool hasTransform = IsTransformed();
nsSize oldSize = mRect.Size();
bool sizeChanged = ((aOldSize ? *aOldSize : oldSize) != aNewSize);

View File

@ -3712,6 +3712,16 @@ MeasuringReflow(nsIFrame* aChild,
aChild->Properties().Delete(nsIFrame::BClampMarginBoxMinSizeProperty());
}
ReflowInput childRI(pc, *rs, aChild, aAvailableSize, &aCBSize, riFlags);
// Because we pass ReflowInput::COMPUTE_SIZE_USE_AUTO_BSIZE, and the
// previous reflow of the child might not have, set the child's
// block-resize flag to true.
// FIXME (perf): It would be faster to do this only if the previous
// reflow of the child was not a measuring reflow, and only if the
// child does some of the things that are affected by
// ReflowInput::COMPUTE_SIZE_USE_AUTO_BSIZE.
childRI.SetBResize(true);
ReflowOutput childSize(childRI);
nsReflowStatus childStatus;
const uint32_t flags = NS_FRAME_NO_MOVE_FRAME | NS_FRAME_NO_SIZE_VIEW;
@ -5241,6 +5251,15 @@ nsGridContainerFrame::ReflowInFlowChild(nsIFrame* aChild,
&percentBasis, flags);
childRI.mFlags.mIsTopOfPage = aFragmentainer ? aFragmentainer->mIsTopOfPage : false;
// Because we pass ReflowInput::COMPUTE_SIZE_USE_AUTO_BSIZE, and the
// previous reflow of the child might not have, set the child's
// block-resize flag to true.
// FIXME (perf): It would be faster to do this only if the previous
// reflow of the child was a measuring reflow, and only if the child
// does some of the things that are affected by
// ReflowInput::COMPUTE_SIZE_USE_AUTO_BSIZE.
childRI.SetBResize(true);
// A table-wrapper needs to propagate the CB size we give it to its
// inner table frame later. @see nsTableWrapperFrame::InitChildReflowInput.
if (childType == nsGkAtoms::tableWrapperFrame) {

View File

@ -765,11 +765,22 @@ public:
*
* Callers outside of libxul should use nsIDOMWindow::GetComputedStyle()
* instead of these accessors.
*
* Callers can use Style*WithOptionalParam if they're in a function that
* accepts an *optional* pointer to the style struct.
*/
#define STYLE_STRUCT(name_, checkdata_cb_) \
const nsStyle##name_ * Style##name_ () const { \
NS_ASSERTION(mStyleContext, "No style context found!"); \
return mStyleContext->Style##name_ (); \
} \
const nsStyle##name_ * Style##name_##WithOptionalParam( \
const nsStyle##name_ * aStyleStruct) const { \
if (aStyleStruct) { \
MOZ_ASSERT(aStyleStruct == Style##name_()); \
return aStyleStruct; \
} \
return Style##name_(); \
}
#include "nsStyleStructList.h"
#undef STYLE_STRUCT
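As a rough illustration of the pattern these accessors enable (a sketch only; FooFrame and NeedsTransformLayer are hypothetical names, not part of this patch), a caller that already holds the display struct can thread it through and avoid a second style-context lookup:

// Hypothetical caller: aStyleDisplay may be null, in which case the
// WithOptionalParam accessor falls back to StyleDisplay(); otherwise it
// asserts the pointer matches this frame's struct and reuses it.
bool
FooFrame::NeedsTransformLayer(const nsStyleDisplay* aStyleDisplay) const
{
  const nsStyleDisplay* disp = StyleDisplayWithOptionalParam(aStyleDisplay);
  return disp->HasTransform(this) || BackfaceIsHidden(disp);
}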
@ -1638,8 +1649,11 @@ public:
* or if its parent is an SVG frame that has children-only transforms (e.g.
* an SVG viewBox attribute) or if its transform-style is preserve-3d or
* the frame has transform animations.
*
* @param aStyleDisplay: If the caller has this->StyleDisplay(), providing
* it here will improve performance.
*/
bool IsTransformed() const;
bool IsTransformed(const nsStyleDisplay* aStyleDisplay = nullptr) const;
/**
* True if this frame has any animation of transform in effect.
@ -1693,8 +1707,12 @@ public:
* Returns whether this frame has a parent that Extend3DContext() and has
* its own transform (or hidden backface) to be combined with the parent's
* transform.
*
* @param aStyleDisplay: If the caller has this->StyleDisplay(), providing
* it here will improve performance.
*/
bool Combines3DTransformWithAncestors() const;
bool Combines3DTransformWithAncestors(const nsStyleDisplay* aStyleDisplay
= nullptr) const;
/**
* Returns whether this frame has a hidden backface and has a parent that
@ -3630,8 +3648,12 @@ public:
virtual mozilla::dom::Element*
GetPseudoElement(mozilla::CSSPseudoElementType aType);
bool BackfaceIsHidden() const {
return StyleDisplay()->BackfaceIsHidden();
/*
* @param aStyleDisplay: If the caller has this->StyleDisplay(), providing
* it here will improve performance.
*/
bool BackfaceIsHidden(const nsStyleDisplay* aStyleDisplay = nullptr) const {
return StyleDisplayWithOptionalParam(aStyleDisplay)->BackfaceIsHidden();
}
/**

View File

@ -1717,7 +1717,7 @@ needs-focus != 703186-1.html 703186-2.html
fuzzy-if(true,1,21) fuzzy-if(d2d,68,173) fuzzy-if(cocoaWidget,1,170) fails-if(webrender) == 718521.html 718521-ref.html # bug 773482
== 720987.html 720987-ref.html
== 722888-1.html 722888-1-ref.html
fuzzy(1,40000) == 722923-1.html 722923-1-ref.html
fuzzy(2,40000) == 722923-1.html 722923-1-ref.html
== 723484-1.html 723484-1-ref.html
random-if(Android) == 728983-1.html 728983-1-ref.html
== 729143-1.html 729143-1-ref.html

View File

@ -0,0 +1,24 @@
<!DOCTYPE HTML>
<title>Testcase simplified from layout/reftests/css-grid/grid-min-max-content-sizing-002.html</title>
<style type="text/css">
html { overflow: hidden }
body { margin: 0 }
div {
display: inline-block;
border: 1px solid fuchsia;
}
span {
display: block;
border: 4px solid blue;
padding: 0 8px 8px 0;
margin: 0 -8px -8px 0;
}
</style>
<div>
<span>blue should overflow fuchsia on right/bottom</span>
</div>

View File

@ -0,0 +1,37 @@
<!DOCTYPE HTML>
<title>Testcase simplified from layout/reftests/css-grid/grid-min-max-content-sizing-002.html</title>
<style type="text/css">
html { overflow: hidden }
body { margin: 0 }
.grid {
display: grid;
grid-template-columns: minmax(min-content,max-content);
grid-template-rows: minmax(min-content,max-content);
}
.grid > div {
border: 1px solid fuchsia;
}
span {
display: block;
border: 4px solid blue;
width: 100%;
height: 100%;
}
</style>
<div class="grid">
<div>
<span id="s"></span>
</div>
</div>
<script>
var s = document.getElementById("s");
s.offsetWidth; // flush layout
s.textContent = "blue should overflow fuchsia on right/bottom";
</script>

View File

@ -0,0 +1,32 @@
<!DOCTYPE HTML>
<title>Testcase simplified from layout/reftests/css-grid/grid-min-max-content-sizing-002.html</title>
<style type="text/css">
html { overflow: hidden }
body { margin: 0 }
.grid {
display: grid;
grid-template-columns: minmax(min-content,max-content);
grid-template-rows: minmax(min-content,max-content);
}
.grid > div {
border: 1px solid fuchsia;
}
span {
display: block;
border: 4px solid blue;
width: 100%;
height: 100%;
}
</style>
<div class="grid">
<div>
<span>blue should overflow fuchsia on right/bottom</span>
</div>
</div>

View File

@ -275,3 +275,5 @@ asserts(1-10) == grid-fragmentation-dyn4-021.html grid-fragmentation-021-ref.htm
== grid-fragmentation-dyn2-031.html grid-fragmentation-031-ref.html
== bug1306106.html bug1306106-ref.html
== grid-percent-intrinsic-sizing-001.html grid-percent-intrinsic-sizing-001-ref.html
== grid-measuring-reflow-resize-static-001.html grid-measuring-reflow-resize-001-ref.html
== grid-measuring-reflow-resize-dynamic-001.html grid-measuring-reflow-resize-001-ref.html

View File

@ -1305,10 +1305,10 @@ Vector<T, N, AP>::insert(T* aP, U&& aVal)
}
} else {
T oldBack = Move(back());
if (!append(Move(oldBack))) { /* Dup the last element. */
if (!append(Move(oldBack))) {
return nullptr;
}
for (size_t i = oldLength; i > pos; --i) {
for (size_t i = oldLength - 1; i > pos; --i) {
(*this)[i] = Move((*this)[i - 1]);
}
(*this)[pos] = Forward<U>(aVal);

View File

@ -22,6 +22,7 @@ struct mozilla::detail::VectorTesting
static void testReverse();
static void testExtractRawBuffer();
static void testExtractOrCopyRawBuffer();
static void testInsert();
};
void
@ -141,6 +142,15 @@ struct S
destructCount++;
}
S& operator=(S&& rhs) {
j = rhs.j;
rhs.j = 0;
k = Move(rhs.k);
rhs.k.reset();
moveCount++;
return *this;
}
S(const S&) = delete;
S& operator=(const S&) = delete;
};
@ -346,6 +356,47 @@ mozilla::detail::VectorTesting::testExtractOrCopyRawBuffer()
free(buf);
}
void
mozilla::detail::VectorTesting::testInsert()
{
S::resetCounts();
Vector<S, 8> vec;
MOZ_RELEASE_ASSERT(vec.reserve(8));
for (size_t i = 0; i < 7; i++) {
vec.infallibleEmplaceBack(i, i * i);
}
MOZ_RELEASE_ASSERT(vec.length() == 7);
MOZ_ASSERT(vec.reserved() == 8);
MOZ_RELEASE_ASSERT(S::constructCount == 7);
MOZ_RELEASE_ASSERT(S::moveCount == 0);
MOZ_RELEASE_ASSERT(S::destructCount == 0);
S s(42, 43);
MOZ_RELEASE_ASSERT(vec.insert(vec.begin() + 4, Move(s)));
for (size_t i = 0; i < vec.length(); i++) {
const S& s = vec[i];
MOZ_RELEASE_ASSERT(s.k);
if (i < 4) {
MOZ_RELEASE_ASSERT(s.j == i && *s.k == i * i);
} else if (i == 4) {
MOZ_RELEASE_ASSERT(s.j == 42 && *s.k == 43);
} else {
MOZ_RELEASE_ASSERT(s.j == i - 1 && *s.k == (i - 1) * (i - 1));
}
}
MOZ_RELEASE_ASSERT(vec.length() == 8);
MOZ_ASSERT(vec.reserved() == 8);
MOZ_RELEASE_ASSERT(S::constructCount == 8);
MOZ_RELEASE_ASSERT(S::moveCount == 1 /* move in insert() call */ +
1 /* move the back() element */ +
3 /* elements to shift */);
MOZ_RELEASE_ASSERT(S::destructCount == 1);
}
// Declare but leave (permanently) incomplete.
struct Incomplete;
@ -398,4 +449,5 @@ main()
VectorTesting::testReverse();
VectorTesting::testExtractRawBuffer();
VectorTesting::testExtractOrCopyRawBuffer();
VectorTesting::testInsert();
}

View File

@ -2835,6 +2835,8 @@ pref("plugins.click_to_play", false);
#ifdef NIGHTLY_BUILD
// This only supports one hidden ctp plugin, edit nsPluginArray.cpp if adding a second
pref("plugins.navigator.hidden_ctp_plugin", "Shockwave Flash");
#else
pref("plugins.navigator.hidden_ctp_plugin", "");
#endif
// The default value for nsIPluginTag.enabledState (STATE_ENABLED = 2)
pref("plugin.default.state", 2);

View File

@ -30,15 +30,11 @@ EXPORTS.mozilla += [
UNIFIED_SOURCES += [
'nsPrefBranch.cpp',
'nsPrefsFactory.cpp',
'prefapi.cpp',
'Preferences.cpp',
'prefread.cpp',
]
# prefapi.cpp cannot be built in unified mode because it uses plarena.h
SOURCES += [
'prefapi.cpp',
]
include('/ipc/chromium/chromium-config.mozbuild')
FINAL_LIBRARY = 'xul'

View File

@ -15,9 +15,6 @@
#include "nsReadableUtils.h"
#include "nsCRT.h"
#define PL_ARENA_CONST_ALIGN_MASK 3
#include "plarena.h"
#ifdef _WIN32
#include "windows.h"
#endif /* _WIN32 */
@ -25,6 +22,8 @@
#include "plstr.h"
#include "PLDHashTable.h"
#include "plbase64.h"
#include "mozilla/ArenaAllocator.h"
#include "mozilla/ArenaAllocatorExtensions.h"
#include "mozilla/Logging.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/dom/PContent.h"
@ -68,7 +67,7 @@ matchPrefEntry(const PLDHashEntryHdr* entry, const void* key)
}
PLDHashTable* gHashTable;
static PLArenaPool gPrefNameArena;
static ArenaAllocator<8192,4> gPrefNameArena;
static struct CallbackNode* gCallbacks = nullptr;
static bool gIsAnyPrefLocked = false;
@ -91,9 +90,6 @@ static PLDHashTableOps pref_HashTableOps = {
#define PR_ALIGN_OF_WORD PR_ALIGN_OF_POINTER
#endif
// making PrefName arena 8k for nice allocation
#define PREFNAME_ARENA_SIZE 8192
#define WORD_ALIGN_MASK (PR_ALIGN_OF_WORD - 1)
// sanity checking
@ -101,18 +97,6 @@ static PLDHashTableOps pref_HashTableOps = {
#error "PR_ALIGN_OF_WORD must be a power of 2!"
#endif
// equivalent to strdup() - does no error checking,
// we're assuming we're only called with a valid pointer
static char *ArenaStrDup(const char* str, PLArenaPool* aArena)
{
void* mem;
uint32_t len = strlen(str);
PL_ARENA_ALLOCATE(mem, aArena, len+1);
if (mem)
memcpy(mem, str, len+1);
return static_cast<char*>(mem);
}
static PrefsDirtyFunc gDirtyCallback = nullptr;
inline void MakeDirtyCallback()
@ -164,9 +148,6 @@ void PREF_Init()
gHashTable = new PLDHashTable(&pref_HashTableOps,
sizeof(PrefHashEntry),
PREF_HASHTABLE_INITIAL_LENGTH);
PL_INIT_ARENA_POOL(&gPrefNameArena, "PrefNameArena",
PREFNAME_ARENA_SIZE);
}
}
@ -196,7 +177,7 @@ void PREF_CleanupPrefs()
if (gHashTable) {
delete gHashTable;
gHashTable = nullptr;
PL_FinishArenaPool(&gPrefNameArena);
gPrefNameArena.Clear();
}
}
@ -817,7 +798,7 @@ nsresult pref_HashPref(const char *key, PrefValue value, PrefType type, uint32_t
// initialize the pref entry
pref->prefFlags.Reset().SetPrefType(type);
pref->key = ArenaStrDup(key, &gPrefNameArena);
pref->key = ArenaStrdup(key, gPrefNameArena);
memset(&pref->defaultPref, 0, sizeof(pref->defaultPref));
memset(&pref->userPref, 0, sizeof(pref->userPref));
} else if (pref->prefFlags.HasDefault() && !pref->prefFlags.IsPrefType(type)) {
@ -878,7 +859,7 @@ nsresult pref_HashPref(const char *key, PrefValue value, PrefType type, uint32_t
size_t
pref_SizeOfPrivateData(MallocSizeOf aMallocSizeOf)
{
size_t n = PL_SizeOfArenaPoolExcludingPool(&gPrefNameArena, aMallocSizeOf);
size_t n = gPrefNameArena.SizeOfExcludingThis(aMallocSizeOf);
for (struct CallbackNode* node = gCallbacks; node; node = node->next) {
n += aMallocSizeOf(node);
n += aMallocSizeOf(node->domain);
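For reference, a minimal sketch of the ArenaAllocator pattern the file now uses (illustrative only; gExampleArena and DupPrefKey are hypothetical names, and the exact ArenaStrdup overload is as used above):

#include "mozilla/ArenaAllocator.h"
#include "mozilla/ArenaAllocatorExtensions.h"

// 8 KiB chunks with 4-byte alignment, matching gPrefNameArena above.
static mozilla::ArenaAllocator<8192, 4> gExampleArena;

static const char* DupPrefKey(const char* aKey)
{
  // ArenaStrdup copies the string into the arena; the memory is reclaimed
  // in bulk via gExampleArena.Clear() rather than freed per allocation.
  return mozilla::ArenaStrdup(aKey, gExampleArena);
}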

View File

@ -1075,6 +1075,26 @@ NS_IMETHODIMP CacheEntry::GetExpirationTime(uint32_t *aExpirationTime)
return mFile->GetExpirationTime(aExpirationTime);
}
nsresult CacheEntry::GetOnStartTime(uint64_t *aTime)
{
NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
return mFile->GetOnStartTime(aTime);
}
nsresult CacheEntry::GetOnStopTime(uint64_t *aTime)
{
NS_ENSURE_SUCCESS(mFileStatus, NS_ERROR_NOT_AVAILABLE);
return mFile->GetOnStopTime(aTime);
}
nsresult CacheEntry::SetNetworkTimes(uint64_t aOnStartTime, uint64_t aOnStopTime)
{
if (NS_SUCCEEDED(mFileStatus)) {
return mFile->SetNetworkTimes(aOnStartTime, aOnStopTime);
}
return NS_ERROR_NOT_AVAILABLE;
}
NS_IMETHODIMP CacheEntry::GetIsForcedValid(bool *aIsForcedValid)
{
NS_ENSURE_ARG(aIsForcedValid);

View File

@ -1154,7 +1154,7 @@ CacheFile::SetExpirationTime(uint32_t aExpirationTime)
PostWriteTimer();
if (mHandle && !mHandle->IsDoomed())
CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, &aExpirationTime, nullptr);
CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, &aExpirationTime, nullptr, nullptr, nullptr);
return mMetadata->SetExpirationTime(aExpirationTime);
}
@ -1183,7 +1183,7 @@ CacheFile::SetFrecency(uint32_t aFrecency)
PostWriteTimer();
if (mHandle && !mHandle->IsDoomed())
CacheFileIOManager::UpdateIndexEntry(mHandle, &aFrecency, nullptr, nullptr);
CacheFileIOManager::UpdateIndexEntry(mHandle, &aFrecency, nullptr, nullptr, nullptr, nullptr);
return mMetadata->SetFrecency(aFrecency);
}
@ -1198,9 +1198,78 @@ CacheFile::GetFrecency(uint32_t *_retval)
return mMetadata->GetFrecency(_retval);
}
nsresult CacheFile::SetNetworkTimes(uint64_t aOnStartTime, uint64_t aOnStopTime)
{
CacheFileAutoLock lock(this);
LOG(("CacheFile::SetNetworkTimes() this=%p, aOnStartTime=%" PRIu64
", aOnStopTime=%" PRIu64 "", this, aOnStartTime, aOnStopTime));
MOZ_ASSERT(mMetadata);
NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
MOZ_ASSERT(aOnStartTime != kIndexTimeNotAvailable);
MOZ_ASSERT(aOnStopTime != kIndexTimeNotAvailable);
PostWriteTimer();
nsAutoCString onStartTime;
onStartTime.AppendInt(aOnStartTime);
nsresult rv = mMetadata->SetElement("net-response-time-onstart", onStartTime.get());
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
nsAutoCString onStopTime;
onStopTime.AppendInt(aOnStopTime);
rv = mMetadata->SetElement("net-response-time-onstop", onStopTime.get());
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
uint16_t onStartTime16 = aOnStartTime <= kIndexTimeOutOfBound ? aOnStartTime : kIndexTimeOutOfBound;
uint16_t onStopTime16 = aOnStopTime <= kIndexTimeOutOfBound ? aOnStopTime : kIndexTimeOutOfBound;
if (mHandle && !mHandle->IsDoomed()) {
CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, nullptr,
&onStartTime16, &onStopTime16);
}
return NS_OK;
}
nsresult CacheFile::GetOnStartTime(uint64_t *_retval)
{
CacheFileAutoLock lock(this);
MOZ_ASSERT(mMetadata);
const char *onStartTimeStr = mMetadata->GetElement("net-response-time-onstart");
if (!onStartTimeStr) {
return NS_ERROR_NOT_AVAILABLE;
}
nsresult rv;
*_retval = nsCString(onStartTimeStr).ToInteger64(&rv);
MOZ_ASSERT(NS_SUCCEEDED(rv));
return NS_OK;
}
nsresult CacheFile::GetOnStopTime(uint64_t *_retval)
{
CacheFileAutoLock lock(this);
MOZ_ASSERT(mMetadata);
const char *onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
if (!onStopTimeStr) {
return NS_ERROR_NOT_AVAILABLE;
}
nsresult rv;
*_retval = nsCString(onStopTimeStr).ToInteger64(&rv);
MOZ_ASSERT(NS_SUCCEEDED(rv));
return NS_OK;
}
nsresult
CacheFile::SetAltMetadata(const char* aAltMetadata)
{
AssertOwnsLock();
LOG(("CacheFile::SetAltMetadata() this=%p, aAltMetadata=%s",
this, aAltMetadata ? aAltMetadata : ""));
@ -1221,7 +1290,7 @@ CacheFile::SetAltMetadata(const char* aAltMetadata)
}
if (mHandle && !mHandle->IsDoomed()) {
CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, &hasAltData);
CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, &hasAltData, nullptr, nullptr);
}
return rv;
}
@ -2393,7 +2462,23 @@ CacheFile::InitIndexEntry()
bool hasAltData = mMetadata->GetElement(CacheFileUtils::kAltDataKey) ? true : false;
rv = CacheFileIOManager::UpdateIndexEntry(mHandle, &frecency, &expTime, &hasAltData);
static auto toUint16 = [](const char* s) -> uint16_t {
if (s) {
nsresult rv;
uint64_t n64 = nsCString(s).ToInteger64(&rv);
MOZ_ASSERT(NS_SUCCEEDED(rv));
return n64 <= kIndexTimeOutOfBound ? n64 : kIndexTimeOutOfBound;
}
return kIndexTimeNotAvailable;
};
const char *onStartTimeStr = mMetadata->GetElement("net-response-time-onstart");
uint16_t onStartTime = toUint16(onStartTimeStr);
const char *onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
uint16_t onStopTime = toUint16(onStopTimeStr);
rv = CacheFileIOManager::UpdateIndexEntry(mHandle, &frecency, &expTime, &hasAltData, &onStartTime, &onStopTime);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;

View File

@ -102,6 +102,9 @@ public:
nsresult GetExpirationTime(uint32_t *_retval);
nsresult SetFrecency(uint32_t aFrecency);
nsresult GetFrecency(uint32_t *_retval);
nsresult SetNetworkTimes(uint64_t aOnStartTime, uint64_t aOnStopTime);
nsresult GetOnStartTime(uint64_t *_retval);
nsresult GetOnStopTime(uint64_t *_retval);
nsresult GetLastModified(uint32_t *_retval);
nsresult GetLastFetched(uint32_t *_retval);
nsresult GetFetchCount(uint32_t *_retval);

View File

@ -978,7 +978,8 @@ public:
// parsing the entry file, but we must set the filesize here since nobody is
// going to set it if there is no write to the file.
uint32_t sizeInK = mHandle->FileSizeInK();
CacheIndex::UpdateEntry(mHandle->Hash(), nullptr, nullptr, nullptr, &sizeInK);
CacheIndex::UpdateEntry(mHandle->Hash(), nullptr, nullptr, nullptr, nullptr,
nullptr, &sizeInK);
return NS_OK;
}
@ -995,11 +996,15 @@ public:
UpdateIndexEntryEvent(CacheFileHandle *aHandle,
const uint32_t *aFrecency,
const uint32_t *aExpirationTime,
const bool *aHasAltData)
const bool *aHasAltData,
const uint16_t *aOnStartTime,
const uint16_t *aOnStopTime)
: mHandle(aHandle)
, mHasFrecency(false)
, mHasExpirationTime(false)
, mHasHasAltData(false)
, mHasOnStartTime(false)
, mHasOnStopTime(false)
{
if (aFrecency) {
mHasFrecency = true;
@ -1013,6 +1018,14 @@ public:
mHasHasAltData = true;
mHasAltData = *aHasAltData;
}
if (aOnStartTime) {
mHasOnStartTime = true;
mOnStartTime = *aOnStartTime;
}
if (aOnStopTime) {
mHasOnStopTime = true;
mOnStopTime = *aOnStopTime;
}
}
protected:
@ -1031,6 +1044,8 @@ public:
mHasFrecency ? &mFrecency : nullptr,
mHasExpirationTime ? &mExpirationTime : nullptr,
mHasHasAltData ? &mHasAltData : nullptr,
mHasOnStartTime ? &mOnStartTime : nullptr,
mHasOnStopTime ? &mOnStopTime : nullptr,
nullptr);
return NS_OK;
}
@ -1041,10 +1056,14 @@ protected:
bool mHasFrecency;
bool mHasExpirationTime;
bool mHasHasAltData;
bool mHasOnStartTime;
bool mHasOnStopTime;
uint32_t mFrecency;
uint32_t mExpirationTime;
bool mHasAltData;
uint16_t mOnStartTime;
uint16_t mOnStopTime;
};
class MetadataWriteScheduleEvent : public Runnable
@ -2057,7 +2076,8 @@ CacheFileIOManager::WriteInternal(CacheFileHandle *aHandle, int64_t aOffset,
if (oldSizeInK != newSizeInK && !aHandle->IsDoomed() &&
!aHandle->IsSpecialFile()) {
CacheIndex::UpdateEntry(aHandle->Hash(), nullptr, nullptr, nullptr, &newSizeInK);
CacheIndex::UpdateEntry(aHandle->Hash(), nullptr, nullptr, nullptr,
nullptr, nullptr, &newSizeInK);
if (oldSizeInK < newSizeInK) {
EvictIfOverLimitInternal();
@ -2586,7 +2606,8 @@ CacheFileIOManager::TruncateSeekSetEOFInternal(CacheFileHandle *aHandle,
if (oldSizeInK != newSizeInK && !aHandle->IsDoomed() &&
!aHandle->IsSpecialFile()) {
CacheIndex::UpdateEntry(aHandle->Hash(), nullptr, nullptr, nullptr, &newSizeInK);
CacheIndex::UpdateEntry(aHandle->Hash(), nullptr, nullptr, nullptr, nullptr,
nullptr, &newSizeInK);
if (oldSizeInK < newSizeInK) {
EvictIfOverLimitInternal();
@ -2891,7 +2912,8 @@ CacheFileIOManager::OverLimitEvictionInternal()
// failing on one entry forever.
uint32_t frecency = 0;
uint32_t expTime = nsICacheEntry::NO_EXPIRATION_TIME;
rv = CacheIndex::UpdateEntry(&hash, &frecency, &expTime, nullptr, nullptr);
rv = CacheIndex::UpdateEntry(&hash, &frecency, &expTime, nullptr, nullptr,
nullptr, nullptr);
NS_ENSURE_SUCCESS(rv, rv);
consecutiveFailures++;
@ -3556,13 +3578,17 @@ nsresult
CacheFileIOManager::UpdateIndexEntry(CacheFileHandle *aHandle,
const uint32_t *aFrecency,
const uint32_t *aExpirationTime,
const bool *aHasAltData)
const bool *aHasAltData,
const uint16_t *aOnStartTime,
const uint16_t *aOnStopTime)
{
LOG(("CacheFileIOManager::UpdateIndexEntry() [handle=%p, frecency=%s, "
"expirationTime=%s hasAltData=%s]", aHandle,
"expirationTime=%s, hasAltData=%s, onStartTime=%s, onStopTime=%s]", aHandle,
aFrecency ? nsPrintfCString("%u", *aFrecency).get() : "",
aExpirationTime ? nsPrintfCString("%u", *aExpirationTime).get() : "",
aHasAltData ? (*aHasAltData ? "true" : "false") : ""));
aHasAltData ? (*aHasAltData ? "true" : "false") : "",
aOnStartTime ? nsPrintfCString("%u", *aOnStartTime).get() : "",
aOnStopTime ? nsPrintfCString("%u", *aOnStopTime).get() : ""));
nsresult rv;
RefPtr<CacheFileIOManager> ioMan = gInstance;
@ -3576,7 +3602,8 @@ CacheFileIOManager::UpdateIndexEntry(CacheFileHandle *aHandle,
}
RefPtr<UpdateIndexEntryEvent> ev =
new UpdateIndexEntryEvent(aHandle, aFrecency, aExpirationTime, aHasAltData);
new UpdateIndexEntryEvent(aHandle, aFrecency, aExpirationTime, aHasAltData,
aOnStartTime, aOnStopTime);
rv = ioMan->mIOThread->Dispatch(ev, aHandle->mPriority
? CacheIOThread::WRITE_PRIORITY
: CacheIOThread::WRITE);

View File

@ -337,7 +337,9 @@ public:
static nsresult UpdateIndexEntry(CacheFileHandle *aHandle,
const uint32_t *aFrecency,
const uint32_t *aExpirationTime,
const bool *aHasAltData);
const bool *aHasAltData,
const uint16_t *aOnStartTime,
const uint16_t *aOnStopTime);
static nsresult UpdateIndexEntry();

View File

@ -27,7 +27,7 @@
#define kMinUnwrittenChanges 300
#define kMinDumpInterval 20000 // in milliseconds
#define kMaxBufSize 16384
#define kIndexVersion 0x00000004
#define kIndexVersion 0x00000005
#define kUpdateIndexStartDelay 50000 // in milliseconds
#define INDEX_NAME "index"
@ -918,13 +918,18 @@ CacheIndex::UpdateEntry(const SHA1Sum::Hash *aHash,
const uint32_t *aFrecency,
const uint32_t *aExpirationTime,
const bool *aHasAltData,
const uint16_t *aOnStartTime,
const uint16_t *aOnStopTime,
const uint32_t *aSize)
{
LOG(("CacheIndex::UpdateEntry() [hash=%08x%08x%08x%08x%08x, "
"frecency=%s, expirationTime=%s, hasAltData=%s, size=%s]", LOGSHA1(aHash),
"frecency=%s, expirationTime=%s, hasAltData=%s, onStartTime=%s, "
"onStopTime=%s, size=%s]", LOGSHA1(aHash),
aFrecency ? nsPrintfCString("%u", *aFrecency).get() : "",
aExpirationTime ? nsPrintfCString("%u", *aExpirationTime).get() : "",
aHasAltData ? (*aHasAltData ? "true" : "false") : "",
aOnStartTime ? nsPrintfCString("%u", *aOnStartTime).get() : "",
aOnStopTime ? nsPrintfCString("%u", *aOnStopTime).get() : "",
aSize ? nsPrintfCString("%u", *aSize).get() : ""));
MOZ_ASSERT(CacheFileIOManager::IsOnIOThread());
@ -955,7 +960,8 @@ CacheIndex::UpdateEntry(const SHA1Sum::Hash *aHash,
MOZ_ASSERT(index->mPendingUpdates.Count() == 0);
MOZ_ASSERT(entry);
if (!HasEntryChanged(entry, aFrecency, aExpirationTime, aHasAltData, aSize)) {
if (!HasEntryChanged(entry, aFrecency, aExpirationTime, aHasAltData,
aOnStartTime, aOnStopTime, aSize)) {
return NS_OK;
}
@ -971,6 +977,18 @@ CacheIndex::UpdateEntry(const SHA1Sum::Hash *aHash,
entry->SetExpirationTime(*aExpirationTime);
}
if (aHasAltData) {
entry->SetHasAltData(*aHasAltData);
}
if (aOnStartTime) {
entry->SetOnStartTime(*aOnStartTime);
}
if (aOnStopTime) {
entry->SetOnStopTime(*aOnStopTime);
}
if (aSize) {
entry->SetFileSize(*aSize);
}
@ -1007,6 +1025,18 @@ CacheIndex::UpdateEntry(const SHA1Sum::Hash *aHash,
updated->SetExpirationTime(*aExpirationTime);
}
if (aHasAltData) {
updated->SetHasAltData(*aHasAltData);
}
if (aOnStartTime) {
updated->SetOnStartTime(*aOnStartTime);
}
if (aOnStopTime) {
updated->SetOnStopTime(*aOnStopTime);
}
if (aSize) {
updated->SetFileSize(*aSize);
}
@ -1514,6 +1544,8 @@ CacheIndex::HasEntryChanged(CacheIndexEntry *aEntry,
const uint32_t *aFrecency,
const uint32_t *aExpirationTime,
const bool *aHasAltData,
const uint16_t *aOnStartTime,
const uint16_t *aOnStopTime,
const uint32_t *aSize)
{
if (aFrecency && *aFrecency != aEntry->GetFrecency()) {
@ -1528,6 +1560,14 @@ CacheIndex::HasEntryChanged(CacheIndexEntry *aEntry,
return true;
}
if (aOnStartTime && *aOnStartTime != aEntry->GetOnStartTime()) {
return true;
}
if (aOnStopTime && *aOnStopTime != aEntry->GetOnStopTime()) {
return true;
}
if (aSize &&
(*aSize & CacheIndexEntry::kFileSizeMask) != aEntry->GetFileSize()) {
return true;
@ -2672,6 +2712,20 @@ CacheIndex::InitEntryFromDiskData(CacheIndexEntry *aEntry,
}
aEntry->SetHasAltData(hasAltData);
static auto getUint16MetaData = [&aMetaData](const char *key) -> uint16_t {
const char* s64 = aMetaData->GetElement(key);
if (s64) {
nsresult rv;
uint64_t n64 = nsCString(s64).ToInteger64(&rv);
MOZ_ASSERT(NS_SUCCEEDED(rv));
return n64 <= kIndexTimeOutOfBound ? n64 : kIndexTimeOutOfBound;
}
return kIndexTimeNotAvailable;
};
aEntry->SetOnStartTime(getUint16MetaData("net-response-time-onstart"));
aEntry->SetOnStopTime(getUint16MetaData("net-response-time-onstop"));
aEntry->SetFileSize(static_cast<uint32_t>(
std::min(static_cast<int64_t>(PR_UINT32_MAX),
(aFileSize + 0x3FF) >> 10)));

View File

@ -38,6 +38,9 @@ class CacheFileMetadata;
class FileOpenHelper;
class CacheIndexIterator;
const uint16_t kIndexTimeNotAvailable = 0xFFFFU;
const uint16_t kIndexTimeOutOfBound = 0xFFFEU;
typedef struct {
// Version of the index. The index must be ignored and deleted when the file
// on disk was written with a newer version.
@ -62,11 +65,14 @@ static_assert(
sizeof(CacheIndexHeader::mIsDirty) == sizeof(CacheIndexHeader),
"Unexpected sizeof(CacheIndexHeader)!");
#pragma pack(push, 4)
struct CacheIndexRecord {
SHA1Sum::Hash mHash;
uint32_t mFrecency;
OriginAttrsHash mOriginAttrsHash;
uint32_t mExpirationTime;
uint16_t mOnStartTime;
uint16_t mOnStopTime;
/*
* 1000 0000 0000 0000 0000 0000 0000 0000 : initialized
@ -85,13 +91,17 @@ struct CacheIndexRecord {
: mFrecency(0)
, mOriginAttrsHash(0)
, mExpirationTime(nsICacheEntry::NO_EXPIRATION_TIME)
, mOnStartTime(kIndexTimeNotAvailable)
, mOnStopTime(kIndexTimeNotAvailable)
, mFlags(0)
{}
};
#pragma pack(pop)
static_assert(
sizeof(CacheIndexRecord::mHash) + sizeof(CacheIndexRecord::mFrecency) +
sizeof(CacheIndexRecord::mOriginAttrsHash) + sizeof(CacheIndexRecord::mExpirationTime) +
sizeof(CacheIndexRecord::mOnStartTime) + sizeof(CacheIndexRecord::mOnStopTime) +
sizeof(CacheIndexRecord::mFlags) == sizeof(CacheIndexRecord),
"Unexpected sizeof(CacheIndexRecord)!");
@ -150,6 +160,8 @@ public:
mRec->mFrecency = aOther.mRec->mFrecency;
mRec->mExpirationTime = aOther.mRec->mExpirationTime;
mRec->mOriginAttrsHash = aOther.mRec->mOriginAttrsHash;
mRec->mOnStartTime = aOther.mRec->mOnStartTime;
mRec->mOnStopTime = aOther.mRec->mOnStopTime;
mRec->mFlags = aOther.mRec->mFlags;
return *this;
}
@ -159,6 +171,8 @@ public:
mRec->mFrecency = 0;
mRec->mExpirationTime = nsICacheEntry::NO_EXPIRATION_TIME;
mRec->mOriginAttrsHash = 0;
mRec->mOnStartTime = kIndexTimeNotAvailable;
mRec->mOnStopTime = kIndexTimeNotAvailable;
mRec->mFlags = 0;
}
@ -167,6 +181,8 @@ public:
MOZ_ASSERT(mRec->mFrecency == 0);
MOZ_ASSERT(mRec->mExpirationTime == nsICacheEntry::NO_EXPIRATION_TIME);
MOZ_ASSERT(mRec->mOriginAttrsHash == 0);
MOZ_ASSERT(mRec->mOnStartTime == kIndexTimeNotAvailable);
MOZ_ASSERT(mRec->mOnStopTime == kIndexTimeNotAvailable);
// When we init the entry it must be fresh and may be dirty
MOZ_ASSERT((mRec->mFlags & ~kDirtyMask) == kFreshMask);
@ -216,6 +232,18 @@ public:
}
bool GetHasAltData() const { return !!(mRec->mFlags & kHasAltDataMask); }
void SetOnStartTime(uint16_t aTime)
{
mRec->mOnStartTime = aTime;
}
uint16_t GetOnStartTime() const { return mRec->mOnStartTime; }
void SetOnStopTime(uint16_t aTime)
{
mRec->mOnStopTime = aTime;
}
uint16_t GetOnStopTime() const { return mRec->mOnStopTime; }
// Sets filesize in kilobytes.
void SetFileSize(uint32_t aFileSize)
{
@ -246,6 +274,8 @@ public:
NetworkEndian::writeUint32(ptr, mRec->mFrecency); ptr += sizeof(uint32_t);
NetworkEndian::writeUint64(ptr, mRec->mOriginAttrsHash); ptr += sizeof(uint64_t);
NetworkEndian::writeUint32(ptr, mRec->mExpirationTime); ptr += sizeof(uint32_t);
NetworkEndian::writeUint16(ptr, mRec->mOnStartTime); ptr += sizeof(uint16_t);
NetworkEndian::writeUint16(ptr, mRec->mOnStopTime); ptr += sizeof(uint16_t);
// Dirty and fresh flags should never go to disk, since they make sense only
// during current session.
NetworkEndian::writeUint32(ptr, mRec->mFlags & ~(kDirtyMask | kFreshMask));
@ -258,16 +288,20 @@ public:
mRec->mFrecency = NetworkEndian::readUint32(ptr); ptr += sizeof(uint32_t);
mRec->mOriginAttrsHash = NetworkEndian::readUint64(ptr); ptr += sizeof(uint64_t);
mRec->mExpirationTime = NetworkEndian::readUint32(ptr); ptr += sizeof(uint32_t);
mRec->mOnStartTime = NetworkEndian::readUint16(ptr); ptr += sizeof(uint16_t);
mRec->mOnStopTime = NetworkEndian::readUint16(ptr); ptr += sizeof(uint16_t);
mRec->mFlags = NetworkEndian::readUint32(ptr);
}
void Log() const {
LOG(("CacheIndexEntry::Log() [this=%p, hash=%08x%08x%08x%08x%08x, fresh=%u,"
" initialized=%u, removed=%u, dirty=%u, anonymous=%u, "
"originAttrsHash=%" PRIx64 ", frecency=%u, expirationTime=%u, size=%u]",
"originAttrsHash=%" PRIx64 ", frecency=%u, expirationTime=%u, "
"hasAltData=%u, onStartTime=%u, onStopTime=%u, size=%u]",
this, LOGSHA1(mRec->mHash), IsFresh(), IsInitialized(), IsRemoved(),
IsDirty(), Anonymous(), OriginAttrsHash(), GetFrecency(),
GetExpirationTime(), GetFileSize()));
GetExpirationTime(), GetHasAltData(), GetOnStartTime(),
GetOnStopTime(), GetFileSize()));
}
static bool RecordMatchesLoadContextInfo(CacheIndexRecord *aRec,
@ -357,7 +391,8 @@ public:
void InitNew()
{
mUpdateFlags = kFrecencyUpdatedMask | kExpirationUpdatedMask |
kFileSizeUpdatedMask;
kHasAltDataUpdatedMask | kOnStartTimeUpdatedMask |
kOnStopTimeUpdatedMask | kFileSizeUpdatedMask;
CacheIndexEntry::InitNew();
}
@ -379,6 +414,18 @@ public:
CacheIndexEntry::SetHasAltData(aHasAltData);
}
void SetOnStartTime(uint16_t aTime)
{
mUpdateFlags |= kOnStartTimeUpdatedMask;
CacheIndexEntry::SetOnStartTime(aTime);
}
void SetOnStopTime(uint16_t aTime)
{
mUpdateFlags |= kOnStopTimeUpdatedMask;
CacheIndexEntry::SetOnStopTime(aTime);
}
void SetFileSize(uint32_t aFileSize)
{
mUpdateFlags |= kFileSizeUpdatedMask;
@ -395,7 +442,12 @@ public:
aDst->mRec->mExpirationTime = mRec->mExpirationTime;
}
aDst->mRec->mOriginAttrsHash = mRec->mOriginAttrsHash;
if (mUpdateFlags & kOnStartTimeUpdatedMask) {
aDst->mRec->mOnStartTime = mRec->mOnStartTime;
}
if (mUpdateFlags & kOnStopTimeUpdatedMask) {
aDst->mRec->mOnStopTime = mRec->mOnStopTime;
}
if (mUpdateFlags & kHasAltDataUpdatedMask &&
((aDst->mRec->mFlags ^ mRec->mFlags) & kHasAltDataMask)) {
// Toggle the bit if we need to.
@ -417,6 +469,8 @@ private:
static const uint32_t kExpirationUpdatedMask = 0x00000002;
static const uint32_t kFileSizeUpdatedMask = 0x00000004;
static const uint32_t kHasAltDataUpdatedMask = 0x00000008;
static const uint32_t kOnStartTimeUpdatedMask = 0x00000010;
static const uint32_t kOnStopTimeUpdatedMask = 0x00000020;
uint32_t mUpdateFlags;
};
@ -658,6 +712,8 @@ public:
const uint32_t *aFrecency,
const uint32_t *aExpirationTime,
const bool *aHasAltData,
const uint16_t *aOnStartTime,
const uint16_t *aOnStopTime,
const uint32_t *aSize);
// Remove all entries from the index. Called when clearing the whole cache.
@ -758,6 +814,8 @@ private:
const uint32_t *aFrecency,
const uint32_t *aExpirationTime,
const bool *aHasAltData,
const uint16_t *aOnStartTime,
const uint16_t *aOnStopTime,
const uint32_t *aSize);
// Merge all pending operations from mPendingUpdates into mIndex.

View File

@ -135,6 +135,18 @@ public:
{
return mOldInfo->GetDataSize(aDataSize);
}
NS_IMETHOD GetOnStartTime(uint64_t *aTime) override
{
return NS_ERROR_NOT_IMPLEMENTED;
}
NS_IMETHOD GetOnStopTime(uint64_t *aTime) override
{
return NS_ERROR_NOT_IMPLEMENTED;
}
NS_IMETHOD SetNetworkTimes(uint64_t aOnStartTime, uint64_t aOnStopTime) override
{
return NS_ERROR_NOT_IMPLEMENTED;
}
NS_IMETHOD GetLoadContextInfo(nsILoadContextInfo** aInfo) override
{
return NS_ERROR_NOT_IMPLEMENTED;

View File

@ -60,6 +60,19 @@ interface nsICacheEntry : nsISupports
*/
void setExpirationTime(in uint32_t expirationTime);
/**
* Get the last network response times for onStartRequest/onStopRequest (in ms).
* @throws
* - NS_ERROR_NOT_AVAILABLE if onStartTime/onStopTime does not exist.
*/
readonly attribute uint64_t onStartTime;
readonly attribute uint64_t onStopTime;
/**
* Set the network response times for onStartRequest/onStopRequest (in ms).
*/
void setNetworkTimes(in uint64_t onStartTime, in uint64_t onStopTime);
/**
* This method is intended to override the per-spec cache validation
* decisions for a duration specified in seconds. The current state can

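A minimal sketch of how a consumer of the new attributes might look (hypothetical fragment, assuming entry is an nsICacheEntry and the timestamps are mozilla::TimeStamp values; the real caller is nsHttpChannel, shown in the next hunks):

// Store the elapsed network times, in milliseconds since AsyncOpen, then
// read them back later; the getters return NS_ERROR_NOT_AVAILABLE when no
// previous load recorded them.
uint64_t onStart = (onStartTimestamp - asyncOpenTime).ToMilliseconds();
uint64_t onStop = (TimeStamp::Now() - asyncOpenTime).ToMilliseconds();
Unused << entry->SetNetworkTimes(onStart, onStop);

uint64_t storedOnStart = 0;
if (NS_SUCCEEDED(entry->GetOnStartTime(&storedOnStart))) {
  // storedOnStart can feed e.g. the net-vs-cache telemetry comparison.
}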
View File

@ -6915,13 +6915,9 @@ nsHttpChannel::OnStopRequest(nsIRequest *request, nsISupports *ctxt, nsresult st
if (request == mTransactionPump && mCacheEntry && !mDidReval &&
!mCustomConditionalRequest &&
!mAsyncOpenTime.IsNull() && !mOnStartRequestTimestamp.IsNull()) {
nsAutoCString onStartTime;
onStartTime.AppendInt( (uint64_t) (mOnStartRequestTimestamp - mAsyncOpenTime).ToMilliseconds());
mCacheEntry->SetMetaDataElement("net-response-time-onstart", onStartTime.get());
nsAutoCString responseTime;
responseTime.AppendInt( (uint64_t) (TimeStamp::Now() - mAsyncOpenTime).ToMilliseconds());
mCacheEntry->SetMetaDataElement("net-response-time-onstop", responseTime.get());
uint64_t onStartTime = (mOnStartRequestTimestamp - mAsyncOpenTime).ToMilliseconds();
uint64_t onStopTime = (TimeStamp::Now() - mAsyncOpenTime).ToMilliseconds();
Unused << mCacheEntry->SetNetworkTimes(onStartTime, onStopTime);
}
// at this point, we're done with the transaction
@ -8609,25 +8605,13 @@ nsHttpChannel::ReportNetVSCacheTelemetry()
return;
}
nsXPIDLCString tmpStr;
rv = mCacheEntry->GetMetaDataElement("net-response-time-onstart",
getter_Copies(tmpStr));
if (NS_FAILED(rv)) {
return;
}
uint64_t onStartNetTime = tmpStr.ToInteger64(&rv);
if (NS_FAILED(rv)) {
uint64_t onStartNetTime = 0;
if (NS_FAILED(mCacheEntry->GetOnStartTime(&onStartNetTime))) {
return;
}
tmpStr.Truncate();
rv = mCacheEntry->GetMetaDataElement("net-response-time-onstop",
getter_Copies(tmpStr));
if (NS_FAILED(rv)) {
return;
}
uint64_t onStopNetTime = tmpStr.ToInteger64(&rv);
if (NS_FAILED(rv)) {
uint64_t onStopNetTime = 0;
if (NS_FAILED(mCacheEntry->GetOnStopTime(&onStopNetTime))) {
return;
}

View File

@ -677,6 +677,29 @@ nsHttpConnectionMgr::CloseIdleConnection(nsHttpConnection *conn)
return NS_OK;
}
nsresult
nsHttpConnectionMgr::RemoveIdleConnection(nsHttpConnection *conn)
{
MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
LOG(("nsHttpConnectionMgr::RemoveIdleConnection %p conn=%p",
this, conn));
if (!conn->ConnectionInfo()) {
return NS_ERROR_UNEXPECTED;
}
nsConnectionEntry *ent = LookupConnectionEntry(conn->ConnectionInfo(),
conn, nullptr);
if (!ent || !ent->mIdleConns.RemoveElement(conn)) {
return NS_ERROR_UNEXPECTED;
}
mNumIdleConns--;
return NS_OK;
}
// This function lets a connection, after completing the NPN phase,
// report whether or not it is using spdy through the usingSpdy
// argument. It would not be necessary if NPN were driven out of
@ -920,14 +943,10 @@ nsHttpConnectionMgr::DispatchPendingQ(nsTArray<RefPtr<nsHttpConnectionMgr::Pendi
MOZ_ASSERT(!pendingTransInfo->mActiveConn);
RefPtr<nsHalfOpenSocket> halfOpen =
do_QueryReferent(pendingTransInfo->mHalfOpen);
LOG(("nsHttpConnectionMgr::ProcessPendingQForEntry "
"[trans=%p, halfOpen=%p]\n",
pendingTransInfo->mTransaction.get(), halfOpen.get()));
if (halfOpen) {
// The half open socket was made for this transaction, in
// that case ent->mHalfOpens[j]->Transaction() == trans or
// the half open socket was opened speculatively and this
// transaction took it (in this case it must be:
// ent->mHalfOpens[j]->Transaction().IsNullTransaction())
MOZ_ASSERT(halfOpen->Transaction()->IsNullTransaction() ||
halfOpen->Transaction() == pendingTransInfo->mTransaction);
alreadyHalfOpenOrWaitingForTLS = true;
} else {
// If we have not found the halfOpen socket, remove the pointer.
@ -937,6 +956,9 @@ nsHttpConnectionMgr::DispatchPendingQ(nsTArray<RefPtr<nsHttpConnectionMgr::Pendi
MOZ_ASSERT(!pendingTransInfo->mHalfOpen);
RefPtr<nsHttpConnection> activeConn =
do_QueryReferent(pendingTransInfo->mActiveConn);
LOG(("nsHttpConnectionMgr::ProcessPendingQForEntry "
"[trans=%p, activeConn=%p]\n",
pendingTransInfo->mTransaction.get(), activeConn.get()));
// Check if this transaction claimed a connection that is still
// performing tls handshake with a NullHttpTransaction or it is between
// finishing tls and reclaiming (When nullTrans finishes tls handshake,
@ -1266,34 +1288,17 @@ nsHttpConnectionMgr::MakeNewConnection(nsConnectionEntry *ent,
uint32_t halfOpenLength = ent->mHalfOpens.Length();
for (uint32_t i = 0; i < halfOpenLength; i++) {
if (ent->mHalfOpens[i]->IsSpeculative()) {
// We've found a speculative connection in the half
// open list. Remove the speculative bit from it and that
// connection can later be used for this transaction
// (or another one in the pending queue) - we don't
// need to open a new connection here.
if (ent->mHalfOpens[i]->Claim()) {
// We've found a speculative connection or a connection that
// is free to be used in the half open list.
// A free-to-use connection is a connection that was opened
// for a concrete transaction, but that transaction ended up
// using another connection.
LOG(("nsHttpConnectionMgr::MakeNewConnection [ci = %s]\n"
"Found a speculative half open connection\n",
"Found a speculative or a free-to-use half open connection\n",
ent->mConnInfo->HashKey().get()));
uint32_t flags;
ent->mHalfOpens[i]->SetSpeculative(false);
pendingTransInfo->mHalfOpen =
do_GetWeakReference(static_cast<nsISupportsWeakReference*>(ent->mHalfOpens[i]));
nsISocketTransport *transport = ent->mHalfOpens[i]->SocketTransport();
if (transport && NS_SUCCEEDED(transport->GetConnectionFlags(&flags))) {
flags &= ~nsISocketTransport::DISABLE_RFC1918;
transport->SetConnectionFlags(flags);
}
Telemetry::AutoCounter<Telemetry::HTTPCONNMGR_USED_SPECULATIVE_CONN> usedSpeculativeConn;
++usedSpeculativeConn;
if (ent->mHalfOpens[i]->IsFromPredictor()) {
Telemetry::AutoCounter<Telemetry::PREDICTOR_TOTAL_PRECONNECTS_USED> totalPreconnectsUsed;
++totalPreconnectsUsed;
}
// return OK because we have essentially opened a new connection
// by converting a speculative half-open to general use
return NS_OK;
@ -1887,13 +1892,12 @@ nsHttpConnectionMgr::ReleaseClaimedSockets(nsConnectionEntry *ent,
if (pendingTransInfo->mHalfOpen) {
RefPtr<nsHalfOpenSocket> halfOpen =
do_QueryReferent(pendingTransInfo->mHalfOpen);
LOG(("nsHttpConnectionMgr::ReleaseClaimedSockets "
"[trans=%p halfOpen=%p]",
pendingTransInfo->mTransaction.get(),
halfOpen.get()));
if (halfOpen) {
if (halfOpen->Transaction() &&
halfOpen->Transaction()->IsNullTransaction()) {
LOG(("nsHttpConnectionMgr::ReleaseClaimedSockets - mark halfOpen %p "
"speculative again.", halfOpen.get()));
halfOpen->SetSpeculative(true);
}
halfOpen->Unclaim();
}
pendingTransInfo->mHalfOpen = nullptr;
} else if (pendingTransInfo->mActiveConn) {
@ -1919,21 +1923,16 @@ nsHttpConnectionMgr::CreateTransport(nsConnectionEntry *ent,
PendingTransactionInfo *pendingTransInfo)
{
MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
MOZ_ASSERT((speculative && !pendingTransInfo) ||
(!speculative && pendingTransInfo));
RefPtr<nsHalfOpenSocket> sock = new nsHalfOpenSocket(ent, trans, caps,
speculative,
isFromPredictor);
RefPtr<nsHalfOpenSocket> sock = new nsHalfOpenSocket(ent, trans, caps);
if (speculative) {
sock->SetSpeculative(true);
sock->SetAllow1918(allow1918);
Telemetry::AutoCounter<Telemetry::HTTPCONNMGR_TOTAL_SPECULATIVE_CONN> totalSpeculativeConn;
++totalSpeculativeConn;
if (isFromPredictor) {
sock->SetIsFromPredictor(true);
Telemetry::AutoCounter<Telemetry::PREDICTOR_TOTAL_PRECONNECTS_CREATED> totalPreconnectsCreated;
++totalPreconnectsCreated;
}
}
// The socket stream holds the reference to the half open
// socket - so if the stream fails to init the half open
// will go away.
@ -1943,6 +1942,8 @@ nsHttpConnectionMgr::CreateTransport(nsConnectionEntry *ent,
if (pendingTransInfo) {
pendingTransInfo->mHalfOpen =
do_GetWeakReference(static_cast<nsISupportsWeakReference*>(sock));
DebugOnly<bool> claimed = sock->Claim();
MOZ_ASSERT(claimed);
}
ent->mHalfOpens.AppendElement(sock);
@ -2291,8 +2292,6 @@ nsHttpConnectionMgr::OnMsgCancelTransaction(int32_t reason, ARefBase *param)
RefPtr<nsHalfOpenSocket> half =
do_QueryReferent(pendingTransInfo->mHalfOpen);
if (half) {
MOZ_ASSERT(trans == half->Transaction() ||
half->Transaction()->IsNullTransaction());
half->Abandon();
}
pendingTransInfo->mHalfOpen = nullptr;
@ -3051,21 +3050,35 @@ NS_INTERFACE_MAP_END
nsHttpConnectionMgr::
nsHalfOpenSocket::nsHalfOpenSocket(nsConnectionEntry *ent,
nsAHttpTransaction *trans,
uint32_t caps)
uint32_t caps,
bool speculative,
bool isFromPredictor)
: mEnt(ent)
, mTransaction(trans)
, mDispatchedMTransaction(false)
, mCaps(caps)
, mSpeculative(false)
, mIsFromPredictor(false)
, mSpeculative(speculative)
, mIsFromPredictor(isFromPredictor)
, mAllow1918(true)
, mHasConnected(false)
, mPrimaryConnectedOK(false)
, mBackupConnectedOK(false)
, mFreeToUse(true)
, mPrimaryStreamStatus(NS_OK)
{
MOZ_ASSERT(ent && trans, "constructor with null arguments");
LOG(("Creating nsHalfOpenSocket [this=%p trans=%p ent=%s key=%s]\n",
this, trans, ent->mConnInfo->Origin(), ent->mConnInfo->HashKey().get()));
if (speculative) {
Telemetry::AutoCounter<Telemetry::HTTPCONNMGR_TOTAL_SPECULATIVE_CONN> totalSpeculativeConn;
++totalSpeculativeConn;
if (isFromPredictor) {
Telemetry::AutoCounter<Telemetry::PREDICTOR_TOTAL_PRECONNECTS_CREATED> totalPreconnectsCreated;
++totalPreconnectsCreated;
}
}
}
nsHttpConnectionMgr::nsHalfOpenSocket::~nsHalfOpenSocket()
@ -3242,8 +3255,6 @@ nsresult
nsHttpConnectionMgr::nsHalfOpenSocket::SetupBackupStreams()
{
MOZ_ASSERT(mTransaction);
MOZ_ASSERT(!mTransaction->IsNullTransaction(),
"null transactions dont have backup streams");
mBackupSynStarted = TimeStamp::Now();
nsresult rv = SetupStreams(getter_AddRefs(mBackupTransport),
@ -3267,7 +3278,7 @@ nsHttpConnectionMgr::nsHalfOpenSocket::SetupBackupTimer()
{
uint16_t timeout = gHttpHandler->GetIdleSynTimeout();
MOZ_ASSERT(!mSynTimer, "timer already initd");
if (timeout && !mTransaction->IsDone() && !mTransaction->IsNullTransaction()) {
if (timeout && !mSpeculative) {
// Setup the timer that will establish a backup socket
// if we do not get a writable event on the main one.
// We do this because a lost SYN takes a very long time
@ -3362,8 +3373,6 @@ nsHttpConnectionMgr::nsHalfOpenSocket::Notify(nsITimer *timer)
{
MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
MOZ_ASSERT(timer == mSynTimer, "wrong timer");
MOZ_ASSERT(mTransaction && !mTransaction->IsNullTransaction(),
"null transactions dont have backup streams");
DebugOnly<nsresult> rv = SetupBackupStreams();
MOZ_ASSERT(NS_SUCCEEDED(rv));
@ -3446,8 +3455,6 @@ nsHalfOpenSocket::OnOutputStreamReady(nsIAsyncOutputStream *out)
mStreamIn = nullptr;
mSocketTransport = nullptr;
} else if (out == mBackupStreamOut) {
MOZ_ASSERT(!mTransaction->IsNullTransaction(),
"null transactions dont have backup streams");
TimeDuration rtt = TimeStamp::Now() - mBackupSynStarted;
rv = conn->Init(mEnt->mConnInfo,
gHttpHandler->ConnMgr()->mMaxRequestDelay,
@ -3528,6 +3535,38 @@ nsHalfOpenSocket::OnOutputStreamReady(nsIAsyncOutputStream *out)
LOG(("nsHalfOpenSocket::OnOutputStreamReady no transaction match "
"returning conn %p to pool\n", conn.get()));
gHttpHandler->ConnMgr()->OnMsgReclaimConnection(0, conn);
// We expect that there is at least one transaction in the pending
// queue that can take this connection, but it can happen that
// all transactions are blocked or have already taken other idle
// connections. In that case the connection has been added to the
// idle queue.
// If the connection is in the idle queue but it is using ssl, make
// a nulltransaction for it to finish ssl handshake!
// !!! It can be that mEnt is null after OnMsgReclaimConnection.!!!
if (mEnt &&
mEnt->mConnInfo->FirstHopSSL() &&
!mEnt->mConnInfo->UsingConnect()) {
int32_t idx = mEnt->mIdleConns.IndexOf(conn);
if (idx != -1) {
DebugOnly<nsresult> rv = gHttpHandler->ConnMgr()->RemoveIdleConnection(conn);
MOZ_ASSERT(NS_SUCCEEDED(rv));
conn->EndIdleMonitoring();
RefPtr<nsAHttpTransaction> trans;
if (mTransaction->IsNullTransaction() &&
!mDispatchedMTransaction) {
mDispatchedMTransaction = true;
trans = mTransaction;
} else {
trans = new NullHttpTransaction(mEnt->mConnInfo,
callbacks, mCaps);
}
gHttpHandler->ConnMgr()->AddActiveConn(conn, mEnt);
rv = gHttpHandler->ConnMgr()->
DispatchAbstractTransaction(mEnt, trans, mCaps, conn, 0);
}
}
}
}
@ -3574,6 +3613,8 @@ nsHttpConnectionMgr::nsHalfOpenSocket::OnTransportStatus(nsITransport *trans,
return NS_OK;
}
mPrimaryStreamStatus = status;
// if we are doing spdy coalescing and haven't recorded the ip address
// for this entry before then make the hash key if our dns lookup
// just completed. We can't do coalescing if using a proxy because the
@ -3659,6 +3700,47 @@ nsHttpConnectionMgr::nsHalfOpenSocket::GetInterface(const nsIID &iid,
return NS_ERROR_NO_INTERFACE;
}
bool
nsHttpConnectionMgr::nsHalfOpenSocket::Claim()
{
if (mSpeculative) {
mSpeculative = false;
uint32_t flags;
if (mSocketTransport && NS_SUCCEEDED(mSocketTransport->GetConnectionFlags(&flags))) {
flags &= ~nsISocketTransport::DISABLE_RFC1918;
mSocketTransport->SetConnectionFlags(flags);
}
Telemetry::AutoCounter<Telemetry::HTTPCONNMGR_USED_SPECULATIVE_CONN> usedSpeculativeConn;
++usedSpeculativeConn;
if (mIsFromPredictor) {
Telemetry::AutoCounter<Telemetry::PREDICTOR_TOTAL_PRECONNECTS_USED> totalPreconnectsUsed;
++totalPreconnectsUsed;
}
if ((mPrimaryStreamStatus == NS_NET_STATUS_CONNECTING_TO) &&
mEnt && !mBackupTransport && !mSynTimer) {
SetupBackupTimer();
}
}
if (mFreeToUse) {
mFreeToUse = false;
return true;
}
return false;
}
void
nsHttpConnectionMgr::nsHalfOpenSocket::Unclaim()
{
MOZ_ASSERT(!mSpeculative && !mFreeToUse);
// We will keep the backup timer running. Most probably this halfOpen will
// be used by the transaction from which this transaction took the halfOpen
// (this happens because of transaction priority).
mFreeToUse = true;
}
already_AddRefed<nsHttpConnection>
ConnectionHandle::TakeHttpConnection()

View File

@ -184,6 +184,7 @@ public:
// the idle connection list. It is called when the idle connection detects
// that the network peer has closed the transport.
MOZ_MUST_USE nsresult CloseIdleConnection(nsHttpConnection *);
MOZ_MUST_USE nsresult RemoveIdleConnection(nsHttpConnection *);
// The connection manager needs to know when a normal HTTP connection has been
// upgraded to SPDY because the dispatch and idle semantics are a little
@ -330,7 +331,9 @@ private:
nsHalfOpenSocket(nsConnectionEntry *ent,
nsAHttpTransaction *trans,
uint32_t caps);
uint32_t caps,
bool speculative,
bool isFromPredictor);
MOZ_MUST_USE nsresult SetupStreams(nsISocketTransport **,
nsIAsyncInputStream **,
@ -348,10 +351,8 @@ private:
nsAHttpTransaction *Transaction() { return mTransaction; }
bool IsSpeculative() { return mSpeculative; }
void SetSpeculative(bool val) { mSpeculative = val; }
bool IsFromPredictor() { return mIsFromPredictor; }
void SetIsFromPredictor(bool val) { mIsFromPredictor = val; }
bool Allow1918() { return mAllow1918; }
void SetAllow1918(bool val) { mAllow1918 = val; }
@ -359,6 +360,9 @@ private:
bool HasConnected() { return mHasConnected; }
void PrintDiagnostics(nsCString &log);
bool Claim();
void Unclaim();
private:
// To find out whether |mTransaction| is still in the connection entry's
// pending queue. If the transaction is found and |removeWhenFound| is
@ -405,6 +409,13 @@ private:
bool mPrimaryConnectedOK;
bool mBackupConnectedOK;
// An nsHalfOpenSocket can be made for a concrete non-null transaction,
// but that transaction can be dispatched to another connection. In that
// case this half-open socket is marked free to be claimed by other
// transactions.
bool mFreeToUse;
nsresult mPrimaryStreamStatus;
};
friend class nsHalfOpenSocket;

View File

@ -10,4 +10,3 @@
*/
#error "Do not include this header file."

View File

@ -29,6 +29,5 @@ check out the :doc:`how-to section <how-tos>`.
docker-images
cron
actions
action-spec
how-tos
reference

View File

@ -1,5 +1,5 @@
[flake8]
# See http://pep8.readthedocs.io/en/latest/intro.html#configuration
ignore = E121, E123, E126, E129, E133, E226, E241, E242, E704, W503, E402, E501, E202, W601
ignore = E121, E123, E126, E129, E133, E226, E241, E242, E704, W503, E402, E501, W601
max-line-length = 99
filename = *.py, +.lint

View File

@ -4525,6 +4525,32 @@ this.XPIProvider = {
return true;
},
/**
* Determine if an add-on should be blocking multiple content processes.
*
* @param aAddon
* The add-on to test
* @return true if enabling the add-on should block multiple content processes.
*/
isBlockingE10sMulti(aAddon) {
if (aAddon.type != "extension")
return false;
// The hotfix is exempt
let hotfixID = Preferences.get(PREF_EM_HOTFIX_ID, undefined);
if (hotfixID && hotfixID == aAddon.id)
return false;
// System add-ons are exempt
let locName = aAddon._installLocation ? aAddon._installLocation.name
: undefined;
if (locName == KEY_APP_SYSTEM_DEFAULTS ||
locName == KEY_APP_SYSTEM_ADDONS)
return false;
return aAddon.bootstrap;
},
/**
* In some cases having add-ons active blocks e10s but turning off e10s
* requires a restart so some add-ons that are normally restartless will

View File

@ -57,6 +57,7 @@ const PREF_PENDING_OPERATIONS = "extensions.pendingOperations";
const PREF_EM_ENABLED_ADDONS = "extensions.enabledAddons";
const PREF_EM_AUTO_DISABLED_SCOPES = "extensions.autoDisableScopes";
const PREF_E10S_BLOCKED_BY_ADDONS = "extensions.e10sBlockedByAddons";
const PREF_E10S_MULTI_BLOCKED_BY_ADDONS = "extensions.e10sMultiBlockedByAddons";
const PREF_E10S_HAS_NONEXEMPT_ADDON = "extensions.e10s.rollout.hasAddon";
const KEY_APP_PROFILE = "app-profile";
@ -437,6 +438,7 @@ this.XPIDatabase = {
}
this.updateAddonsBlockingE10s();
this.updateAddonsBlockingE10sMulti();
let promise = this._deferredSave.saveChanges();
if (!this._schemaVersionSet) {
this._schemaVersionSet = true;
@ -1372,6 +1374,20 @@ this.XPIDatabase = {
Preferences.set(PREF_E10S_BLOCKED_BY_ADDONS, blockE10s);
},
updateAddonsBlockingE10sMulti() {
let blockMulti = false;
for (let [, addon] of this.addonDB) {
let active = (addon.visible && !addon.disabled && !addon.pendingUninstall);
if (active && XPIProvider.isBlockingE10sMulti(addon)) {
blockMulti = true;
break;
}
}
Preferences.set(PREF_E10S_MULTI_BLOCKED_BY_ADDONS, blockMulti);
},
/**
* Synchronously calculates and updates all the active flags in the database.
*/

View File

@ -19,7 +19,11 @@ createAppInfo("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
startupManager();
function* check_normal() {
function check_multi_disabled() {
return Services.prefs.getBoolPref("extensions.e10sMultiBlockedByAddons", false);
}
function* check_normal(checkMultiDisabled) {
let install = yield promiseInstallFile(do_get_addon("test_bootstrap1_1"));
do_check_eq(install.state, AddonManager.STATE_INSTALLED);
do_check_false(hasFlag(install.addon.pendingOperations, AddonManager.PENDING_INSTALL));
@ -30,6 +34,10 @@ function* check_normal() {
let addon = yield promiseAddonByID(ID);
do_check_eq(addon, install.addon);
if (checkMultiDisabled) {
do_check_false(check_multi_disabled());
}
do_check_false(hasFlag(addon.operationsRequiringRestart, AddonManager.OP_NEEDS_RESTART_DISABLE));
addon.userDisabled = true;
BootstrapMonitor.checkAddonNotStarted(ID);
@ -82,6 +90,7 @@ add_task(function*() {
let install = yield promiseInstallFile(do_get_addon("test_bootstrap1_1"));
do_check_eq(install.state, AddonManager.STATE_INSTALLED);
do_check_true(hasFlag(install.addon.pendingOperations, AddonManager.PENDING_INSTALL));
do_check_false(check_multi_disabled());
let addon = yield promiseAddonByID(ID);
do_check_eq(addon, null);
@ -94,12 +103,14 @@ add_task(function*() {
addon = yield promiseAddonByID(ID);
do_check_neq(addon, null);
do_check_true(check_multi_disabled());
do_check_false(hasFlag(addon.operationsRequiringRestart, AddonManager.OP_NEEDS_RESTART_DISABLE));
addon.userDisabled = true;
BootstrapMonitor.checkAddonNotStarted(ID);
do_check_false(addon.isActive);
do_check_false(hasFlag(addon.pendingOperations, AddonManager.PENDING_DISABLE));
do_check_false(check_multi_disabled());
do_check_true(hasFlag(addon.operationsRequiringRestart, AddonManager.OP_NEEDS_RESTART_ENABLE));
addon.userDisabled = false;
@ -116,6 +127,7 @@ add_task(function*() {
BootstrapMonitor.checkAddonStarted(ID);
do_check_false(hasFlag(addon.operationsRequiringRestart, AddonManager.OP_NEEDS_RESTART_UNINSTALL));
do_check_true(check_multi_disabled());
addon.uninstall();
BootstrapMonitor.checkAddonNotStarted(ID);
BootstrapMonitor.checkAddonNotInstalled(ID);
@ -130,6 +142,7 @@ add_task(function*() {
let install = yield promiseInstallFile(do_get_addon("test_bootstrap1_1"));
do_check_eq(install.state, AddonManager.STATE_INSTALLED);
do_check_true(hasFlag(install.addon.pendingOperations, AddonManager.PENDING_INSTALL));
do_check_false(check_multi_disabled());
let addon = yield promiseAddonByID(ID);
do_check_eq(addon, null);
@ -139,6 +152,7 @@ add_task(function*() {
// After install and restart we should block.
let blocked = Services.prefs.getBoolPref("extensions.e10sBlockedByAddons");
do_check_true(blocked);
do_check_true(check_multi_disabled());
BootstrapMonitor.checkAddonInstalled(ID);
BootstrapMonitor.checkAddonStarted(ID);
@ -159,6 +173,7 @@ add_task(function*() {
// After disable and restart we should not block.
blocked = Services.prefs.getBoolPref("extensions.e10sBlockedByAddons");
do_check_false(blocked);
do_check_false(check_multi_disabled());
addon = yield promiseAddonByID(ID);
addon.userDisabled = false;
@ -171,6 +186,7 @@ add_task(function*() {
// After re-enable and restart we should block.
blocked = Services.prefs.getBoolPref("extensions.e10sBlockedByAddons");
do_check_true(blocked);
do_check_true(check_multi_disabled());
addon = yield promiseAddonByID(ID);
do_check_neq(addon, null);
@ -191,6 +207,7 @@ add_task(function*() {
// After uninstall and restart we should not block.
blocked = Services.prefs.getBoolPref("extensions.e10sBlockedByAddons");
do_check_false(blocked);
do_check_false(check_multi_disabled());
restartManager();
});
@ -246,6 +263,7 @@ add_task(function*() {
// After disable one addon and restart we should block.
blocked = Services.prefs.getBoolPref("extensions.e10sBlockedByAddons");
do_check_true(blocked);
do_check_true(check_multi_disabled());
addon2 = yield promiseAddonByID(ID2);
@ -261,6 +279,7 @@ add_task(function*() {
// After disable both addons and restart we should not block.
blocked = Services.prefs.getBoolPref("extensions.e10sBlockedByAddons");
do_check_false(blocked);
do_check_false(check_multi_disabled());
addon = yield promiseAddonByID(ID);
addon.userDisabled = false;
@ -273,6 +292,7 @@ add_task(function*() {
// After re-enable one addon and restart we should block.
blocked = Services.prefs.getBoolPref("extensions.e10sBlockedByAddons");
do_check_true(blocked);
do_check_true(check_multi_disabled());
addon = yield promiseAddonByID(ID);
do_check_neq(addon, null);
@ -292,6 +312,7 @@ add_task(function*() {
// After uninstall the only enabled addon and restart we should not block.
blocked = Services.prefs.getBoolPref("extensions.e10sBlockedByAddons");
do_check_false(blocked);
do_check_false(check_multi_disabled());
addon2 = yield promiseAddonByID(ID2);
addon2.uninstall();
@ -426,5 +447,81 @@ add_task(function*() {
Services.prefs.setCharPref("extensions.hotfix.id", ID);
Services.prefs.setBoolPref("extensions.hotfix.cert.checkAttributes", false);
yield check_normal();
yield check_normal(true);
});
// Test that non-restartless add-ons should not block multi
add_task(function*() {
yield promiseInstallAllFiles([do_get_addon("test_install1")], true);
let non_restartless_ID = "addon1@tests.mozilla.org";
restartManager();
let addon = yield promiseAddonByID(non_restartless_ID);
// non-restartless add-on is installed and started
do_check_neq(addon, null);
do_check_false(check_multi_disabled());
addon.uninstall();
BootstrapMonitor.checkAddonNotInstalled(non_restartless_ID);
BootstrapMonitor.checkAddonNotStarted(non_restartless_ID);
yield promiseRestartManager();
});
// Test experiment add-on should not block multi
add_task(function*() {
yield promiseInstallAllFiles([do_get_addon("test_experiment1")], true);
let experiment_ID = "experiment1@tests.mozilla.org";
BootstrapMonitor.checkAddonInstalled(experiment_ID, "1.0");
BootstrapMonitor.checkAddonNotStarted(experiment_ID);
let addon = yield promiseAddonByID(experiment_ID);
// experiment add-on is installed but not started
do_check_neq(addon, null);
do_check_false(check_multi_disabled());
addon.uninstall();
BootstrapMonitor.checkAddonNotInstalled(experiment_ID);
BootstrapMonitor.checkAddonNotStarted(experiment_ID);
yield promiseRestartManager();
});
const { GlobalManager } = Components.utils.import("resource://gre/modules/Extension.jsm", {});
// Test that web extension add-ons should not block multi
add_task(function*() {
yield promiseInstallAllFiles([do_get_addon("webextension_1")], true);
restartManager();
yield promiseWebExtensionStartup();
let we_ID = "webextension1@tests.mozilla.org";
do_check_eq(GlobalManager.extensionMap.size, 1);
let addon = yield promiseAddonByID(we_ID);
do_check_neq(addon, null);
do_check_false(check_multi_disabled());
addon.uninstall();
BootstrapMonitor.checkAddonNotInstalled(we_ID);
BootstrapMonitor.checkAddonNotStarted(we_ID);
yield promiseRestartManager();
});

View File

@ -13,53 +13,16 @@ spinbuttons {
min-width: 13px;
min-height: 11px;
margin: 0 !important;
border: 2px solid;
-moz-border-top-colors: ThreeDHighlight ThreeDLightShadow;
-moz-border-right-colors: ThreeDDarkShadow ThreeDShadow;
-moz-border-bottom-colors: ThreeDDarkShadow ThreeDShadow;
-moz-border-left-colors: ThreeDHighlight ThreeDLightShadow;
background-color: ThreeDFace;
}
.spinbuttons-button > .button-box {
border: 0;
}
.spinbuttons-button:hover:active
{
border: 2px solid;
-moz-border-top-colors: ThreeDDarkShadow ThreeDShadow;
-moz-border-right-colors: ThreeDDarkShadow ThreeDShadow;
-moz-border-bottom-colors: ThreeDDarkShadow ThreeDShadow;
-moz-border-left-colors: ThreeDDarkShadow ThreeDShadow;
}
.spinbuttons-button[disabled="true"] {
border: 2px solid;
-moz-border-top-colors: ThreeDHighlight ThreeDLightShadow !important;
-moz-border-right-colors: ThreeDDarkShadow ThreeDShadow !important;
-moz-border-bottom-colors: ThreeDDarkShadow ThreeDShadow !important;
-moz-border-left-colors: ThreeDHighlight ThreeDLightShadow !important;
}
.spinbuttons-up {
-moz-appearance: spinner-upbutton;
background-image: url("chrome://global/skin/arrow/arrow-up.gif");
background-position: center center;
background-repeat: no-repeat;
}
.spinbuttons-up[disabled="true"] {
background-image: url("chrome://global/skin/arrow/arrow-up-dis.gif");
}
.spinbuttons-down {
-moz-appearance: spinner-downbutton;
background-image: url("chrome://global/skin/arrow/arrow-dn.gif");
background-position: center center;
background-repeat: no-repeat;
}
.spinbuttons-down[disabled="true"] {
background-image: url("chrome://global/skin/arrow/arrow-dn-dis.gif");
}

View File

@ -442,123 +442,7 @@ SamplerThread::SuspendAndSampleAndResumeThread(PS::LockRef aLock,
// END SamplerThread target specifics
////////////////////////////////////////////////////////////////////////
#if defined(GP_OS_android)
static struct sigaction gOldSigstartHandler;
const int SIGSTART = SIGUSR2;
static void
freeArray(const char** aArray, int aSize)
{
for (int i = 0; i < aSize; i++) {
free((void*) aArray[i]);
}
}
static uint32_t
readCSVArray(char* aCsvList, const char** aBuffer)
{
uint32_t count;
char* savePtr;
int newlinePos = strlen(aCsvList) - 1;
if (aCsvList[newlinePos] == '\n') {
aCsvList[newlinePos] = '\0';
}
char* item = strtok_r(aCsvList, ",", &savePtr);
for (count = 0; item; item = strtok_r(nullptr, ",", &savePtr)) {
int length = strlen(item) + 1; // Include \0
char* newBuf = (char*) malloc(sizeof(char) * length);
aBuffer[count] = newBuf;
strncpy(newBuf, item, length);
count++;
}
return count;
}
static void
DoStartTask()
{
uint32_t featureCount = 0;
uint32_t threadCount = 0;
// Just allocate 10 features for now
// FIXME: these don't really point to const chars*
// So we free them later, but we don't want to change the const char**
// declaration in profiler_start. Annoying but ok for now.
const char* threadNames[10];
const char* features[10];
const char* profilerConfigFile = "/data/local/tmp/profiler.options";
// Support some of the usual env variables, plus some extra stuff.
FILE* file = fopen(profilerConfigFile, "r");
int entries = PROFILE_DEFAULT_ENTRIES;
int interval = PROFILE_DEFAULT_INTERVAL;
if (file) {
const int bufferSize = 1024;
char line[bufferSize];
while (fgets(line, bufferSize, file) != nullptr) {
char* savePtr;
char* feature = strtok_r(line, "=", &savePtr);
char* value = strtok_r(nullptr, "", &savePtr);
if (strncmp(feature, "MOZ_PROFILER_STARTUP_ENTRIES", bufferSize) == 0) {
GetEntries(value, &entries);
} else if (strncmp(feature, "MOZ_PROFILER_STARTUP_INTERVAL",
bufferSize) == 0) {
GetInterval(value, &interval);
} else if (strncmp(feature, "MOZ_PROFILER_STARTUP_FEATURES",
bufferSize) == 0) {
featureCount = readCSVArray(value, features);
} else if (strncmp(feature, "threads", bufferSize) == 0) {
threadCount = readCSVArray(value, threadNames);
}
}
fclose(file);
}
MOZ_ASSERT(featureCount < 10);
MOZ_ASSERT(threadCount < 10);
profiler_start(entries, interval,
features, featureCount, threadNames, threadCount);
freeArray(threadNames, threadCount);
freeArray(features, featureCount);
}
static void
SigstartHandler(int aSignal, siginfo_t* aInfo, void* aContext)
{
class StartTask : public Runnable {
public:
NS_IMETHOD Run() override {
DoStartTask();
return NS_OK;
}
};
// XXX: technically NS_DispatchToMainThread is NOT async signal safe. We risk
// nasty things like deadlocks, but the probability is very low and we
// typically only do this once so it tends to be ok. See bug 909403.
NS_DispatchToMainThread(new StartTask());
}
static void
PlatformInit(PS::LockRef aLock)
{
struct sigaction sa;
sa.sa_sigaction = SigstartHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
if (sigaction(SIGSTART, &sa, &gOldSigstartHandler) != 0) {
MOZ_CRASH("Error installing SIGSTART handler in the profiler");
}
}
#else /* !defined(GP_OS_android) */
#if defined(GP_OS_linux)
// We use pthread_atfork() to temporarily disable signal delivery during any
// fork() call. Without that, fork() can be repeatedly interrupted by signal
@ -609,6 +493,13 @@ PlatformInit(PS::LockRef aLock)
pthread_atfork(paf_prepare, paf_parent, nullptr);
}
#else
static void
PlatformInit(PS::LockRef aLock)
{
}
#endif
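The comment above notes that pthread_atfork() is used to keep the sampler signal from interrupting fork(). A minimal sketch of that technique, assuming the sampling signal is SIGPROF and using illustrative handler names (the tree's actual paf_prepare/paf_parent handlers are elided by the hunk above):
#include <pthread.h>
#include <signal.h>
static void BlockSamplerSignal()
{
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGPROF);
  pthread_sigmask(SIG_BLOCK, &set, nullptr);
}
static void UnblockSamplerSignal()
{
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGPROF);
  pthread_sigmask(SIG_UNBLOCK, &set, nullptr);
}
static void InstallForkHooks()
{
  // prepare runs right before fork(); parent runs in the parent right after.
  pthread_atfork(BlockSamplerSignal, UnblockSamplerSignal, nullptr);
}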
void

View File

@ -1421,34 +1421,6 @@ void ProfilerMarker::StreamJSON(SpliceableJSONWriter& aWriter,
aWriter.EndArray();
}
// Only fills in aOut if aStr contains a valid value.
static bool
GetEntries(const char* aStr, int* aOut)
{
MOZ_ASSERT(aStr);
errno = 0;
int entries = strtol(aStr, nullptr, 10);
if (errno == 0 && entries > 0) {
*aOut = entries;
return true;
}
return false;
}
// Only fills in aOut if aStr contains a valid value.
static bool
GetInterval(const char* aStr, int* aOut)
{
MOZ_ASSERT(aStr);
errno = 0;
int interval = strtol(aStr, nullptr, 10);
if (errno == 0 && 1 <= interval && interval <= 1000) {
*aOut = interval;
return true;
}
return false;
}
static void
PrintUsageThenExit(int aExitCode)
{
@ -1956,19 +1928,25 @@ profiler_init(void* aStackTop)
int entries = PROFILE_DEFAULT_ENTRIES;
const char* startupEntries = getenv("MOZ_PROFILER_STARTUP_ENTRIES");
if (startupEntries) {
if (!GetEntries(startupEntries, &entries)) {
errno = 0;
entries = strtol(startupEntries, nullptr, 10);
if (errno == 0 && entries > 0) {
LOG("- MOZ_PROFILER_STARTUP_ENTRIES = %d", entries);
} else {
PrintUsageThenExit(1);
}
LOG("- MOZ_PROFILER_STARTUP_ENTRIES = %d", entries);
}
int interval = PROFILE_DEFAULT_INTERVAL;
const char* startupInterval = getenv("MOZ_PROFILER_STARTUP_INTERVAL");
if (startupInterval) {
if (!GetInterval(startupInterval, &interval)) {
errno = 0;
interval = strtol(startupInterval, nullptr, 10);
if (errno == 0 && 1 <= interval && interval <= 1000) {
LOG("- MOZ_PROFILER_STARTUP_INTERVAL = %d", interval);
} else {
PrintUsageThenExit(1);
}
LOG("- MOZ_PROFILER_STARTUP_INTERVAL = %d", interval);
}
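// Both startup values above follow the same validation pattern: reset errno,
// parse with strtol(), and keep the result only if errno stayed 0 and the
// value is in range (entries > 0; 1 <= interval <= 1000). Anything else
// prints the usage text and exits.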
locked_profiler_start(lock, entries, interval,

View File

@ -18,6 +18,7 @@ NS_IMPL_ISUPPORTS(nsFilePickerProxy, nsIFilePicker)
nsFilePickerProxy::nsFilePickerProxy()
: mSelectedType(0)
, mIPCActive(false)
{
}
@ -40,6 +41,8 @@ nsFilePickerProxy::Init(mozIDOMWindowProxy* aParent, const nsAString& aTitle,
NS_ADDREF_THIS();
tabChild->SendPFilePickerConstructor(this, nsString(aTitle), aMode);
mIPCActive = true;
return NS_OK;
}
@ -136,6 +139,10 @@ nsFilePickerProxy::Open(nsIFilePickerShownCallback* aCallback)
mDisplayDirectory->GetPath(displayDirectory);
}
if (!mIPCActive) {
return NS_ERROR_FAILURE;
}
SendOpen(mSelectedType, mAddToRecentDocs, mDefault, mDefaultExtension,
mFilters, mFilterNames, displayDirectory, mOkButtonLabel);
@ -271,3 +278,14 @@ nsFilePickerProxy::GetDomFileOrDirectoryEnumerator(nsISimpleEnumerator** aDomfil
enumerator.forget(aDomfiles);
return NS_OK;
}
void
nsFilePickerProxy::ActorDestroy(ActorDestroyReason aWhy)
{
mIPCActive = false;
if (mCallback) {
mCallback->Done(nsIFilePicker::returnCancel);
mCallback = nullptr;
}
}
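With ActorDestroy() cancelling the pending callback, a callback handed to Open() is now notified even if the child-side IPC actor is torn down. A minimal caller-side sketch, assuming the standard nsIFilePickerShownCallback interface; the class name is illustrative only:
#include "nsIFilePicker.h"
class PickerDoneListener final : public nsIFilePickerShownCallback
{
public:
  NS_DECL_ISUPPORTS
  NS_IMETHOD Done(int16_t aResult) override
  {
    if (aResult == nsIFilePicker::returnCancel) {
      // Reached both when the user cancels and when the actor is destroyed.
    }
    return NS_OK;
  }
private:
  ~PickerDoneListener() = default;
};
NS_IMPL_ISUPPORTS(PickerDoneListener, nsIFilePickerShownCallback)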

View File

@ -60,6 +60,9 @@ private:
~nsFilePickerProxy();
void InitNative(nsIWidget*, const nsAString&) override;
void
ActorDestroy(ActorDestroyReason aWhy) override;
nsTArray<mozilla::dom::OwningFileOrDirectory> mFilesOrDirectories;
nsCOMPtr<nsIFilePickerShownCallback> mCallback;
@ -68,6 +71,8 @@ private:
nsString mDefault;
nsString mDefaultExtension;
bool mIPCActive;
InfallibleTArray<nsString> mFilters;
InfallibleTArray<nsString> mFilterNames;
};

View File

@ -52,9 +52,11 @@ UNIFIED_SOURCES += xpcom_gluens_src_cppsrcs
UNIFIED_SOURCES += xpcom_glue_src_cppsrcs
UNIFIED_SOURCES += [
'FileLocation.cpp',
'IOInterposer.cpp',
'LateWriteChecks.cpp',
'MainThreadIOLogger.cpp',
'Omnijar.cpp',
'Services.cpp',
'XPCOMInit.cpp',
]
@ -64,13 +66,6 @@ if CONFIG['OS_ARCH'] != 'WINNT':
'NSPRInterposer.cpp',
]
# FileLocation.cpp and Omnijar.cpp cannot be built in unified mode because they
# use plarena.h.
SOURCES += [
'FileLocation.cpp',
'Omnijar.cpp',
]
include('/ipc/chromium/chromium-config.mozbuild')
FINAL_LIBRARY = 'xul'

View File

@ -30,18 +30,12 @@ EXPORTS.mozilla += [
'ModuleUtils.h',
]
# nsCategoryManager.cpp and nsComponentManager.cpp cannot be built in
# unified mode because they use the PL_ARENA_CONST_ALIGN_MASK macro
# with plarena.h.
SOURCES += [
'nsCategoryManager.cpp',
'nsComponentManager.cpp',
]
UNIFIED_SOURCES += [
'GenericFactory.cpp',
'ManifestParser.cpp',
'nsCategoryCache.cpp',
'nsCategoryManager.cpp',
'nsComponentManager.cpp',
'nsComponentManagerUtils.cpp',
]

View File

@ -4,12 +4,9 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#define PL_ARENA_CONST_ALIGN_MASK 7
#include "nsICategoryManager.h"
#include "nsCategoryManager.h"
#include "plarena.h"
#include "prio.h"
#include "prlock.h"
#include "nsCOMPtr.h"
@ -27,6 +24,7 @@
#include "nsQuickSort.h"
#include "nsEnumeratorUtils.h"
#include "nsThreadUtils.h"
#include "mozilla/ArenaAllocatorExtensions.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Services.h"
@ -48,11 +46,6 @@ class nsIComponentLoaderManager;
going to change much ;)
*/
#define NS_CATEGORYMANAGER_ARENA_SIZE (1024 * 8)
// pulled in from nsComponentManager.cpp
char* ArenaStrdup(const char* aStr, PLArenaPool* aArena);
//
// BaseStringEnumerator is subclassed by EntryEnumerator and
// CategoryEnumerator
@ -200,7 +193,7 @@ EntryEnumerator::Create(nsTHashtable<CategoryLeaf>& aTable)
//
CategoryNode*
CategoryNode::Create(PLArenaPool* aArena)
CategoryNode::Create(CategoryAllocator* aArena)
{
return new (aArena) CategoryNode();
}
@ -208,11 +201,9 @@ CategoryNode::Create(PLArenaPool* aArena)
CategoryNode::~CategoryNode() = default;
void*
CategoryNode::operator new(size_t aSize, PLArenaPool* aArena)
CategoryNode::operator new(size_t aSize, CategoryAllocator* aArena)
{
void* p;
PL_ARENA_ALLOCATE(p, aArena, aSize);
return p;
return aArena->Allocate(aSize, mozilla::fallible);
}
nsresult
@ -238,7 +229,7 @@ CategoryNode::AddLeaf(const char* aEntryName,
const char* aValue,
bool aReplace,
char** aResult,
PLArenaPool* aArena)
CategoryAllocator* aArena)
{
if (aResult) {
*aResult = nullptr;
@ -248,7 +239,7 @@ CategoryNode::AddLeaf(const char* aEntryName,
CategoryLeaf* leaf = mTable.GetEntry(aEntryName);
if (!leaf) {
const char* arenaEntryName = ArenaStrdup(aEntryName, aArena);
const char* arenaEntryName = ArenaStrdup(aEntryName, *aArena);
if (!arenaEntryName) {
return NS_ERROR_OUT_OF_MEMORY;
}
@ -263,7 +254,7 @@ CategoryNode::AddLeaf(const char* aEntryName,
return NS_ERROR_INVALID_ARG;
}
const char* arenaValue = ArenaStrdup(aValue, aArena);
const char* arenaValue = ArenaStrdup(aValue, *aArena);
if (!arenaValue) {
return NS_ERROR_OUT_OF_MEMORY;
}
@ -410,11 +401,11 @@ nsCategoryManager::Create(nsISupports* aOuter, REFNSIID aIID, void** aResult)
}
nsCategoryManager::nsCategoryManager()
: mLock("nsCategoryManager")
: mArena()
, mTable()
, mLock("nsCategoryManager")
, mSuppressNotifications(false)
{
PL_INIT_ARENA_POOL(&mArena, "CategoryManagerArena",
NS_CATEGORYMANAGER_ARENA_SIZE);
}
void
@ -429,8 +420,6 @@ nsCategoryManager::~nsCategoryManager()
// destroyed, or else you will have PRLocks undestroyed and other Really
// Bad Stuff (TM)
mTable.Clear();
PL_FinishArenaPool(&mArena);
}
inline CategoryNode*
@ -462,7 +451,7 @@ nsCategoryManager::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf)
{
size_t n = aMallocSizeOf(this);
n += PL_SizeOfArenaPoolExcludingPool(&mArena, aMallocSizeOf);
n += mArena.SizeOfExcludingThis(aMallocSizeOf);
n += mTable.ShallowSizeOfExcludingThis(aMallocSizeOf);
for (auto iter = mTable.ConstIter(); !iter.Done(); iter.Next()) {
@ -609,7 +598,7 @@ nsCategoryManager::AddCategoryEntry(const char* aCategoryName,
// That category doesn't exist yet; let's make it.
category = CategoryNode::Create(&mArena);
char* categoryName = ArenaStrdup(aCategoryName, &mArena);
char* categoryName = ArenaStrdup(aCategoryName, mArena);
mTable.Put(categoryName, category);
}
}

View File

@ -9,16 +9,18 @@
#define NSCATEGORYMANAGER_H
#include "prio.h"
#include "plarena.h"
#include "nsClassHashtable.h"
#include "nsICategoryManager.h"
#include "nsIMemoryReporter.h"
#include "mozilla/ArenaAllocator.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Mutex.h"
#include "mozilla/Attributes.h"
class nsIMemoryReporter;
typedef mozilla::ArenaAllocator<1024*8, 8> CategoryAllocator;
/* 16d222a6-1dd2-11b2-b693-f38b02c021b2 */
#define NS_CATEGORYMANAGER_CID \
{ 0x16d222a6, 0x1dd2, 0x11b2, \
@ -55,7 +57,7 @@ public:
const char* aValue,
bool aReplace,
char** aResult,
PLArenaPool* aArena);
CategoryAllocator* aArena);
void DeleteLeaf(const char* aEntryName);
@ -75,7 +77,7 @@ public:
nsresult Enumerate(nsISimpleEnumerator** aResult);
// CategoryNode is arena-allocated, with the strings
static CategoryNode* Create(PLArenaPool* aArena);
static CategoryNode* Create(CategoryAllocator* aArena);
~CategoryNode();
void operator delete(void*) {}
@ -84,7 +86,7 @@ public:
private:
CategoryNode() : mLock("CategoryLeaf") {}
void* operator new(size_t aSize, PLArenaPool* aArena);
void* operator new(size_t aSize, CategoryAllocator* aArena);
nsTHashtable<CategoryLeaf> mTable;
mozilla::Mutex mLock;
@ -137,7 +139,7 @@ private:
const char* aCategoryName, // must be a static string
const char* aEntryName);
PLArenaPool mArena;
CategoryAllocator mArena;
nsClassHashtable<nsDepCharHashKey, CategoryNode> mTable;
mozilla::Mutex mLock;
bool mSuppressNotifications;

View File

@ -22,13 +22,6 @@
#include "nspr.h"
#include "nsCRT.h" // for atoll
// Arena used by component manager for storing contractid string, dll
// location strings and small objects
// CAUTION: Arena align mask needs to be defined before including plarena.h
// currently from nsComponentManager.h
#define PL_ARENA_CONST_ALIGN_MASK 7
#define NS_CM_BLOCK_SIZE (1024 * 8)
#include "nsCategoryManager.h"
#include "nsCOMPtr.h"
#include "nsComponentManager.h"
@ -157,27 +150,6 @@ error:
return rv;
}
////////////////////////////////////////////////////////////////////////////////
// Arena helper functions
////////////////////////////////////////////////////////////////////////////////
char*
ArenaStrndup(const char* aStr, uint32_t aLen, PLArenaPool* aArena)
{
void* mem;
// Include trailing null in the aLen
PL_ARENA_ALLOCATE(mem, aArena, aLen + 1);
if (mem) {
memcpy(mem, aStr, aLen + 1);
}
return static_cast<char*>(mem);
}
char*
ArenaStrdup(const char* aStr, PLArenaPool* aArena)
{
return ArenaStrndup(aStr, strlen(aStr), aArena);
}
// GetService and a few other functions need to exit their mutex mid-function
// without reentering it later in the block. This class supports that
// style of early-exit that MutexAutoUnlock doesn't.
@ -333,9 +305,6 @@ nsComponentManagerImpl::Init()
{
MOZ_ASSERT(NOT_INITIALIZED == mStatus);
// Initialize our arena
PL_INIT_ARENA_POOL(&mArena, "ComponentManagerArena", NS_CM_BLOCK_SIZE);
nsCOMPtr<nsIFile> greDir =
GetLocationFromDirectoryService(NS_GRE_DIR);
nsCOMPtr<nsIFile> appDir =
@ -714,14 +683,12 @@ nsComponentManagerImpl::ManifestComponent(ManifestProcessingContext& aCx,
mKnownModules.Put(hash, km);
}
void* place;
PL_ARENA_ALLOCATE(place, &mArena, sizeof(nsCID));
void* place = mArena.Allocate(sizeof(nsCID));
nsID* permanentCID = static_cast<nsID*>(place);
*permanentCID = cid;
PL_ARENA_ALLOCATE(place, &mArena, sizeof(mozilla::Module::CIDEntry));
auto* e = new (place) mozilla::Module::CIDEntry();
place = mArena.Allocate(sizeof(mozilla::Module::CIDEntry));
auto* e = new (KnownNotNull, place) mozilla::Module::CIDEntry();
e->cid = permanentCID;
f = new nsFactoryEntry(e, km);
@ -856,9 +823,6 @@ nsresult nsComponentManagerImpl::Shutdown(void)
delete sStaticModules;
delete sModuleLocations;
// delete arena for strings and small objects
PL_FinishArenaPool(&mArena);
mStatus = SHUTDOWN_COMPLETE;
MOZ_LOG(nsComponentManagerLog, LogLevel::Debug,
@ -1773,7 +1737,7 @@ nsComponentManagerImpl::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf)
n += mKnownStaticModules.ShallowSizeOfExcludingThis(aMallocSizeOf);
n += mKnownModules.ShallowSizeOfExcludingThis(aMallocSizeOf);
n += PL_SizeOfArenaPoolExcludingPool(&mArena, aMallocSizeOf);
n += mArena.SizeOfExcludingThis(aMallocSizeOf);
n += mPendingServices.ShallowSizeOfExcludingThis(aMallocSizeOf);

View File

@ -15,6 +15,7 @@
#include "nsIMemoryReporter.h"
#include "nsIServiceManager.h"
#include "nsIFile.h"
#include "mozilla/ArenaAllocator.h"
#include "mozilla/Atomics.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Module.h"
@ -29,7 +30,6 @@
#include "nsCOMPtr.h"
#include "nsAutoPtr.h"
#include "nsWeakReference.h"
#include "plarena.h"
#include "nsCOMArray.h"
#include "nsDataHashtable.h"
#include "nsInterfaceHashtable.h"
@ -310,7 +310,7 @@ public:
SHUTDOWN_COMPLETE
} mStatus;
PLArenaPool mArena;
mozilla::ArenaAllocator<1024*8, 8> mArena;
struct PendingServiceInfo
{

xpcom/ds/ArenaAllocator.h Normal file
View File

@ -0,0 +1,224 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_ArenaAllocator_h
#define mozilla_ArenaAllocator_h
#include <algorithm>
#include <cstdint>
#include "mozilla/fallible.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryChecking.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/OperatorNewExtensions.h"
#include "mozilla/TemplateLib.h"
#include "nsDebug.h"
namespace mozilla {
/**
* A very simple arena allocator based on NSPR's PLArena.
*
* The arena allocator only provides for allocation, all memory is retained
* until the allocator is destroyed. It's useful for situations where a large
* amount of small transient allocations are expected.
*
* Example usage:
*
* // Define an allocator that is page sized and returns allocations that are
* // 8-byte aligned.
* ArenaAllocator<4096, 8> a;
* for (int i = 0; i < 1000; i++) {
* DoSomething(a.Allocate(i));
* }
*/
template<size_t ArenaSize, size_t Alignment=1>
class ArenaAllocator
{
public:
constexpr ArenaAllocator()
: mHead()
, mCurrent(nullptr)
{
static_assert(mozilla::tl::FloorLog2<Alignment>::value ==
mozilla::tl::CeilingLog2<Alignment>::value,
"ArenaAllocator alignment must be a power of two");
}
ArenaAllocator(const ArenaAllocator&) = delete;
ArenaAllocator& operator=(const ArenaAllocator&) = delete;
/**
* Frees all internal arenas but does not call destructors for objects
* allocated out of the arena.
*/
~ArenaAllocator()
{
Clear();
}
/**
* Fallibly allocates a chunk of memory with the given size from the internal
* arenas. If the allocation size is larger than the chosen arena size, an
* entire arena is allocated and used.
*/
MOZ_ALWAYS_INLINE void* Allocate(size_t aSize, const fallible_t&)
{
MOZ_RELEASE_ASSERT(aSize, "Allocation size must be non-zero");
return InternalAllocate(AlignedSize(aSize));
}
void* Allocate(size_t aSize)
{
void* p = Allocate(aSize, fallible);
if (MOZ_UNLIKELY(!p)) {
NS_ABORT_OOM(std::max(aSize, ArenaSize));
}
return p;
}
/**
* Frees all entries. The allocator can be reused after this is called.
*
* NB: This will not run destructors of any objects that were allocated from
* the arena.
*/
void Clear()
{
// Free all chunks.
auto a = mHead.next;
while (a) {
auto tmp = a;
a = a->next;
free(tmp);
}
// Reset the list head.
mHead.next = nullptr;
mCurrent = nullptr;
}
/**
* Adjusts the given size to the required alignment.
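* For example, with Alignment = 8: AlignedSize(1) == 8, AlignedSize(8) == 8,
* and AlignedSize(13) == 16. Alignment must be a power of two, which the
* constructor's static_assert enforces.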
*/
static constexpr size_t AlignedSize(size_t aSize)
{
return (aSize + (Alignment - 1)) & ~(Alignment - 1);
}
size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
size_t s = 0;
for (auto arena = mHead.next; arena; arena = arena->next) {
s += aMallocSizeOf(arena);
}
return s;
}
private:
struct ArenaHeader
{
/**
* The location in memory of the data portion of the arena.
*/
uintptr_t offset;
/**
* The location in memory of the end of the data portion of the arena.
*/
uintptr_t tail;
};
struct ArenaChunk
{
constexpr ArenaChunk() : header{0, 0}, next(nullptr) {}
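// The size-taking constructor points |offset| just past this (aligned) header
// and |tail| at the end of the |aSize|-byte block the chunk was allocated in.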
explicit ArenaChunk(size_t aSize)
: header{AlignedSize(uintptr_t(this + 1)), uintptr_t(this) + aSize}
, next(nullptr)
{
}
ArenaHeader header;
ArenaChunk* next;
/**
* Allocates a chunk of memory out of the arena and advances the offset.
*/
void* Allocate(size_t aSize)
{
MOZ_ASSERT(aSize <= Available());
char* p = reinterpret_cast<char*>(header.offset);
header.offset += aSize;
MOZ_MAKE_MEM_UNDEFINED(p, aSize);
return p;
}
/**
* Calculates the amount of space available for allocation in this chunk.
*/
size_t Available() const {
return header.tail - header.offset;
}
};
/**
* Allocates an arena chunk of the given size and initializes its header.
*/
ArenaChunk* AllocateChunk(size_t aSize)
{
static const size_t kOffset = AlignedSize(sizeof(ArenaChunk));
MOZ_ASSERT(kOffset < aSize);
const size_t chunkSize = aSize + kOffset;
void* p = malloc(chunkSize);
if (!p) {
return nullptr;
}
ArenaChunk* arena = new (KnownNotNull, p) ArenaChunk(chunkSize);
MOZ_MAKE_MEM_NOACCESS((void*)arena->header.offset,
arena->header.tail - arena->header.offset);
// Insert into the head of the list.
arena->next = mHead.next;
mHead.next = arena;
// Only update |mCurrent| if this is a standard allocation, large
// allocations will always end up full so there's no point in updating
// |mCurrent| in that case.
if (aSize == ArenaSize - kOffset) {
mCurrent = arena;
}
return arena;
}
MOZ_ALWAYS_INLINE void* InternalAllocate(size_t aSize)
{
static_assert(ArenaSize > AlignedSize(sizeof(ArenaChunk)),
"Arena size must be greater than the header size");
static const size_t kMaxArenaCapacity =
ArenaSize - AlignedSize(sizeof(ArenaChunk));
if (mCurrent && aSize <= mCurrent->Available()) {
return mCurrent->Allocate(aSize);
}
ArenaChunk* arena = AllocateChunk(std::max(kMaxArenaCapacity, aSize));
return arena ? arena->Allocate(aSize) : nullptr;
}
ArenaChunk mHead;
ArenaChunk* mCurrent;
};
} // namespace mozilla
#endif // mozilla_ArenaAllocator_h
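A small usage sketch of the allocator defined above, mirroring the class comment; the sizes and function name are illustrative, and nothing allocated here is individually freed:
#include "mozilla/ArenaAllocator.h"
void ArenaExample()
{
  mozilla::ArenaAllocator<4096, 8> arena;
  // Infallible form: aborts the process on OOM.
  void* a = arena.Allocate(64);
  (void)a;
  // Fallible form: returns nullptr on OOM, so the caller must check.
  void* b = arena.Allocate(1024, mozilla::fallible);
  if (!b) {
    return;
  }
  // Releases every chunk at once; no destructors run for the allocations.
  arena.Clear();
}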

View File

@ -0,0 +1,94 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_ArenaAllocatorExtensions_h
#define mozilla_ArenaAllocatorExtensions_h
#include "mozilla/ArenaAllocator.h"
#include "mozilla/CheckedInt.h"
#include "nsAString.h"
/**
* Extensions to the ArenaAllocator class.
*/
namespace mozilla {
namespace detail {
template<typename T, size_t ArenaSize, size_t Alignment>
T* DuplicateString(const T* aSrc, const CheckedInt<size_t>& aLen,
ArenaAllocator<ArenaSize, Alignment>& aArena);
} // namespace detail
/**
* Makes an arena allocated null-terminated copy of the source string. The
* source string must be null-terminated.
*
* @param aStr String to copy.
* @param aArena The arena to allocate the string copy out of.
* @return An arena allocated null-terminated string.
*/
template<size_t ArenaSize, size_t Alignment>
char* ArenaStrdup(const char* aStr,
ArenaAllocator<ArenaSize, Alignment>& aArena)
{
return detail::DuplicateString(aStr, strlen(aStr), aArena);
}
/**
* Makes an arena allocated null-terminated copy of the source string.
*
* @param aStr String to copy.
* @param aArena The arena to allocate the string copy out of.
* @return An arena allocated null-terminated string.
*/
template<size_t ArenaSize, size_t Alignment>
nsAString::char_type* ArenaStrdup(
const nsAString& aStr, ArenaAllocator<ArenaSize, Alignment>& aArena)
{
return detail::DuplicateString(aStr.BeginReading(), aStr.Length(), aArena);
}
/**
* Makes an arena allocated null-terminated copy of the source string.
*
* @param aStr String to copy.
* @param aArena The arena to allocate the string copy out of.
* @return An arena allocated null-terminated string.
*/
template<size_t ArenaSize, size_t Alignment>
nsACString::char_type* ArenaStrdup(
const nsACString& aStr, ArenaAllocator<ArenaSize, Alignment>& aArena)
{
return detail::DuplicateString(aStr.BeginReading(), aStr.Length(), aArena);
}
/**
* Copies the source string and adds a null terminator. Source string does not
* have to be null terminated.
*/
template<typename T, size_t ArenaSize, size_t Alignment>
T* detail::DuplicateString(const T* aSrc, const CheckedInt<size_t>& aLen,
ArenaAllocator<ArenaSize, Alignment>& aArena)
{
const auto byteLen = (aLen + 1) * sizeof(T);
if (!byteLen.isValid()) {
return nullptr;
}
T* p = static_cast<T*>(aArena.Allocate(byteLen.value(), mozilla::fallible));
if (p) {
memcpy(p, aSrc, byteLen.value() - sizeof(T));
p[aLen.value()] = T(0);
}
return p;
}
} // namespace mozilla
#endif // mozilla_ArenaAllocatorExtensions_h
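A brief sketch of the string helpers above, using an allocator shaped like the CategoryAllocator typedef from nsCategoryManager.h (ArenaAllocator<1024*8, 8>); the names here are illustrative:
#include "mozilla/ArenaAllocator.h"
#include "mozilla/ArenaAllocatorExtensions.h"
#include "nsString.h"
void StrdupExample()
{
  mozilla::ArenaAllocator<8192, 8> arena;
  // Null-terminated copy of a C string, owned by the arena.
  char* entry = mozilla::ArenaStrdup("category-entry", arena);
  (void)entry;
  // XPCOM strings work the same way; the source need not be null-terminated,
  // but the arena copy always is.
  nsAutoCString value("some value");
  char* copy = mozilla::ArenaStrdup(value, arena);
  (void)copy;
  // Both copies are released when |arena| is cleared or destroyed.
}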

View File

@ -79,6 +79,8 @@ EXPORTS += [
]
EXPORTS.mozilla += [
'ArenaAllocator.h',
'ArenaAllocatorExtensions.h',
'ArrayIterator.h',
'IncrementalTokenizer.h',
'Observer.h',
@ -101,6 +103,7 @@ UNIFIED_SOURCES += [
'nsINIParserImpl.cpp',
'nsObserverList.cpp',
'nsObserverService.cpp',
'nsPersistentProperties.cpp',
'nsProperties.cpp',
'nsQuickSort.cpp',
'nsStaticNameTable.cpp',
@ -113,12 +116,6 @@ UNIFIED_SOURCES += [
'Tokenizer.cpp',
]
# This file cannot be built in unified mode because it uses the
# PL_ARENA_CONST_ALIGN_MASK macro with plarena.h.
SOURCES += [
'nsPersistentProperties.cpp',
]
EXTRA_COMPONENTS += [
'nsINIProcessor.js',
'nsINIProcessor.manifest',

Some files were not shown because too many files have changed in this diff.