Merge autoland to mozilla-central. a=merge

Commit 383e319d00 by Cosmin Sabou, 2020-04-24 19:27:38 +03:00
70 changed files with 4056 additions and 4775 deletions

View File

@ -40,9 +40,16 @@ void AccGroupInfo::Update() {
// If the sibling is separator then the group is ended.
if (siblingRole == roles::SEPARATOR) break;
// If sibling is not visible and hasn't the same base role.
if (BaseRole(siblingRole) != mRole || sibling->State() & states::INVISIBLE)
if (BaseRole(siblingRole) != mRole) {
continue;
}
bool siblingHasGroupInfo =
sibling->mBits.groupInfo && !sibling->HasDirtyGroupInfo();
// Skip invisible siblings.
// If the sibling has calculated group info, that means it's visible.
if (!siblingHasGroupInfo && sibling->State() & states::INVISIBLE) {
continue;
}
// Check if it's hierarchical flatten structure, i.e. if the sibling
// level is lesser than this one then group is ended, if the sibling level
@ -59,7 +66,7 @@ void AccGroupInfo::Update() {
// If the previous item in the group has calculated group information then
// build group information for this item based on found one.
if (sibling->mBits.groupInfo && !sibling->HasDirtyGroupInfo()) {
if (siblingHasGroupInfo) {
mPosInSet += sibling->mBits.groupInfo->mPosInSet;
mParent = sibling->mBits.groupInfo->mParent;
mSetSize = sibling->mBits.groupInfo->mSetSize;
@ -80,9 +87,16 @@ void AccGroupInfo::Update() {
// If the sibling is separator then the group is ended.
if (siblingRole == roles::SEPARATOR) break;
// If sibling is visible and has the same base role
if (BaseRole(siblingRole) != mRole || sibling->State() & states::INVISIBLE)
if (BaseRole(siblingRole) != mRole) {
continue;
}
bool siblingHasGroupInfo =
sibling->mBits.groupInfo && !sibling->HasDirtyGroupInfo();
// Skip invisible siblings.
// If the sibling has calculated group info, that means it's visible.
if (!siblingHasGroupInfo && sibling->State() & states::INVISIBLE) {
continue;
}
// and check if it's hierarchical flatten structure.
int32_t siblingLevel = nsAccUtils::GetARIAOrDefaultLevel(sibling);
@ -93,7 +107,7 @@ void AccGroupInfo::Update() {
// If the next item in the group has calculated group information then
// build group information for this item based on found one.
if (sibling->mBits.groupInfo && !sibling->HasDirtyGroupInfo()) {
if (siblingHasGroupInfo) {
mParent = sibling->mBits.groupInfo->mParent;
mSetSize = sibling->mBits.groupInfo->mSetSize;
return;
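The hunks above apply the same refactor in both walk directions: compute siblingHasGroupInfo once, and consult the (comparatively expensive) visibility state only for siblings that lack cached group info, since up-to-date group info implies the sibling is visible. A minimal sketch of the combined skip rule, using a hypothetical helper built from the names in the diff:

// Hypothetical helper; not part of the patch. Mirrors the skip logic above.
static bool ShouldSkipSibling(Accessible* aSibling, role aBaseRole) {
  // A sibling with a different base role never belongs to this group.
  if (BaseRole(aSibling->Role()) != aBaseRole) {
    return true;
  }
  // Cached, non-dirty group info implies the sibling is visible.
  bool hasGroupInfo =
      aSibling->mBits.groupInfo && !aSibling->HasDirtyGroupInfo();
  // Fall back to the visibility state check only when nothing is cached.
  return !hasGroupInfo && (aSibling->State() & states::INVISIBLE);
}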

View File

@ -19,8 +19,6 @@ const { ExtensionUtils } = ChromeUtils.import(
var { promiseEvent } = ExtensionUtils;
const XUL_NS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
function getBrowser(panel) {
let browser = document.getElementById("webext-panels-browser");
if (browser) {

View File

@ -8,8 +8,6 @@ var EXPORTED_SYMBOLS = [
"DefaultBrowserCheck",
];
const XULNS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
const { XPCOMUtils } = ChromeUtils.import(
"resource://gre/modules/XPCOMUtils.jsm"
);
@ -2005,7 +2003,7 @@ BrowserGlue.prototype = {
let win = BrowserWindowTracker.getTopWindow();
let stack = win.gBrowser.getPanel().querySelector(".browserStack");
let mask = win.document.createElementNS(XULNS, "box");
let mask = win.document.createXULElement("box");
mask.setAttribute("id", "content-mask");
stack.appendChild(mask);

View File

@ -16,7 +16,7 @@ add_task(async function() {
dummyBtn.setAttribute("removable", "true");
CustomizableUI.getCustomizationTarget(gNavBar).appendChild(dummyBtn);
let popupSet = document.getElementById("mainPopupSet");
gLazyArea = document.createElementNS(kNSXUL, "panel");
gLazyArea = document.createXULElement("panel");
gLazyArea.id = kLazyAreaId;
gLazyArea.setAttribute("hidden", "true");
popupSet.appendChild(gLazyArea);

View File

@ -64,7 +64,7 @@ add_task(async function asyncCleanup() {
});
function setupArea() {
let lazyArea = document.createElementNS(kNSXUL, "hbox");
let lazyArea = document.createXULElement("hbox");
lazyArea.id = kLazyAreaId;
document.getElementById("nav-bar").appendChild(lazyArea);
CustomizableUI.registerArea(kLazyAreaId, {

View File

@ -8,37 +8,64 @@ var initialPageZoom = ZoomManager.zoom;
add_task(async function() {
info("Check zoom reset button existence and functionality");
is(initialPageZoom, 1, "Page zoom reset correctly");
ZoomManager.zoom = 0.5;
CustomizableUI.addWidgetToArea(
"zoom-controls",
CustomizableUI.AREA_FIXED_OVERFLOW_PANEL
await BrowserTestUtils.withNewTab(
{ gBrowser, url: "http://example.com", waitForLoad: true },
async function(browser) {
CustomizableUI.addWidgetToArea(
"zoom-controls",
CustomizableUI.AREA_FIXED_OVERFLOW_PANEL
);
registerCleanupFunction(() => CustomizableUI.reset());
CustomizableUI.addWidgetToArea(
"zoom-controls",
CustomizableUI.AREA_FIXED_OVERFLOW_PANEL
);
await waitForOverflowButtonShown();
{
let zoomChange = BrowserTestUtils.waitForEvent(
gBrowser,
"FullZoomChange"
);
ZoomManager.zoom = 0.5;
await zoomChange;
}
await document.getElementById("nav-bar").overflowable.show();
info("Menu panel was opened");
let zoomResetButton = document.getElementById("zoom-reset-button");
ok(zoomResetButton, "Zoom reset button exists in Panel Menu");
let zoomChange = BrowserTestUtils.waitForEvent(
gBrowser,
"FullZoomChange"
);
zoomResetButton.click();
await zoomChange;
let pageZoomLevel = Math.floor(ZoomManager.zoom * 100);
let expectedZoomLevel = 100;
let buttonZoomLevel = parseInt(zoomResetButton.getAttribute("label"), 10);
is(pageZoomLevel, expectedZoomLevel, "Page zoom reset correctly");
is(
pageZoomLevel,
buttonZoomLevel,
"Button displays the correct zoom level"
);
// close the panel
let panelHiddenPromise = promiseOverflowHidden(window);
document.getElementById("widget-overflow").hidePopup();
await panelHiddenPromise;
info("Menu panel was closed");
}
);
registerCleanupFunction(() => CustomizableUI.reset());
await waitForOverflowButtonShown();
await document.getElementById("nav-bar").overflowable.show();
info("Menu panel was opened");
let zoomResetButton = document.getElementById("zoom-reset-button");
ok(zoomResetButton, "Zoom reset button exists in Panel Menu");
zoomResetButton.click();
await new Promise(SimpleTest.executeSoon);
let pageZoomLevel = Math.floor(ZoomManager.zoom * 100);
let expectedZoomLevel = 100;
let buttonZoomLevel = parseInt(zoomResetButton.getAttribute("label"), 10);
is(pageZoomLevel, expectedZoomLevel, "Page zoom reset correctly");
is(pageZoomLevel, buttonZoomLevel, "Button displays the correct zoom level");
// close the panel
let panelHiddenPromise = promiseOverflowHidden(window);
document.getElementById("widget-overflow").hidePopup();
await panelHiddenPromise;
info("Menu panel was closed");
});
add_task(async function asyncCleanup() {

View File

@ -6,7 +6,7 @@
const TOOLBARID = "test-noncustomizable-toolbar-for-toggling";
function test() {
let tb = document.createElementNS(kNSXUL, "toolbar");
let tb = document.createXULElement("toolbar");
tb.id = TOOLBARID;
gNavToolbox.appendChild(tb);
try {

View File

@ -124,7 +124,7 @@ add_task(async function() {
);
let otherWin = await openAndLoadWindow({}, true);
let otherTB = otherWin.document.createElementNS(kNSXUL, "toolbar");
let otherTB = otherWin.document.createXULElement("toolbar");
otherTB.id = TOOLBARID;
otherTB.setAttribute("customizable", "true");
let wasInformedCorrectlyOfAreaAppearing = false;

View File

@ -33,12 +33,10 @@ registerCleanupFunction(() =>
var { synthesizeDrop, synthesizeMouseAtCenter } = EventUtils;
const kNSXUL = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
const kForceOverflowWidthPx = 450;
function createDummyXULButton(id, label, win = window) {
let btn = win.document.createElementNS(kNSXUL, "toolbarbutton");
let btn = win.document.createXULElement("toolbarbutton");
btn.id = id;
btn.setAttribute("label", label || id);
btn.className = "toolbarbutton-1 chromeclass-toolbar-additional";
@ -50,7 +48,7 @@ var gAddedToolbars = new Set();
function createToolbarWithPlacements(id, placements = []) {
gAddedToolbars.add(id);
let tb = document.createElementNS(kNSXUL, "toolbar");
let tb = document.createXULElement("toolbar");
tb.id = id;
tb.setAttribute("customizable", "true");
CustomizableUI.registerArea(id, {
@ -65,24 +63,24 @@ function createToolbarWithPlacements(id, placements = []) {
function createOverflowableToolbarWithPlacements(id, placements) {
gAddedToolbars.add(id);
let tb = document.createElementNS(kNSXUL, "toolbar");
let tb = document.createXULElement("toolbar");
tb.id = id;
tb.setAttribute("customizationtarget", id + "-target");
let customizationtarget = document.createElementNS(kNSXUL, "hbox");
let customizationtarget = document.createXULElement("hbox");
customizationtarget.id = id + "-target";
customizationtarget.setAttribute("flex", "1");
tb.appendChild(customizationtarget);
let overflowPanel = document.createElementNS(kNSXUL, "panel");
let overflowPanel = document.createXULElement("panel");
overflowPanel.id = id + "-overflow";
document.getElementById("mainPopupSet").appendChild(overflowPanel);
let overflowList = document.createElementNS(kNSXUL, "vbox");
let overflowList = document.createXULElement("vbox");
overflowList.id = id + "-overflow-list";
overflowPanel.appendChild(overflowList);
let chevron = document.createElementNS(kNSXUL, "toolbarbutton");
let chevron = document.createXULElement("toolbarbutton");
chevron.id = id + "-chevron";
tb.appendChild(chevron);

View File

@ -171,7 +171,7 @@ const TAB_EVENTS = [
"TabUnpinned",
];
const NS_XUL = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
const XUL_NS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
/**
* When calling restoreTabContent, we can supply a reason why
@ -1560,7 +1560,7 @@ var SessionStoreInternal = {
break;
case "XULFrameLoaderCreated":
if (
target.namespaceURI == NS_XUL &&
target.namespaceURI == XUL_NS &&
target.localName == "browser" &&
target.frameLoader &&
target.permanentKey

View File

@ -13,14 +13,14 @@ Services.scriptloader.loadSubScript(
// So that PERFHERDER data can be extracted from the logs.
SimpleTest.requestCompleteLog();
function getFilteredModules(filter, loaders) {
function getFilteredModules(filters, loaders) {
let modules = [];
for (const l of loaders) {
const loaderModulesMap = l.modules;
const loaderModulesPaths = Object.keys(loaderModulesMap);
modules = modules.concat(loaderModulesPaths);
}
return modules.filter(url => url.includes(filter));
return modules.filter(url => filters.some(filter => url.includes(filter)));
}
function countCharsInModules(modules) {
@ -43,14 +43,20 @@ function countCharsInModules(modules) {
* - panelName {String} reused in identifiers for perfherder data
*/
function runMetricsTest({ filterString, loaders, panelName }) {
const allModules = getFilteredModules("", loaders);
const panelModules = getFilteredModules(filterString, loaders);
const allModules = getFilteredModules([""], loaders);
const panelModules = getFilteredModules([filterString], loaders);
const vendoredModules = getFilteredModules(
["devtools/client/debugger/dist/vendors", "devtools/client/shared/vendor/"],
loaders
);
const allModulesCount = allModules.length;
const panelModulesCount = panelModules.length;
const vendoredModulesCount = vendoredModules.length;
const allModulesChars = countCharsInModules(allModules);
const panelModulesChars = countCharsInModules(panelModules);
const vendoredModulesChars = countCharsInModules(vendoredModules);
const PERFHERDER_DATA = {
framework: {
@ -77,6 +83,14 @@ function runMetricsTest({ filterString, loaders, panelName }) {
name: "all-chars",
value: allModulesChars,
},
{
name: "vendored-modules",
value: vendoredModulesCount,
},
{
name: "vendored-chars",
value: vendoredModulesChars,
},
],
},
],
@ -92,10 +106,16 @@ function runMetricsTest({ filterString, loaders, panelName }) {
allModulesChars > panelModulesChars && panelModulesChars > 0,
"Successfully recorded char count for " + panelName
);
// Easy way to check how many vendored chars we have for a given panel.
const percentage = ((100 * vendoredModulesChars) / allModulesChars).toFixed(
1
);
info(`Percentage of vendored chars for ${panelName}: ${percentage}%`);
}
function getDuplicatedModules(loaders) {
const allModules = getFilteredModules("", loaders);
const allModules = getFilteredModules([""], loaders);
const uniqueModules = new Set();
const duplicatedModules = new Set();

View File

@ -187,7 +187,7 @@ h2 {
.perf-presets-description {
font-size: 13px;
color: var(--grey-60);
color: var(--in-content-deemphasized-text);
margin: 5px 0;
}

View File

@ -43,8 +43,9 @@ function isXULBrowser(aBrowser) {
if (!aBrowser || !aBrowser.namespaceURI || !aBrowser.localName) {
return false;
}
const XUL = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
return aBrowser.namespaceURI === XUL && aBrowser.localName === "browser";
const XUL_NS =
"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
return aBrowser.namespaceURI === XUL_NS && aBrowser.localName === "browser";
}
function checkForManifest(aWindow) {

View File

@ -109,8 +109,9 @@ function isXULBrowser(aBrowser) {
if (!aBrowser || !aBrowser.namespaceURI || !aBrowser.localName) {
return false;
}
const XUL = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
return aBrowser.namespaceURI === XUL && aBrowser.localName === "browser";
const XUL_NS =
"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
return aBrowser.namespaceURI === XUL_NS && aBrowser.localName === "browser";
}
/**

View File

@ -35,7 +35,6 @@
each preference.
*/
const kXULNS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
const kContentDoc =
"http://www.example.com/browser/dom/tests/browser/test_new_window_from_content_child.html";
const kNewWindowPrefKey = "browser.link.open_newwindow";

View File

@ -10,7 +10,7 @@
use api::{ApiMsg, ClearCache, DebugCommand, DebugFlags};
use api::{DocumentId, DocumentLayer, ExternalScrollId, FrameMsg, HitTestFlags, HitTestResult};
use api::{IdNamespace, MemoryReport, PipelineId, RenderNotifier, SceneMsg, ScrollClamping};
use api::{IdNamespace, MemoryReport, PipelineId, RenderNotifier, ScrollClamping};
use api::{ScrollLocation, TransactionMsg, ResourceUpdate, BlobImageKey};
use api::{NotificationRequest, Checkpoint, QualitySettings};
use api::{ClipIntern, FilterDataIntern, PrimitiveKeyKind};
@ -1015,6 +1015,12 @@ impl RenderBackend {
// in the update_document call below.
resume_rx.recv().ok();
}
for pipeline_id in &txn.discard_frame_state_for_pipelines {
doc.scene
.spatial_tree
.discard_frame_state_for_pipeline(*pipeline_id);
}
} else {
// The document was removed while we were building it, skip it.
// TODO: we might want to just ensure that removed documents are
@ -1346,19 +1352,6 @@ impl RenderBackend {
&mut profile_counters.resources,
);
// TODO(nical) I believe this is wrong. We should discard this state when swapping the
// scene after it is built.
for msg in &transaction_msg.scene_ops {
if let SceneMsg::SetDisplayList { preserve_frame_state: false, pipeline_id, .. } = *msg {
self.documents
.get_mut(&document_id)
.unwrap()
.scene
.spatial_tree
.discard_frame_state_for_pipeline(pipeline_id);
}
}
let mut txn = Box::new(Transaction {
document_id,
blob_rasterizer: None,

View File

@ -102,6 +102,7 @@ pub struct BuiltTransaction {
pub scene_build_end_time: u64,
pub render_frame: bool,
pub invalidate_rendered_frame: bool,
pub discard_frame_state_for_pipelines: Vec<PipelineId>,
pub timings: Option<TransactionTimings>,
}
@ -486,6 +487,7 @@ impl SceneBuilderThread {
blob_rasterizer: None,
frame_ops: Vec::new(),
removed_pipelines: Vec::new(),
discard_frame_state_for_pipelines: Vec::new(),
notifications: Vec::new(),
scene_build_start_time,
scene_build_end_time: precise_time_ns(),
@ -603,6 +605,7 @@ impl SceneBuilderThread {
let mut timings = None;
let mut discard_frame_state_for_pipelines = Vec::new();
let mut removed_pipelines = Vec::new();
let rebuild_scene = !txn.scene_ops.is_empty();
for message in txn.scene_ops.drain(..) {
@ -628,7 +631,7 @@ impl SceneBuilderThread {
content_size,
list_descriptor,
list_data,
..
preserve_frame_state,
} => {
let built_display_list =
BuiltDisplayList::from_data(list_data, list_descriptor);
@ -659,6 +662,10 @@ impl SceneBuilderThread {
blob_rasterization_end_time_ns: 0,
display_list_len,
});
if !preserve_frame_state {
discard_frame_state_for_pipelines.push(pipeline_id);
}
}
SceneMsg::SetRootPipeline(pipeline_id) => {
scene.set_root_pipeline_id(pipeline_id);
@ -737,6 +744,7 @@ impl SceneBuilderThread {
blob_rasterizer: replace(&mut txn.blob_rasterizer, None),
frame_ops: replace(&mut txn.frame_ops, Vec::new()),
removed_pipelines,
discard_frame_state_for_pipelines,
notifications: replace(&mut txn.notifications, Vec::new()),
interner_updates,
scene_build_start_time,

View File

@ -121,7 +121,7 @@
#include "vm/JSAtom-inl.h" // for AtomToId, ValueToId
#include "vm/JSContext-inl.h" // for JSContext::check
#include "vm/JSObject-inl.h" // for JSObject::isCallable, NewTenuredObjectWithGivenProto
#include "vm/JSScript-inl.h" // for JSScript::isDebuggee, JSScript
#include "vm/JSScript-inl.h" // for JSScript::isDebuggee, JSScript
#include "vm/NativeObject-inl.h" // for NativeObject::ensureDenseInitializedLength
#include "vm/ObjectOperations-inl.h" // for GetProperty, HasProperty
#include "vm/Realm-inl.h" // for AutoRealm::AutoRealm
@ -6075,6 +6075,7 @@ const JSPropertySpec Debugger::properties[] = {
JS_DEBUG_PSGS("collectCoverageInfo", getCollectCoverageInfo,
setCollectCoverageInfo),
JS_DEBUG_PSG("memory", getMemory),
JS_STRING_SYM_PS(toStringTag, "Debugger", JSPROP_READONLY),
JS_PS_END};
const JSFunctionSpec Debugger::methods[] = {

View File

@ -2920,8 +2920,8 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {
MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
TriggerResult trigger = checkHeapThreshold(
zone->gcHeapSize, zone->gcHeapThreshold, zone->isCollecting());
TriggerResult trigger =
checkHeapThreshold(zone, zone->gcHeapSize, zone->gcHeapThreshold);
if (trigger.kind == TriggerKind::None) {
return;
@ -2990,8 +2990,7 @@ bool GCRuntime::maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
return false;
}
TriggerResult trigger =
checkHeapThreshold(heap, threshold, zone->isCollecting());
TriggerResult trigger = checkHeapThreshold(zone, heap, threshold);
if (trigger.kind == TriggerKind::None) {
return false;
}
@ -3010,22 +3009,29 @@ bool GCRuntime::maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
return true;
}
TriggerResult GCRuntime::checkHeapThreshold(const HeapSize& heapSize,
const HeapThreshold& heapThreshold,
bool isCollecting) {
TriggerResult GCRuntime::checkHeapThreshold(
Zone* zone, const HeapSize& heapSize, const HeapThreshold& heapThreshold) {
size_t usedBytes = heapSize.bytes();
size_t thresholdBytes = heapThreshold.bytes();
if (usedBytes < thresholdBytes) {
return TriggerResult{TriggerKind::None, 0, 0};
}
size_t niThreshold = thresholdBytes * tunables.nonIncrementalFactor();
size_t niThreshold = heapThreshold.nonIncrementalBytes(zone, tunables);
if (usedBytes >= niThreshold) {
// We have passed the non-incremental threshold: immediately trigger a
// non-incremental GC.
return TriggerResult{TriggerKind::NonIncremental, usedBytes, niThreshold};
}
// Don't trigger incremental slices during background sweeping or decommit, as
// these will have no effect. A slice will be triggered automatically when
// these tasks finish.
if (zone->wasGCStarted() &&
(state() == State::Finalize || state() == State::Decommit)) {
return TriggerResult{TriggerKind::None, 0, 0};
}
// Start or continue an in progress incremental GC.
return TriggerResult{TriggerKind::Incremental, usedBytes, thresholdBytes};
}
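For concreteness, a hedged walk-through of the refactored decision order, with invented numbers (not the shipped tunables):

// Illustrative values only.
size_t usedBytes = 120u << 20;       // 120 MiB currently allocated
size_t thresholdBytes = 100u << 20;  // incremental trigger threshold
size_t niThreshold = 150u << 20;     // non-incremental threshold
// usedBytes >= thresholdBytes but < niThreshold
//   -> TriggerResult{TriggerKind::Incremental, usedBytes, thresholdBytes}.
// At 160 MiB, usedBytes >= niThreshold
//   -> TriggerResult{TriggerKind::NonIncremental, usedBytes, niThreshold}.
// And if the zone's GC has already started while the runtime is in
// State::Finalize or State::Decommit, the new early return yields
// TriggerKind::None: a slice requested now would be a no-op until those
// background tasks finish.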
@ -3294,7 +3300,6 @@ void GCRuntime::sweepBackgroundThings(ZoneList& zones) {
emptyArenas = emptyArenas->next;
releaseArena(arena, lock);
}
zone->updateGCThresholds(*this, invocationKind, lock);
}
}
}
@ -5392,7 +5397,6 @@ IncrementalProgress GCRuntime::endSweepingSweepGroup(JSFreeOp* fop,
}
AutoLockGC lock(this);
zone->changeGCState(Zone::Sweep, Zone::Finished);
zone->updateGCThresholds(*this, invocationKind, lock);
zone->arenas.unmarkPreMarkedFreeCells();
}
@ -6257,13 +6261,17 @@ void GCRuntime::finishCollection() {
schedulingState.updateHighFrequencyMode(lastGCEndTime_, currentTime,
tunables);
for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
if (zone->isCollecting()) {
{
AutoLockGC lock(this);
for (GCZonesIter zone(this); !zone.done(); zone.next()) {
zone->changeGCState(Zone::Finished, Zone::NoGC);
zone->gcDelayBytes = 0;
zone->notifyObservingDebuggers();
zone->updateGCThresholds(*this, invocationKind, lock);
}
}
for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
MOZ_ASSERT(!zone->wasGCStarted());
MOZ_ASSERT(!zone->needsIncrementalBarrier());
MOZ_ASSERT(!zone->isOnList());
@ -6801,7 +6809,7 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
}
if (zone->gcHeapSize.bytes() >=
zone->gcHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
zone->gcHeapThreshold.nonIncrementalBytes(zone, tunables)) {
checkZoneIsScheduled(zone, reason, "GC bytes");
budget.makeUnlimited();
stats().nonincremental(AbortReason::GCBytesTrigger);
@ -6811,7 +6819,7 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
}
if (zone->mallocHeapSize.bytes() >=
zone->mallocHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
zone->mallocHeapThreshold.nonIncrementalBytes(zone, tunables)) {
checkZoneIsScheduled(zone, reason, "malloc bytes");
budget.makeUnlimited();
stats().nonincremental(AbortReason::MallocBytesTrigger);
@ -6821,7 +6829,7 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
}
if (zone->jitHeapSize.bytes() >=
zone->jitHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
zone->jitHeapThreshold.nonIncrementalBytes(zone, tunables)) {
checkZoneIsScheduled(zone, reason, "JIT code bytes");
budget.makeUnlimited();
stats().nonincremental(AbortReason::JitCodeBytesTrigger);

View File

@ -595,9 +595,8 @@ class GCRuntime {
private:
enum IncrementalResult { ResetIncremental = 0, Ok };
TriggerResult checkHeapThreshold(const HeapSize& heapSize,
const HeapThreshold& heapThreshold,
bool isCollecting);
TriggerResult checkHeapThreshold(Zone* zone, const HeapSize& heapSize,
const HeapThreshold& heapThreshold);
// Delete an empty zone after its contents have been merged.
void deleteEmptyZone(Zone* zone);

View File

@ -338,6 +338,20 @@ void GCSchedulingTunables::resetParameter(JSGCParamKey key,
}
}
size_t HeapThreshold::nonIncrementalBytes(
ZoneAllocator* zone, const GCSchedulingTunables& tunables) const {
size_t bytes = bytes_ * tunables.nonIncrementalFactor();
// Increase the non-incremental threshold when we start background sweeping
// for the zone. The splay latency benchmark depends on this to avoid pauses
// due to non-incremental GC.
if (zone->gcState() > ZoneAllocator::Sweep) {
bytes *= tunables.lowFrequencyHeapGrowth();
}
return bytes;
}
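A hedged numeric illustration of the scaling above (both factor values are invented, not the real defaults):

// Illustrative only: assume bytes_ = 100 MiB and both tunables return 1.5.
size_t bytes = size_t(100) << 20;  // bytes_                            -> 100 MiB
bytes = size_t(bytes * 1.5);       // * tunables.nonIncrementalFactor() -> 150 MiB
if (true /* zone->gcState() > ZoneAllocator::Sweep */) {
  bytes = size_t(bytes * 1.5);     // * tunables.lowFrequencyHeapGrowth() -> 225 MiB
}
// Raising the threshold once background sweeping starts gives the sweep time
// to reclaim memory before a non-incremental GC can be triggered.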
float HeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
float eagerTriggerFactor = highFrequencyGC
? HighFrequencyEagerAllocTriggerFactor

View File

@ -322,6 +322,7 @@
namespace js {
class AutoLockGC;
class ZoneAllocator;
class ZoneAllocPolicy;
namespace gc {
@ -751,13 +752,15 @@ class HeapThreshold {
HeapThreshold() = default;
// GC trigger threshold.
//
// TODO: This is currently read off-thread during parsing, but at some point
// we should be able to make this MainThreadData<>.
AtomicByteCount bytes_;
public:
size_t bytes() const { return bytes_; }
size_t nonIncrementalTriggerBytes(GCSchedulingTunables& tunables) const {
return bytes_ * tunables.nonIncrementalFactor();
}
size_t nonIncrementalBytes(ZoneAllocator* zone,
const GCSchedulingTunables& tunables) const;
float eagerAllocTrigger(bool highFrequencyGC) const;
};

View File

@ -20,7 +20,7 @@
#include "gc/ZoneAllocator.h"
#include "js/GCHashTable.h"
#include "vm/AtomsTable.h"
#include "vm/JSScript.h"
#include "vm/JSFunction.h"
#include "vm/TypeInference.h"
namespace js {

View File

@ -1637,13 +1637,13 @@ AttachDecision GetPropIRGenerator::tryAttachProxy(HandleObject obj,
static TypedThingLayout GetTypedThingLayout(const JSClass* clasp) {
if (IsTypedArrayClass(clasp)) {
return Layout_TypedArray;
return TypedThingLayout::TypedArray;
}
if (IsOutlineTypedObjectClass(clasp)) {
return Layout_OutlineTypedObject;
return TypedThingLayout::OutlineTypedObject;
}
if (IsInlineTypedObjectClass(clasp)) {
return Layout_InlineTypedObject;
return TypedThingLayout::InlineTypedObject;
}
MOZ_CRASH("Bad object class");
}
@ -2243,9 +2243,14 @@ AttachDecision GetPropIRGenerator::tryAttachTypedElement(
// Don't handle out-of-bounds accesses here because we have to ensure the
// |undefined| type is monitored. See also tryAttachTypedArrayNonInt32Index.
writer.loadTypedElementResult(objId, indexId, layout,
TypedThingElementType(obj),
/* handleOOB = */ false);
if (layout == TypedThingLayout::TypedArray) {
writer.loadTypedArrayElementResult(objId, indexId,
TypedThingElementType(obj),
/* handleOOB = */ false);
} else {
writer.loadTypedObjectElementResult(objId, indexId, layout,
TypedThingElementType(obj));
}
// Reading from Uint32Array may produce an int32 now but a double value
// later, so ensure we monitor the result.
@ -2272,13 +2277,10 @@ AttachDecision GetPropIRGenerator::tryAttachTypedArrayNonInt32Index(
ValOperandId keyId = getElemKeyValueId();
Int32OperandId indexId = writer.guardToTypedArrayIndex(keyId);
TypedThingLayout layout = GetTypedThingLayout(obj->getClass());
writer.guardShapeForClass(objId, obj->as<TypedArrayObject>().shape());
writer.loadTypedElementResult(objId, indexId, layout,
TypedThingElementType(obj),
/* handleOOB = */ true);
writer.loadTypedArrayElementResult(objId, indexId, TypedThingElementType(obj),
/* handleOOB = */ true);
// Always monitor the result when out-of-bounds accesses are expected.
writer.typeMonitorResult();

View File

@ -419,10 +419,10 @@ enum class GuardClassKind : uint8_t {
JSObject* NewWrapperWithObjectShape(JSContext* cx, HandleNativeObject obj);
// Enum for stubs handling a combination of typed arrays and typed objects.
enum TypedThingLayout : uint8_t {
Layout_TypedArray,
Layout_OutlineTypedObject,
Layout_InlineTypedObject
enum class TypedThingLayout : uint8_t {
TypedArray,
OutlineTypedObject,
InlineTypedObject
};
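The substantive part of this hunk is the switch to a scoped enum: the enumerators lose their implicit conversion to integers and must be qualified at every use site, which is what the bulk of the related C++ changes below do. A minimal sketch (Example is a hypothetical function, not from the patch):

void Example(TypedThingLayout layout) {
  // int i = layout;      // no longer compiles: no implicit int conversion
  // if (layout == 0) {}  // no longer compiles: no comparison with int
  if (layout == TypedThingLayout::TypedArray) {
    // Qualified enumerators also make the old Layout_ prefix unnecessary.
  }
}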
void LoadShapeWrapperContents(MacroAssembler& masm, Register obj, Register dst,

View File

@ -3940,6 +3940,22 @@ bool CacheIRCompiler::emitLoadTypedElementResult(ObjOperandId objId,
return true;
}
bool CacheIRCompiler::emitLoadTypedArrayElementResult(ObjOperandId objId,
Int32OperandId indexId,
Scalar::Type elementType,
bool handleOOB) {
return emitLoadTypedElementResult(
objId, indexId, TypedThingLayout::TypedArray, elementType, handleOOB);
}
bool CacheIRCompiler::emitLoadTypedObjectElementResult(
ObjOperandId objId, Int32OperandId indexId, TypedThingLayout layout,
Scalar::Type elementType) {
return emitLoadTypedElementResult(objId, indexId, layout, elementType,
/* handleOOB = */ false);
}
bool CacheIRCompiler::emitStoreTypedObjectScalarProperty(
ObjOperandId objId, uint32_t offsetOffset, TypedThingLayout layout,
Scalar::Type type, uint32_t rhsId) {
@ -5585,13 +5601,13 @@ bool CacheIRCompiler::emitBooleanToString(Int32OperandId inputId,
void js::jit::LoadTypedThingData(MacroAssembler& masm, TypedThingLayout layout,
Register obj, Register result) {
switch (layout) {
case Layout_TypedArray:
case TypedThingLayout::TypedArray:
masm.loadPtr(Address(obj, TypedArrayObject::dataOffset()), result);
break;
case Layout_OutlineTypedObject:
case TypedThingLayout::OutlineTypedObject:
masm.loadPtr(Address(obj, OutlineTypedObject::offsetOfData()), result);
break;
case Layout_InlineTypedObject:
case TypedThingLayout::InlineTypedObject:
masm.computeEffectiveAddress(
Address(obj, InlineTypedObject::offsetOfDataStart()), result);
break;
@ -5604,11 +5620,11 @@ void js::jit::LoadTypedThingLength(MacroAssembler& masm,
TypedThingLayout layout, Register obj,
Register result) {
switch (layout) {
case Layout_TypedArray:
case TypedThingLayout::TypedArray:
masm.unboxInt32(Address(obj, TypedArrayObject::lengthOffset()), result);
break;
case Layout_OutlineTypedObject:
case Layout_InlineTypedObject:
case TypedThingLayout::OutlineTypedObject:
case TypedThingLayout::InlineTypedObject:
masm.loadTypedObjectLength(obj, result);
break;
default:

View File

@ -715,6 +715,10 @@ class MOZ_RAII CacheIRCompiler {
!allocator.isDeadAfterInstruction(objId);
}
bool emitLoadTypedElementResult(ObjOperandId objId, Int32OperandId indexId,
TypedThingLayout layout,
Scalar::Type elementType, bool handleOOB);
void emitStoreTypedObjectReferenceProp(ValueOperand val, ReferenceType type,
const Address& dest, Register scratch);

View File

@ -137,7 +137,7 @@
- name: GuardToTypedArrayIndex
shared: true
transpile: false
transpile: true
args:
input: ValId
result: Int32Id
@ -861,7 +861,16 @@
obj: ObjId
index: Int32Id
- name: LoadTypedElementResult
- name: LoadTypedArrayElementResult
shared: true
transpile: true
args:
obj: ObjId
index: Int32Id
elementType: ScalarTypeImm
handleOOB: BoolImm
- name: LoadTypedObjectElementResult
shared: true
transpile: false
args:
@ -869,7 +878,6 @@
index: Int32Id
layout: TypedThingLayoutImm
elementType: ScalarTypeImm
handleOOB: BoolImm
- name: LoadInt32ArrayLengthResult
shared: true

View File

@ -63,13 +63,13 @@ class MOZ_RAII CacheIROpsJitSpewer {
}
void spewTypedThingLayoutImm(const char* name, TypedThingLayout layout) {
switch (layout) {
case Layout_TypedArray:
case TypedThingLayout::TypedArray:
out_.printf("%s TypedArray", name);
return;
case Layout_OutlineTypedObject:
case TypedThingLayout::OutlineTypedObject:
out_.printf("%s OutlineTypedObject", name);
return;
case Layout_InlineTypedObject:
case TypedThingLayout::InlineTypedObject:
out_.printf("%s InlineTypedObject", name);
return;
}

View File

@ -167,6 +167,15 @@ bool WarpCacheIRTranspiler::emitGuardToInt32Index(ValOperandId inputId,
return defineOperand(resultId, ins);
}
bool WarpCacheIRTranspiler::emitGuardToTypedArrayIndex(
ValOperandId inputId, Int32OperandId resultId) {
MDefinition* input = getOperand(inputId);
auto* ins = MTypedArrayIndexToInt32::New(alloc(), input);
current->add(ins);
return defineOperand(resultId, ins);
}
bool WarpCacheIRTranspiler::emitLoadEnclosingEnvironment(
ObjOperandId objId, ObjOperandId resultId) {
MDefinition* env = getOperand(objId);
@ -316,6 +325,39 @@ bool WarpCacheIRTranspiler::emitLoadDenseElementResult(ObjOperandId objId,
return true;
}
bool WarpCacheIRTranspiler::emitLoadTypedArrayElementResult(
ObjOperandId objId, Int32OperandId indexId, Scalar::Type elementType,
bool handleOOB) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
if (handleOOB) {
bool allowDouble = true;
auto* load = MLoadTypedArrayElementHole::New(alloc(), obj, index,
elementType, allowDouble);
current->add(load);
setResult(load);
return true;
}
auto* length = MTypedArrayLength::New(alloc(), obj);
current->add(length);
index = addBoundsCheck(index, length);
auto* elements = MTypedArrayElements::New(alloc(), obj);
current->add(elements);
auto* load = MLoadUnboxedScalar::New(alloc(), elements, index, elementType);
// TODO: Uint32 always loaded as double.
load->setResultType(MIRTypeForTypedArrayRead(elementType, true));
current->add(load);
setResult(load);
return true;
}
bool WarpCacheIRTranspiler::emitLoadStringCharResult(StringOperandId strId,
Int32OperandId indexId) {
MDefinition* str = getOperand(strId);

View File

@ -10224,6 +10224,10 @@ static JSObject* NewGlobalObject(JSContext* cx, JS::RealmOptions& options,
/* Initialize FakeDOMObject.prototype */
InitDOMObject(domProto);
if (!DefineToStringTag(cx, glob, cx->names().global)) {
return nullptr;
}
JS_FireOnNewGlobalObject(cx, glob);
}

View File

@ -394,6 +394,7 @@ const JSClass SavedFrame::protoClass_ = {
JS_PSG("asyncCause", SavedFrame::asyncCauseProperty, 0),
JS_PSG("asyncParent", SavedFrame::asyncParentProperty, 0),
JS_PSG("parent", SavedFrame::parentProperty, 0),
JS_STRING_SYM_PS(toStringTag, "SavedFrame", JSPROP_READONLY),
JS_PS_END};
/* static */

View File

@ -4,17 +4,25 @@
const Cm = Components.manager;
const {Services} = ChromeUtils.import("resource://gre/modules/Services.jsm");
const {XPCOMUtils} = ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm");
const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
const { XPCOMUtils } = ChromeUtils.import(
"resource://gre/modules/XPCOMUtils.jsm"
);
var OnRefTestLoad, OnRefTestUnload;
XPCOMUtils.defineLazyServiceGetter(this, "resProto",
"@mozilla.org/network/protocol;1?name=resource",
"nsISubstitutingProtocolHandler");
XPCOMUtils.defineLazyServiceGetter(
this,
"resProto",
"@mozilla.org/network/protocol;1?name=resource",
"nsISubstitutingProtocolHandler"
);
XPCOMUtils.defineLazyServiceGetter(this, "aomStartup",
"@mozilla.org/addons/addon-manager-startup;1",
"amIAddonManagerStartup");
XPCOMUtils.defineLazyServiceGetter(
this,
"aomStartup",
"@mozilla.org/addons/addon-manager-startup;1",
"amIAddonManagerStartup"
);
function processTerminated() {
return new Promise(resolve => {
@ -29,35 +37,46 @@ function processTerminated() {
function startAndroid(win) {
// Add setTimeout here because windows.innerWidth/Height are not set yet.
win.setTimeout(function() {OnRefTestLoad(win);}, 0);
win.setTimeout(function() {
OnRefTestLoad(win);
}, 0);
}
var WindowListener = {
onOpenWindow: function(xulWin) {
Services.wm.removeListener(WindowListener);
let win = xulWin.docShell.domWindow;
win.addEventListener("load", function listener() {
// Load into any existing windows.
for (win of Services.wm.getEnumerator("navigator:browser")) {
break;
}
win.addEventListener("pageshow", function() {
startAndroid(win);
}, {once: true});
}, {once: true});
function GetMainWindow() {
let win = Services.wm.getMostRecentWindow("navigator:browser");
if (!win) {
// There is no navigator:browser in the geckoview TestRunnerActivity;
// try navigator.geckoview instead.
win = Services.wm.getMostRecentWindow("navigator:geckoview");
}
};
return win;
}
this.reftest = class extends ExtensionAPI {
onStartup() {
let uri = Services.io.newURI("chrome/reftest/res/", null, this.extension.rootURI);
resProto.setSubstitutionWithFlags("reftest", uri, resProto.ALLOW_CONTENT_ACCESS);
let uri = Services.io.newURI(
"chrome/reftest/res/",
null,
this.extension.rootURI
);
resProto.setSubstitutionWithFlags(
"reftest",
uri,
resProto.ALLOW_CONTENT_ACCESS
);
const manifestURI = Services.io.newURI("manifest.json", null, this.extension.rootURI);
const manifestURI = Services.io.newURI(
"manifest.json",
null,
this.extension.rootURI
);
this.chromeHandle = aomStartup.registerChrome(manifestURI, [
["content", "reftest", "chrome/reftest/content/", "contentaccessible=yes"],
[
"content",
"reftest",
"chrome/reftest/content/",
"contentaccessible=yes",
],
]);
// Starting tests is handled quite differently on android and desktop.
@ -66,19 +85,24 @@ this.reftest = class extends ExtensionAPI {
// On desktop, a separate window (dummy) is created and explicitly given
// focus (see bug 859339 for details), then tests are launched in a new
// top-level window.
let win = Services.wm.getMostRecentWindow("navigator:browser");
if (!win) {
// There is no navigator:browser in the geckoview TestRunnerActivity;
// try navigator.geckoview instead.
win = Services.wm.getMostRecentWindow("navigator:geckoview");
}
let win = GetMainWindow();
if (Services.appinfo.OS == "Android") {
({OnRefTestLoad, OnRefTestUnload} = ChromeUtils.import("resource://reftest/reftest.jsm"));
({ OnRefTestLoad, OnRefTestUnload } = ChromeUtils.import(
"resource://reftest/reftest.jsm"
));
if (win) {
startAndroid(win);
} else {
Services.wm.addListener(WindowListener);
// The window type parameter is only available once the window's document
// element has been created. The main window has already been created
// however and it is in an in-between state which means that you can't
// find it by its type nor will domwindowcreated be fired.
// So we listen for initial-document-element-inserted, which
// indicates when it's okay to search for the main window by type again.
Services.obs.addObserver(function observer(aSubject, aTopic, aData) {
Services.obs.removeObserver(observer, aTopic);
startAndroid(GetMainWindow());
}, "initial-document-element-inserted");
}
return;
}
@ -86,13 +110,20 @@ this.reftest = class extends ExtensionAPI {
Services.io.manageOfflineStatus = false;
Services.io.offline = false;
let dummy = Services.ww.openWindow(null, "about:blank", "dummy",
"chrome,dialog=no,left=800,height=200,width=200,all",null);
let dummy = Services.ww.openWindow(
null,
"about:blank",
"dummy",
"chrome,dialog=no,left=800,height=200,width=200,all",
null
);
dummy.onload = async function() {
// Close pre-existing window
win.close();
const {PerTestCoverageUtils} = ChromeUtils.import("resource://reftest/PerTestCoverageUtils.jsm");
const { PerTestCoverageUtils } = ChromeUtils.import(
"resource://reftest/PerTestCoverageUtils.jsm"
);
if (PerTestCoverageUtils.enabled) {
// In PerTestCoverage mode, wait for the process belonging to the window we just closed
// to be terminated, to avoid its shutdown interfering when we reset the counters.
@ -100,8 +131,13 @@ this.reftest = class extends ExtensionAPI {
}
dummy.focus();
Services.ww.openWindow(null, "chrome://reftest/content/reftest.xhtml",
"_blank", "chrome,dialog=no,all", {});
Services.ww.openWindow(
null,
"chrome://reftest/content/reftest.xhtml",
"_blank",
"chrome,dialog=no,all",
{}
);
};
}

View File

@ -571,6 +571,11 @@
value: 150
mirror: always
- name: apz.windows.force_disable_direct_manipulation
type: RelaxedAtomicBool
value: false
mirror: always
- name: apz.x_skate_highmem_adjust
type: AtomicFloat
value: 0.0f

View File

@ -15,40 +15,28 @@
namespace mozilla {
namespace baseprofiler {
// 65536 bytes should be plenty for a single backtrace.
static constexpr auto DuplicationBufferBytes = MakePowerOfTwo32<65536>();
ProfileBuffer::ProfileBuffer(BlocksRingBuffer& aBuffer, PowerOfTwo32 aCapacity)
: mEntries(aBuffer),
mDuplicationBuffer(MakeUnique<BlocksRingBuffer::Byte[]>(
DuplicationBufferBytes.Value())) {
// Only ProfileBuffer should control this buffer, and it should be empty when
// there is no ProfileBuffer using it.
MOZ_ASSERT(mEntries.BufferLength().isNothing());
// Allocate the requested capacity.
mEntries.Set(aCapacity);
}
ProfileBuffer::ProfileBuffer(BlocksRingBuffer& aBuffer) : mEntries(aBuffer) {
// Assume the given buffer is not empty.
MOZ_ASSERT(mEntries.BufferLength().isSome());
ProfileBuffer::ProfileBuffer(ProfileChunkedBuffer& aBuffer)
: mEntries(aBuffer) {
// Assume the given buffer is in-session.
MOZ_ASSERT(mEntries.IsInSession());
}
ProfileBuffer::~ProfileBuffer() {
// Only ProfileBuffer controls this buffer, and it should be empty when there
// is no ProfileBuffer using it.
mEntries.Reset();
MOZ_ASSERT(mEntries.BufferLength().isNothing());
mEntries.ResetChunkManager();
MOZ_ASSERT(!mEntries.IsInSession());
}
/* static */
ProfileBufferBlockIndex ProfileBuffer::AddEntry(
BlocksRingBuffer& aBlocksRingBuffer, const ProfileBufferEntry& aEntry) {
ProfileChunkedBuffer& aProfileChunkedBuffer,
const ProfileBufferEntry& aEntry) {
switch (aEntry.GetKind()) {
# define SWITCH_KIND(KIND, TYPE, SIZE) \
case ProfileBufferEntry::Kind::KIND: { \
return aBlocksRingBuffer.PutFrom(&aEntry, 1 + (SIZE)); \
break; \
# define SWITCH_KIND(KIND, TYPE, SIZE) \
case ProfileBufferEntry::Kind::KIND: { \
return aProfileChunkedBuffer.PutFrom(&aEntry, 1 + (SIZE)); \
break; \
}
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(SWITCH_KIND)
@ -67,8 +55,9 @@ uint64_t ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
/* static */
ProfileBufferBlockIndex ProfileBuffer::AddThreadIdEntry(
BlocksRingBuffer& aBlocksRingBuffer, int aThreadId) {
return AddEntry(aBlocksRingBuffer, ProfileBufferEntry::ThreadId(aThreadId));
ProfileChunkedBuffer& aProfileChunkedBuffer, int aThreadId) {
return AddEntry(aProfileChunkedBuffer,
ProfileBufferEntry::ThreadId(aThreadId));
}
uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) {
@ -171,7 +160,8 @@ void ProfileBuffer::CollectOverheadStats(TimeDuration aSamplingTime,
ProfilerBufferInfo ProfileBuffer::GetProfilerBufferInfo() const {
return {BufferRangeStart(),
BufferRangeEnd(),
mEntries.BufferLength()->Value() / 8, // 8 bytes per entry.
static_cast<uint32_t>(*mEntries.BufferLength() /
8), // 8 bytes per entry.
mIntervalsNs,
mOverheadsNs,
mLockingsNs,

View File

@ -8,27 +8,24 @@
#include "ProfileBufferEntry.h"
#include "mozilla/BlocksRingBuffer.h"
#include "mozilla/Maybe.h"
#include "mozilla/PowerOfTwo.h"
#include "mozilla/ProfileBufferChunkManagerSingle.h"
#include "mozilla/ProfileChunkedBuffer.h"
namespace mozilla {
namespace baseprofiler {
// Class storing most profiling data in a BlocksRingBuffer.
// Class storing most profiling data in a ProfileChunkedBuffer.
//
// This class is used as a queue of entries which, after construction, never
// allocates. This makes it safe to use in the profiler's "critical section".
class ProfileBuffer final {
public:
// ProfileBuffer constructor
// @param aBuffer The empty BlocksRingBuffer to use as buffer manager.
// @param aCapacity The capacity of the buffer in memory.
ProfileBuffer(BlocksRingBuffer& aBuffer, PowerOfTwo32 aCapacity);
// ProfileBuffer constructor
// @param aBuffer The pre-filled BlocksRingBuffer to use as buffer manager.
explicit ProfileBuffer(BlocksRingBuffer& aBuffer);
// @param aBuffer The in-session ProfileChunkedBuffer to use as buffer
// manager.
explicit ProfileBuffer(ProfileChunkedBuffer& aBuffer);
~ProfileBuffer();
@ -91,21 +88,22 @@ class ProfileBuffer final {
ProfilerBufferInfo GetProfilerBufferInfo() const;
private:
// Add |aEntry| to the provider BlocksRingBuffer.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// Add |aEntry| to the provided ProfileChunkedBuffer.
// `static` because it may be used to add an entry to a `ProfileChunkedBuffer`
// that is not attached to a `ProfileBuffer`.
static ProfileBufferBlockIndex AddEntry(BlocksRingBuffer& aBlocksRingBuffer,
const ProfileBufferEntry& aEntry);
static ProfileBufferBlockIndex AddEntry(
ProfileChunkedBuffer& aProfileChunkedBuffer,
const ProfileBufferEntry& aEntry);
// Add a sample start (ThreadId) entry for aThreadId to the provided
// BlocksRingBuffer. Returns the position of the entry.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// ProfileChunkedBuffer. Returns the position of the entry.
// `static` because it may be used to add an entry to a `ProfileChunkedBuffer`
// that is not attached to a `ProfileBuffer`.
static ProfileBufferBlockIndex AddThreadIdEntry(
BlocksRingBuffer& aBlocksRingBuffer, int aThreadId);
ProfileChunkedBuffer& aProfileChunkedBuffer, int aThreadId);
// The circular-ring storage in which this ProfileBuffer stores its data.
BlocksRingBuffer& mEntries;
// The storage in which this ProfileBuffer stores its entries.
ProfileChunkedBuffer& mEntries;
public:
// `BufferRangeStart()` and `BufferRangeEnd()` return `uint64_t` values
@ -121,16 +119,21 @@ class ProfileBuffer final {
// - It is safe to try and read entries at any index strictly less than
// `BufferRangeEnd()` -- but note that these reads may fail by the time you
// request them, as old entries get overwritten by new ones.
uint64_t BufferRangeStart() const {
return mEntries.GetState().mRangeStart.ConvertToProfileBufferIndex();
}
uint64_t BufferRangeEnd() const {
return mEntries.GetState().mRangeEnd.ConvertToProfileBufferIndex();
}
uint64_t BufferRangeStart() const { return mEntries.GetState().mRangeStart; }
uint64_t BufferRangeEnd() const { return mEntries.GetState().mRangeEnd; }
private:
// Used when duplicating sleeping stacks (to avoid spurious mallocs).
const UniquePtr<BlocksRingBuffer::Byte[]> mDuplicationBuffer;
// 65536 bytes should be plenty for a single backtrace.
static constexpr auto WorkerBufferBytes = MakePowerOfTwo32<65536>();
// Single pre-allocated chunk (to avoid spurious mallocs), used when:
// - Duplicating sleeping stacks.
// - Adding JIT info.
// - Streaming stacks to JSON.
// Mutable because it's accessed from non-multithreaded const methods.
mutable ProfileBufferChunkManagerSingle mWorkerChunkManager{
ProfileBufferChunk::Create(ProfileBufferChunk::SizeofChunkMetadata() +
WorkerBufferBytes.Value())};
// Time from launch (ns) when first sampling was recorded.
double mFirstSamplingTimeNs = 0.0;

View File

@ -371,7 +371,7 @@ static void WriteSample(SpliceableJSONWriter& aWriter,
class EntryGetter {
public:
explicit EntryGetter(BlocksRingBuffer::Reader& aReader,
explicit EntryGetter(ProfileChunkedBuffer::Reader& aReader,
uint64_t aInitialReadPos = 0)
: mBlockIt(
aReader.At(ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
@ -429,6 +429,7 @@ class EntryGetter {
static_cast<ProfileBufferEntry::KindUnderlyingType>(
ProfileBufferEntry::Kind::MODERN_LIMIT));
if (type >= ProfileBufferEntry::Kind::LEGACY_LIMIT) {
aER.SetRemainingBytes(0);
return false;
}
// Here, we have a legacy item, we need to read it from the start.
@ -440,8 +441,8 @@ class EntryGetter {
}
ProfileBufferEntry mEntry;
BlocksRingBuffer::BlockIterator mBlockIt;
const BlocksRingBuffer::BlockIterator mBlockItEnd;
ProfileChunkedBuffer::BlockIterator mBlockIt;
const ProfileChunkedBuffer::BlockIterator mBlockItEnd;
};
// The following grammar shows legal sequences of profile buffer entries.
@ -588,10 +589,10 @@ void ProfileBuffer::StreamSamplesToJSON(SpliceableJSONWriter& aWriter,
UniqueStacks& aUniqueStacks) const {
UniquePtr<char[]> dynStrBuf = MakeUnique<char[]>(kMaxFrameKeyLength);
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is running");
mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"ProfileChunkedBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader);
@ -845,6 +846,8 @@ void ProfileBuffer::StreamMarkersToJSON(SpliceableJSONWriter& aWriter,
}
}
aWriter.EndArray();
} else {
aER.SetRemainingBytes(0);
}
});
}
@ -852,10 +855,10 @@ void ProfileBuffer::StreamMarkersToJSON(SpliceableJSONWriter& aWriter,
void ProfileBuffer::StreamProfilerOverheadToJSON(
SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime,
double aSinceTime) const {
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is running");
mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"ProfileChunkedBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader);
@ -1003,10 +1006,10 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
// error indicates a bug in the ProfileBuffer writing or the parser itself,
// or possibly flaky hardware.
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is running");
mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"ProfileChunkedBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader);
@ -1181,10 +1184,10 @@ static void AddPausedRange(SpliceableJSONWriter& aWriter, const char* aReason,
void ProfileBuffer::StreamPausedRangesToJSON(SpliceableJSONWriter& aWriter,
double aSinceTime) const {
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is running");
mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"ProfileChunkedBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader);
@ -1226,13 +1229,16 @@ bool ProfileBuffer::DuplicateLastSample(int aThreadId,
return false;
}
BlocksRingBuffer tempBuffer(BlocksRingBuffer::ThreadSafety::WithoutMutex,
mDuplicationBuffer.get(), DuplicationBufferBytes);
ProfileChunkedBuffer tempBuffer(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex, mWorkerChunkManager);
const bool ok = mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is running");
auto retrieveWorkerChunk = MakeScopeExit(
[&]() { mWorkerChunkManager.Reset(tempBuffer.GetAllChunks()); });
const bool ok = mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"ProfileChunkedBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader, *aLastSample);
@ -1345,7 +1351,7 @@ bool ProfileBuffer::DuplicateLastSample(int aThreadId,
aLastSample = Some(AddThreadIdEntry(aThreadId));
tempBuffer.Read([&](BlocksRingBuffer::Reader* aReader) {
tempBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader, "tempBuffer cannot be out-of-session");
EntryGetter e(*aReader);
@ -1360,55 +1366,9 @@ bool ProfileBuffer::DuplicateLastSample(int aThreadId,
}
void ProfileBuffer::DiscardSamplesBeforeTime(double aTime) {
const ProfileBufferBlockIndex firstBlockToKeep =
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader);
const ProfileBufferBlockIndex bufferStartPos = e.CurBlockIndex();
for (;;) {
// This block skips entries until we find the start of the next
// sample. This is useful in three situations.
//
// - The circular buffer overwrites old entries, so when we start
// parsing we might be in the middle of a sample, and we must skip
// forward to the start of the next sample.
//
// - We skip samples that don't have an appropriate ThreadId or Time.
//
// - We skip range Pause, Resume, CollectionStart, Marker, and
// CollectionEnd entries between samples.
while (e.Has()) {
if (e.Get().IsThreadId()) {
break;
}
e.Next();
}
if (!e.Has()) {
return bufferStartPos;
}
MOZ_RELEASE_ASSERT(e.Get().IsThreadId());
const ProfileBufferBlockIndex sampleStartPos = e.CurBlockIndex();
e.Next();
if (e.Has() && e.Get().IsTime()) {
double sampleTime = e.Get().GetDouble();
if (sampleTime >= aTime) {
// This is the first sample within the window of time that we want
// to keep. Throw away all samples before sampleStartPos and
// return.
return sampleStartPos;
}
}
}
});
mEntries.ClearBefore(firstBlockToKeep);
// This function does nothing!
// The duration limit will be removed from Firefox, see bug 1632365.
Unused << aTime;
}
// END ProfileBuffer

View File

@ -56,9 +56,9 @@ class ProfileBufferEntry {
// stored in a `ProfileBufferEntry`, as per the list in
// `FOR_EACH_PROFILE_BUFFER_ENTRY_KIND`.
//
// This byte is also used to identify entries in BlocksRingBuffer blocks, for
// both "legacy" entries that do contain a `ProfileBufferEntry`, and for new
// types of entries that may carry more data of different types.
// This byte is also used to identify entries in ProfileChunkedBuffer blocks,
// for both "legacy" entries that do contain a `ProfileBufferEntry`, and for
// new types of entries that may carry more data of different types.
// TODO: Eventually each type of "legacy" entry should be replaced with newer,
// more efficient kinds of entries (e.g., stack frames could be stored in one
// bigger entry, instead of multiple `ProfileBufferEntry`s); then we could

View File

@ -20,20 +20,21 @@ namespace baseprofiler {
ProfilerBacktrace::ProfilerBacktrace(
const char* aName, int aThreadId,
UniquePtr<BlocksRingBuffer> aBlocksRingBuffer,
UniquePtr<ProfileChunkedBuffer> aProfileChunkedBuffer,
UniquePtr<ProfileBuffer> aProfileBuffer)
: mName(strdup(aName)),
mThreadId(aThreadId),
mBlocksRingBuffer(std::move(aBlocksRingBuffer)),
mProfileChunkedBuffer(std::move(aProfileChunkedBuffer)),
mProfileBuffer(std::move(aProfileBuffer)) {
MOZ_ASSERT(
!!mBlocksRingBuffer,
"ProfilerBacktrace only takes a non-null UniquePtr<BlocksRingBuffer>");
MOZ_ASSERT(!!mProfileChunkedBuffer,
"ProfilerBacktrace only takes a non-null "
"UniquePtr<ProfileChunkedBuffer>");
MOZ_ASSERT(
!!mProfileBuffer,
"ProfilerBacktrace only takes a non-null UniquePtr<ProfileBuffer>");
MOZ_ASSERT(!mBlocksRingBuffer->IsThreadSafe(),
"ProfilerBacktrace only takes a non-thread-safe BlocksRingBuffer");
MOZ_ASSERT(
!mProfileChunkedBuffer->IsThreadSafe(),
"ProfilerBacktrace only takes a non-thread-safe ProfileChunkedBuffer");
}
ProfilerBacktrace::~ProfilerBacktrace() {}
@ -60,19 +61,20 @@ UniquePtr<baseprofiler::ProfilerBacktrace, Destructor>
ProfileBufferEntryReader::
Deserializer<UniquePtr<baseprofiler::ProfilerBacktrace, Destructor>>::Read(
ProfileBufferEntryReader& aER) {
auto blocksRingBuffer = aER.ReadObject<UniquePtr<BlocksRingBuffer>>();
if (!blocksRingBuffer) {
auto profileChunkedBuffer = aER.ReadObject<UniquePtr<ProfileChunkedBuffer>>();
if (!profileChunkedBuffer) {
return nullptr;
}
MOZ_ASSERT(!blocksRingBuffer->IsThreadSafe(),
"ProfilerBacktrace only stores non-thread-safe BlocksRingBuffers");
MOZ_ASSERT(
!profileChunkedBuffer->IsThreadSafe(),
"ProfilerBacktrace only stores non-thread-safe ProfileChunkedBuffers");
int threadId = aER.ReadObject<int>();
std::string name = aER.ReadObject<std::string>();
auto profileBuffer =
MakeUnique<baseprofiler::ProfileBuffer>(*blocksRingBuffer);
MakeUnique<baseprofiler::ProfileBuffer>(*profileChunkedBuffer);
return UniquePtr<baseprofiler::ProfilerBacktrace, Destructor>{
new baseprofiler::ProfilerBacktrace(name.c_str(), threadId,
std::move(blocksRingBuffer),
std::move(profileChunkedBuffer),
std::move(profileBuffer))};
};

View File

@ -11,7 +11,7 @@
namespace mozilla {
class BlocksRingBuffer;
class ProfileChunkedBuffer;
class TimeStamp;
namespace baseprofiler {
@ -25,7 +25,7 @@ class UniqueStacks;
class ProfilerBacktrace {
public:
ProfilerBacktrace(const char* aName, int aThreadId,
UniquePtr<BlocksRingBuffer> aBlocksRingBuffer,
UniquePtr<ProfileChunkedBuffer> aProfileChunkedBuffer,
UniquePtr<ProfileBuffer> aProfileBuffer);
~ProfilerBacktrace();
@ -46,9 +46,9 @@ class ProfilerBacktrace {
UniqueFreePtr<char> mName;
int mThreadId;
// `BlocksRingBuffer` in which `mProfileBuffer` stores its data; must be
// `ProfileChunkedBuffer` in which `mProfileBuffer` stores its data; must be
// located before `mProfileBuffer` so that it's destroyed after.
UniquePtr<BlocksRingBuffer> mBlocksRingBuffer;
UniquePtr<ProfileChunkedBuffer> mProfileChunkedBuffer;
UniquePtr<ProfileBuffer> mProfileBuffer;
};
@ -63,7 +63,7 @@ struct ProfileBufferEntryWriter::Serializer<baseprofiler::ProfilerBacktrace> {
// No backtrace buffer.
return ULEB128Size<Length>(0);
}
auto bufferBytes = SumBytes(*aBacktrace.mBlocksRingBuffer);
auto bufferBytes = SumBytes(*aBacktrace.mProfileChunkedBuffer);
if (bufferBytes == 0) {
// Empty backtrace buffer.
return ULEB128Size<Length>(0);
@ -76,12 +76,12 @@ struct ProfileBufferEntryWriter::Serializer<baseprofiler::ProfilerBacktrace> {
static void Write(ProfileBufferEntryWriter& aEW,
const baseprofiler::ProfilerBacktrace& aBacktrace) {
if (!aBacktrace.mProfileBuffer ||
SumBytes(aBacktrace.mBlocksRingBuffer) == 0) {
SumBytes(aBacktrace.mProfileChunkedBuffer) == 0) {
// No backtrace buffer, or it is empty.
aEW.WriteULEB128<Length>(0);
return;
}
aEW.WriteObject(aBacktrace.mBlocksRingBuffer);
aEW.WriteObject(aBacktrace.mProfileChunkedBuffer);
aEW.WriteObject(aBacktrace.mThreadId);
aEW.WriteObject(WrapProfileBufferUnownedCString(aBacktrace.mName.get()));
}

View File

@ -49,6 +49,9 @@
# include "mozilla/BaseProfilerDetail.h"
# include "mozilla/DoubleConversion.h"
# include "mozilla/Printf.h"
# include "mozilla/ProfileBufferChunkManagerSingle.h"
# include "mozilla/ProfileBufferChunkManagerWithLocalLimit.h"
# include "mozilla/ProfileChunkedBuffer.h"
# include "mozilla/Services.h"
# include "mozilla/Span.h"
# include "mozilla/StackWalk.h"
@ -284,7 +287,7 @@ class CorePS {
// functions guarded by gPSMutex as well as others without safety (e.g.,
// profiler_add_marker). It is *not* used inside the critical section of
// the sampler, because mutexes cannot be used there.
mCoreBlocksRingBuffer(BlocksRingBuffer::ThreadSafety::WithMutex)
mCoreBuffer(ProfileChunkedBuffer::ThreadSafety::WithMutex)
# ifdef USE_LUL_STACKWALK
,
mLul(nullptr)
@ -349,7 +352,7 @@ class CorePS {
PS_GET_LOCKLESS(TimeStamp, ProcessStartTime)
// No PSLockRef is needed for this field because it's thread-safe.
PS_GET_LOCKLESS(BlocksRingBuffer&, CoreBlocksRingBuffer)
PS_GET_LOCKLESS(ProfileChunkedBuffer&, CoreBuffer)
PS_GET(const Vector<UniquePtr<RegisteredThread>>&, RegisteredThreads)
@ -458,16 +461,16 @@ class CorePS {
// The time that the process started.
const TimeStamp mProcessStartTime;
// The thread-safe blocks-oriented ring buffer into which all profiling data
// is recorded.
// The thread-safe blocks-oriented buffer into which all profiling data is
// recorded.
// ActivePS controls the lifetime of the underlying contents buffer: When
// ActivePS does not exist, mCoreBlocksRingBuffer is empty and rejects all
// reads&writes; see ActivePS for further details.
// ActivePS does not exist, mCoreBuffer is empty and rejects all reads&writes;
// see ActivePS for further details.
// Note: This needs to live here outside of ActivePS, because some producers
// are indirectly controlled (e.g., by atomic flags) and therefore may still
// attempt to write some data shortly after ActivePS has shut down and deleted
// the underlying buffer in memory.
BlocksRingBuffer mCoreBlocksRingBuffer;
ProfileChunkedBuffer mCoreBuffer;
// Info on all the registered threads.
// ThreadIds in mRegisteredThreads are unique.
@ -523,6 +526,26 @@ class ActivePS {
return aFeatures;
}
constexpr static uint32_t bytesPerEntry = 8;
// We need to decide how many chunks of what size we want to fit in the given
// total maximum capacity for this process, in the (likely) context of
// multiple processes making the same choice, with an inter-process
// mechanism to control the overall memory limit.
// Ideally we want at least 2 unreleased chunks to work with (1 current and 1
// next), and 2 released chunks (so that one can be recycled when old, leaving
// one with some data).
constexpr static uint32_t minimumNumberOfChunks = 4;
// And we want to limit chunks to a maximum size, which is a compromise
// between:
// - A big size, which helps with reducing the rate of allocations and IPCs.
// - A small size, which helps with equalizing the duration of recorded data
// (as the inter-process controller will discard the oldest chunks in all
// Firefox processes).
constexpr static uint32_t maximumChunkSize = 1024 * 1024;
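
To make the effect of these constants concrete, here is a hedged stand-alone re-computation of the chunk size chosen in the constructor below; the helper name `ChunkSize` is illustrative and not part of this patch:

#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr uint32_t bytesPerEntry = 8;
constexpr uint32_t minimumNumberOfChunks = 4;
constexpr uint32_t maximumChunkSize = 1024 * 1024;

// Mirrors std::min(aCapacity.Value() * bytesPerEntry / minimumNumberOfChunks,
//                  maximumChunkSize) from the ActivePS constructor.
constexpr uint32_t ChunkSize(uint32_t aCapacityInEntries) {
  return std::min(aCapacityInEntries * bytesPerEntry / minimumNumberOfChunks,
                  maximumChunkSize);
}

int main() {
  // 128Ki entries = 1MiB total: 4 chunks of 256KiB (minimum chunk count).
  printf("%u\n", ChunkSize(128 * 1024));  // 262144
  // 1Mi entries = 8MiB total: chunk size clamped to 1MiB, so 8 chunks.
  printf("%u\n", ChunkSize(1024 * 1024));  // 1048576
}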
ActivePS(PSLockRef aLock, PowerOfTwo32 aCapacity, double aInterval,
uint32_t aFeatures, const char** aFilters, uint32_t aFilterCount,
const Maybe<double>& aDuration)
@ -531,9 +554,14 @@ class ActivePS {
mDuration(aDuration),
mInterval(aInterval),
mFeatures(AdjustFeatures(aFeatures, aFilterCount)),
// 8 bytes per entry.
mProfileBuffer(CorePS::CoreBlocksRingBuffer(),
PowerOfTwo32(aCapacity.Value() * 8)),
mProfileBufferChunkManager(
aCapacity.Value() * bytesPerEntry,
std::min(aCapacity.Value() * bytesPerEntry / minimumNumberOfChunks,
maximumChunkSize)),
mProfileBuffer([this]() -> ProfileChunkedBuffer& {
CorePS::CoreBuffer().SetChunkManager(mProfileBufferChunkManager);
return CorePS::CoreBuffer();
}()),
// The new sampler thread doesn't start sampling immediately because the
// main loop within Run() is blocked until this function's caller
// unlocks gPSMutex.
@ -553,7 +581,7 @@ class ActivePS {
}
}
~ActivePS() {}
~ActivePS() { CorePS::CoreBuffer().ResetChunkManager(); }
bool ThreadSelected(const char* aThreadName) {
if (mFilters.empty()) {
@ -674,6 +702,11 @@ class ActivePS {
PS_GET(const Vector<std::string>&, Filters)
static void FulfillChunkRequests(PSLockRef) {
MOZ_ASSERT(sInstance);
sInstance->mProfileBufferChunkManager.FulfillChunkRequests();
}
static ProfileBuffer& Buffer(PSLockRef) {
MOZ_ASSERT(sInstance);
return sInstance->mProfileBuffer;
@ -911,6 +944,9 @@ class ActivePS {
// Substrings of names of threads we want to profile.
Vector<std::string> mFilters;
// The chunk manager used by `mProfileBuffer` below.
ProfileBufferChunkManagerWithLocalLimit mProfileBufferChunkManager;
// The buffer into which all samples are recorded.
ProfileBuffer mProfileBuffer;
@ -2046,16 +2082,16 @@ void SamplerThread::Run() {
}();
// Use a local ProfileChunkedBuffer&ProfileBuffer to capture the stack.
// (This is to avoid touching the CorePS::BlocksRingBuffer lock while
// (This is to avoid touching the CorePS::CoreBuffer lock while
// a thread is suspended, because that thread could be working with
// the CorePS::BlocksRingBuffer as well.)
BlocksRingBuffer localBlocksRingBuffer(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
ProfileBuffer localProfileBuffer(localBlocksRingBuffer,
MakePowerOfTwo32<65536>());
// the CorePS::CoreBuffer as well.)
ProfileBufferChunkManagerSingle localChunkManager(65536);
ProfileChunkedBuffer localBuffer(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex, localChunkManager);
ProfileBuffer localProfileBuffer(localBuffer);
// Will be kept between collections, to know what each collection does.
auto previousState = localBlocksRingBuffer.GetState();
auto previousState = localBuffer.GetState();
// This will be positive if we are running behind schedule (sampling less
// frequently than desired) and negative if we are ahead of schedule.
@ -2155,28 +2191,21 @@ void SamplerThread::Run() {
});
// If data is complete, copy it into the global buffer.
auto state = localBlocksRingBuffer.GetState();
auto state = localBuffer.GetState();
if (state.mClearedBlockCount != previousState.mClearedBlockCount) {
LOG("Stack sample too big for local storage, needed %u bytes",
unsigned(
state.mRangeEnd.ConvertToProfileBufferIndex() -
previousState.mRangeEnd.ConvertToProfileBufferIndex()));
} else if (state.mRangeEnd.ConvertToProfileBufferIndex() -
previousState.mRangeEnd
.ConvertToProfileBufferIndex() >=
CorePS::CoreBlocksRingBuffer().BufferLength()->Value()) {
unsigned(state.mRangeEnd - previousState.mRangeEnd));
} else if (state.mRangeEnd - previousState.mRangeEnd >=
*CorePS::CoreBuffer().BufferLength()) {
LOG("Stack sample too big for profiler storage, needed %u bytes",
unsigned(
state.mRangeEnd.ConvertToProfileBufferIndex() -
previousState.mRangeEnd.ConvertToProfileBufferIndex()));
unsigned(state.mRangeEnd - previousState.mRangeEnd));
} else {
CorePS::CoreBlocksRingBuffer().AppendContents(
localBlocksRingBuffer);
CorePS::CoreBuffer().AppendContents(localBuffer);
}
// Clean up for the next run.
localBlocksRingBuffer.Clear();
previousState = localBlocksRingBuffer.GetState();
localBuffer.Clear();
previousState = localBuffer.GetState();
}
}
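
The loop above follows a capture-locally-then-publish pattern: the sample is written into a small lock-free local buffer while the target thread is suspended, and only copied into the shared buffer (under its mutex) after the thread has resumed. A simplified stand-alone model of that control flow, with a hypothetical `ScratchBuffer` standing in for the real profiler classes:

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for both the local and the shared profiler buffers.
struct ScratchBuffer {
  std::vector<char> bytes;
  size_t capacity;
  explicit ScratchBuffer(size_t aCapacity) : capacity(aCapacity) {}
  bool Append(const char* aSrc, size_t aLen) {
    if (bytes.size() + aLen > capacity) {
      return false;  // Models a sample that doesn't fit in local storage.
    }
    bytes.insert(bytes.end(), aSrc, aSrc + aLen);
    return true;
  }
  void Clear() { bytes.clear(); }
};

int main() {
  ScratchBuffer globalBuffer(1 << 16);
  ScratchBuffer localBuffer(1 << 10);  // Lock-free, single-threaded use.
  const char sample[] = "stack sample bytes";
  // 1. Capture into the local buffer; the sampled thread may be suspended
  //    here, so the shared buffer's mutex must not be touched.
  if (!localBuffer.Append(sample, sizeof(sample))) {
    printf("Stack sample too big for local storage\n");
  } else {
    // 2. Thread resumed; now it is safe to copy into the shared buffer.
    globalBuffer.Append(localBuffer.bytes.data(), localBuffer.bytes.size());
  }
  // 3. Reset local state for the next sampling iteration.
  localBuffer.Clear();
}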
@ -2190,6 +2219,11 @@ void SamplerThread::Run() {
# endif
TimeStamp threadsSampled = TimeStamp::NowUnfuzzed();
{
AUTO_PROFILER_STATS(Sampler_FulfillChunkRequests);
ActivePS::FulfillChunkRequests(lock);
}
buffer.CollectOverheadStats(delta, lockAcquired - sampleStart,
expiredMarkersCleaned - lockAcquired,
countersSampled - expiredMarkersCleaned,
@ -3274,10 +3308,10 @@ UniqueProfilerBacktrace profiler_get_backtrace() {
# endif
// 65536 bytes should be plenty for a single backtrace.
auto bufferManager = MakeUnique<BlocksRingBuffer>(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
auto buffer =
MakeUnique<ProfileBuffer>(*bufferManager, MakePowerOfTwo32<65536>());
auto bufferManager = MakeUnique<ProfileChunkedBuffer>(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
MakeUnique<ProfileBufferChunkManagerSingle>(65536));
auto buffer = MakeUnique<ProfileBuffer>(*bufferManager);
DoSyncSample(lock, *registeredThread, now, regs, *buffer.get());
@ -3313,7 +3347,7 @@ static void racy_profiler_add_marker(const char* aMarkerName,
? aPayload->GetStartTime()
: TimeStamp::NowUnfuzzed();
TimeDuration delta = origin - CorePS::ProcessStartTime();
CorePS::CoreBlocksRingBuffer().PutObjects(
CorePS::CoreBuffer().PutObjects(
ProfileBufferEntry::Kind::MarkerData, racyRegisteredThread->ThreadId(),
WrapProfileBufferUnownedCString(aMarkerName),
static_cast<uint32_t>(aCategoryPair), aPayload, delta.ToMilliseconds());
@ -3376,7 +3410,7 @@ void profiler_add_marker_for_thread(int aThreadId,
? aPayload->GetStartTime()
: TimeStamp::NowUnfuzzed();
TimeDuration delta = origin - CorePS::ProcessStartTime();
CorePS::CoreBlocksRingBuffer().PutObjects(
CorePS::CoreBuffer().PutObjects(
ProfileBufferEntry::Kind::MarkerData, aThreadId,
WrapProfileBufferUnownedCString(aMarkerName),
static_cast<uint32_t>(aCategoryPair), aPayload, delta.ToMilliseconds());

View File

@ -414,53 +414,53 @@ class BlocksRingBuffer {
// know the size already.
template <typename CallbackBytes, typename Callback>
auto ReserveAndPut(CallbackBytes aCallbackBytes, Callback&& aCallback) {
{ // Locked block.
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
if (MOZ_LIKELY(mMaybeUnderlyingBuffer)) {
const Length entryBytes = std::forward<CallbackBytes>(aCallbackBytes)();
const Length bufferBytes =
mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value();
MOZ_RELEASE_ASSERT(entryBytes <= bufferBytes - ULEB128Size(entryBytes),
"Entry would wrap and overwrite itself");
// Compute block size from the requested entry size.
const Length blockBytes = ULEB128Size(entryBytes) + entryBytes;
// We will put this new block at the end of the current buffer.
const ProfileBufferIndex blockIndex =
mNextWriteIndex.ConvertToProfileBufferIndex();
// Compute the end of this new block.
const ProfileBufferIndex blockEnd = blockIndex + blockBytes;
while (blockEnd >
mFirstReadIndex.ConvertToProfileBufferIndex() + bufferBytes) {
// About to trample on an old block.
ProfileBufferEntryReader reader = ReaderInBlockAt(mFirstReadIndex);
mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
// Move the buffer reading start past this cleared block.
mFirstReadIndex =
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
mFirstReadIndex.ConvertToProfileBufferIndex() +
ULEB128Size(reader.RemainingBytes()) +
reader.RemainingBytes());
}
// Store the new end of buffer.
mNextWriteIndex =
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(blockEnd);
mMaybeUnderlyingBuffer->mPushedBlockCount += 1;
// Finally, let aCallback write into the entry.
ProfileBufferEntryWriter entryWriter =
mMaybeUnderlyingBuffer->mBuffer.EntryWriterFromTo(blockIndex,
blockEnd);
entryWriter.WriteULEB128(entryBytes);
MOZ_ASSERT(entryWriter.RemainingBytes() == entryBytes);
#ifdef DEBUG
auto checkAllWritten = MakeScopeExit(
[&]() { MOZ_ASSERT(entryWriter.RemainingBytes() == 0); });
#endif // DEBUG
return std::forward<Callback>(aCallback)(&entryWriter);
Maybe<ProfileBufferEntryWriter> maybeEntryWriter;
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
if (MOZ_LIKELY(mMaybeUnderlyingBuffer)) {
const Length entryBytes = std::forward<CallbackBytes>(aCallbackBytes)();
MOZ_RELEASE_ASSERT(entryBytes > 0);
const Length bufferBytes =
mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value();
MOZ_RELEASE_ASSERT(entryBytes <= bufferBytes - ULEB128Size(entryBytes),
"Entry would wrap and overwrite itself");
// Compute block size from the requested entry size.
const Length blockBytes = ULEB128Size(entryBytes) + entryBytes;
// We will put this new block at the end of the current buffer.
const ProfileBufferIndex blockIndex =
mNextWriteIndex.ConvertToProfileBufferIndex();
// Compute the end of this new block.
const ProfileBufferIndex blockEnd = blockIndex + blockBytes;
while (blockEnd >
mFirstReadIndex.ConvertToProfileBufferIndex() + bufferBytes) {
// About to trample on an old block.
ProfileBufferEntryReader reader = ReaderInBlockAt(mFirstReadIndex);
mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
// Move the buffer reading start past this cleared block.
mFirstReadIndex = ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
mFirstReadIndex.ConvertToProfileBufferIndex() +
ULEB128Size(reader.RemainingBytes()) + reader.RemainingBytes());
}
} // End of locked block.
// Out-of-session, just invoke the callback with nullptr, no need to hold
// the lock.
return std::forward<Callback>(aCallback)(nullptr);
// Store the new end of buffer.
mNextWriteIndex =
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(blockEnd);
mMaybeUnderlyingBuffer->mPushedBlockCount += 1;
// Finally, let aCallback write into the entry.
mMaybeUnderlyingBuffer->mBuffer.EntryWriterFromTo(maybeEntryWriter,
blockIndex, blockEnd);
MOZ_ASSERT(maybeEntryWriter.isSome(),
"Non-empty entry should always create an EntryWriter");
maybeEntryWriter->WriteULEB128(entryBytes);
MOZ_ASSERT(maybeEntryWriter->RemainingBytes() == entryBytes);
}
#ifdef DEBUG
auto checkAllWritten = MakeScopeExit([&]() {
MOZ_ASSERT(!maybeEntryWriter || maybeEntryWriter->RemainingBytes() == 0);
});
#endif // DEBUG
return std::forward<Callback>(aCallback)(maybeEntryWriter);
}
// Add a new entry of known size, call `aCallback` with a pointer to a
@ -477,8 +477,8 @@ class BlocksRingBuffer {
// Add a new entry copied from the given buffer, return block index.
ProfileBufferBlockIndex PutFrom(const void* aSrc, Length aBytes) {
return ReserveAndPut([aBytes]() { return aBytes; },
[&](ProfileBufferEntryWriter* aEntryWriter) {
if (MOZ_UNLIKELY(!aEntryWriter)) {
[&](Maybe<ProfileBufferEntryWriter>& aEntryWriter) {
if (MOZ_UNLIKELY(aEntryWriter.isNothing())) {
// Out-of-session, return "empty" index.
return ProfileBufferBlockIndex{};
}
@ -495,8 +495,8 @@ class BlocksRingBuffer {
"PutObjects must be given at least one object.");
return ReserveAndPut(
[&]() { return ProfileBufferEntryWriter::SumBytes(aTs...); },
[&](ProfileBufferEntryWriter* aEntryWriter) {
if (MOZ_UNLIKELY(!aEntryWriter)) {
[&](Maybe<ProfileBufferEntryWriter>& aEntryWriter) {
if (MOZ_UNLIKELY(aEntryWriter.isNothing())) {
// Out-of-session, return "empty" index.
return ProfileBufferBlockIndex{};
}

View File

@ -8,6 +8,7 @@
#define ModuloBuffer_h
#include "mozilla/leb128iterator.h"
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/NotNull.h"
#include "mozilla/PowerOfTwo.h"
@ -151,6 +152,7 @@ class ModuloBuffer {
EntrySpan(&mBuffer[0], end), aBlockIndex, aNextBlockIndex};
}
// Return an entry writer for the given range.
ProfileBufferEntryWriter EntryWriterFromTo(Index aStart, Index aEnd) const {
using EntrySpan = Span<ProfileBufferEntryReader::Byte>;
if (aStart == aEnd) {
@ -177,6 +179,37 @@ class ModuloBuffer {
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aEnd)};
}
// Emplace an entry writer into `aMaybeEntryWriter` for the given range.
void EntryWriterFromTo(Maybe<ProfileBufferEntryWriter>& aMaybeEntryWriter,
Index aStart, Index aEnd) const {
MOZ_ASSERT(aMaybeEntryWriter.isNothing(),
"Reference entry writer should be Nothing.");
using EntrySpan = Span<ProfileBufferEntryReader::Byte>;
if (aStart == aEnd) {
return;
}
MOZ_ASSERT(aEnd - aStart <= mMask.MaskValue() + 1);
// Start offset in 0 .. (buffer size - 1)
Offset start = static_cast<Offset>(aStart) & mMask;
// End offset in 1 .. (buffer size)
Offset end = (static_cast<Offset>(aEnd - 1) & mMask) + 1;
if (start < end) {
// Segment doesn't cross buffer threshold, one span is enough.
aMaybeEntryWriter.emplace(
EntrySpan(&mBuffer[start], end - start),
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aStart),
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aEnd));
} else {
// Segment crosses buffer threshold, we need one span until the end and
// one span restarting at the beginning of the buffer.
aMaybeEntryWriter.emplace(
EntrySpan(&mBuffer[start], mMask.MaskValue() + 1 - start),
EntrySpan(&mBuffer[0], end),
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aStart),
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aEnd));
}
}
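
The offset arithmetic above is the heart of the modulo behavior: a power-of-two mask maps global indices into the circular storage, and a block wraps exactly when the masked start does not come before the masked end. A stand-alone illustration of the same computations, assuming a hypothetical 16-byte buffer:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical 16-byte ModuloBuffer: mask = buffer size - 1.
  const uint32_t mask = 16 - 1;
  // A 4-byte block spanning global indices [14, 18).
  const uint32_t aStart = 14, aEnd = 18;
  // Same offset computations as EntryWriterFromTo above.
  const uint32_t start = aStart & mask;          // 14
  const uint32_t end = ((aEnd - 1) & mask) + 1;  // (17 & 15) + 1 = 2
  if (start < end) {
    printf("one span: [%u, %u)\n", start, end);
  } else {
    // Wraps: one span to the end of storage, one from its beginning.
    printf("two spans: [%u, %u) and [0, %u)\n", start, mask + 1, end);
    // Prints: two spans: [14, 16) and [0, 2)
  }
}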
// All ModuloBuffer operations should be done through this iterator, which has
// an effectively infinite range. The underlying wrapping-around is hidden.
// Use `ReaderAt(Index)` or `WriterAt(Index)` to create it.

View File

@ -42,11 +42,12 @@ class InChunkPointer {
InChunkPointer()
: mChunk(nullptr), mNextChunkGroup(nullptr), mOffsetInChunk(0) {}
// InChunkPointer over one or two chunk groups, will start at the first
// block (if any).
// InChunkPointer over one or two chunk groups, pointing at the given
// block index (if still in range).
// This constructor should only be used with *trusted* block index values!
InChunkPointer(const ProfileBufferChunk* aChunk,
const ProfileBufferChunk* aNextChunkGroup,
ProfileBufferBlockIndex aBlockIndex = nullptr)
ProfileBufferBlockIndex aBlockIndex)
: mChunk(aChunk), mNextChunkGroup(aNextChunkGroup) {
if (mChunk) {
mOffsetInChunk = mChunk->OffsetFirstBlock();
@ -60,20 +61,94 @@ class InChunkPointer {
mOffsetInChunk = 0;
}
// Try to advance to given position, don't worry about success.
Unused << AdvanceToGlobalRangePosition(
aBlockIndex.ConvertToProfileBufferIndex());
// Try to advance to given position.
if (!AdvanceToGlobalRangePosition(aBlockIndex)) {
// Block does not exist anymore (or block doesn't look valid), reset the
// in-chunk pointer.
mChunk = nullptr;
mNextChunkGroup = nullptr;
}
}
// InChunkPointer over one or two chunk groups, will start at the first
// block (if any). This may be slow, so avoid using it too much.
InChunkPointer(const ProfileBufferChunk* aChunk,
const ProfileBufferChunk* aNextChunkGroup,
ProfileBufferIndex aIndex = ProfileBufferIndex(0))
: mChunk(aChunk), mNextChunkGroup(aNextChunkGroup) {
if (mChunk) {
mOffsetInChunk = mChunk->OffsetFirstBlock();
Adjust();
} else if (mNextChunkGroup) {
mChunk = mNextChunkGroup;
mNextChunkGroup = nullptr;
mOffsetInChunk = mChunk->OffsetFirstBlock();
Adjust();
} else {
mOffsetInChunk = 0;
}
// Try to advance to given position.
if (!AdvanceToGlobalRangePosition(aIndex)) {
// Block does not exist anymore, reset the in-chunk pointer.
mChunk = nullptr;
mNextChunkGroup = nullptr;
}
}
// Compute the current position in the global range.
// 0 if null (including if we've reached the end).
[[nodiscard]] ProfileBufferIndex GlobalRangePosition() const {
if (!mChunk) {
if (IsNull()) {
return 0;
}
return mChunk->RangeStart() + mOffsetInChunk;
}
// Move InChunkPointer forward to the block at the given global block
// position, which is assumed to point exactly at a valid block -- though
// that block may be obsolete (already discarded).
// A null (0) block index stays where it is (if the current position is valid).
// MOZ_ASSERTs if the index is invalid.
[[nodiscard]] bool AdvanceToGlobalRangePosition(
ProfileBufferBlockIndex aBlockIndex) {
if (IsNull()) {
// Pointer is null already. (Not asserting because it's acceptable.)
return false;
}
if (!aBlockIndex) {
// Special null position, just stay where we are.
return ShouldPointAtValidBlock();
}
if (aBlockIndex.ConvertToProfileBufferIndex() < GlobalRangePosition()) {
// Past the requested position, stay where we are (assuming the current
// position was valid).
return ShouldPointAtValidBlock();
}
for (;;) {
if (aBlockIndex.ConvertToProfileBufferIndex() <
mChunk->RangeStart() + mChunk->OffsetPastLastBlock()) {
// Target position is in this chunk's written space, move to it.
mOffsetInChunk =
aBlockIndex.ConvertToProfileBufferIndex() - mChunk->RangeStart();
return ShouldPointAtValidBlock();
}
// Position is after this chunk, try next chunk.
GoToNextChunk();
if (IsNull()) {
return false;
}
// Skip whatever block tail there is, we don't allow pointing in the
// middle of a block.
mOffsetInChunk = mChunk->OffsetFirstBlock();
if (aBlockIndex.ConvertToProfileBufferIndex() < GlobalRangePosition()) {
// Past the requested position, meaning that the given position was in-
// between blocks -> Failure.
MOZ_ASSERT(false, "AdvanceToGlobalRangePosition - In-between blocks");
return false;
}
}
}
// Move InChunkPointer forward to the block at or after the given global
// range position.
// Position 0 stays where it is (if the current position is valid).
@ -82,7 +157,7 @@ class InChunkPointer {
if (aPosition == 0) {
// Special position '0', just stay where we are.
// Success if this position is already valid.
return !!mChunk;
return !IsNull();
}
for (;;) {
ProfileBufferIndex currentPosition = GlobalRangePosition();
@ -112,7 +187,7 @@ class InChunkPointer {
}
// Position is after this chunk, try next chunk.
GoToNextChunk();
if (!mChunk) {
if (IsNull()) {
return false;
}
// Skip whatever block tail there is, we don't allow pointing in the
@ -122,7 +197,7 @@ class InChunkPointer {
}
[[nodiscard]] Byte ReadByte() {
MOZ_ASSERT(!!mChunk);
MOZ_ASSERT(!IsNull());
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
Byte byte = mChunk->ByteAt(mOffsetInChunk);
if (MOZ_UNLIKELY(++mOffsetInChunk == mChunk->OffsetPastLastBlock())) {
@ -137,12 +212,12 @@ class InChunkPointer {
// available to read! (EntryReader() below may gracefully fail.)
[[nodiscard]] Length ReadEntrySize() {
ULEB128Reader<Length> reader;
if (!mChunk) {
if (IsNull()) {
return 0;
}
for (;;) {
const bool isComplete = reader.FeedByteIsComplete(ReadByte());
if (MOZ_UNLIKELY(!mChunk)) {
if (MOZ_UNLIKELY(IsNull())) {
// End of chunks, so there's no actual entry after this anyway.
return 0;
}
@ -157,14 +232,14 @@ class InChunkPointer {
}
InChunkPointer& operator+=(Length aLength) {
MOZ_ASSERT(!!mChunk);
MOZ_ASSERT(!IsNull());
mOffsetInChunk += aLength;
Adjust();
return *this;
}
[[nodiscard]] ProfileBufferEntryReader EntryReader(Length aLength) {
if (!mChunk || aLength == 0) {
if (IsNull() || aLength == 0) {
return ProfileBufferEntryReader();
}
@ -195,7 +270,7 @@ class InChunkPointer {
// We need to go to the next chunk for the 2nd part of this block.
GoToNextChunk();
if (!mChunk) {
if (IsNull()) {
return ProfileBufferEntryReader();
}
@ -218,32 +293,27 @@ class InChunkPointer {
GlobalRangePosition()));
}
explicit operator bool() const { return !!mChunk; }
[[nodiscard]] bool operator!() const { return !mChunk; }
[[nodiscard]] bool IsNull() const { return !mChunk; }
[[nodiscard]] bool operator==(const InChunkPointer& aOther) const {
if (!*this || !aOther) {
return !*this && !aOther;
if (IsNull() || aOther.IsNull()) {
return IsNull() && aOther.IsNull();
}
return mChunk == aOther.mChunk && mOffsetInChunk == aOther.mOffsetInChunk;
}
[[nodiscard]] bool operator!=(const InChunkPointer& aOther) const {
if (!*this || !aOther) {
return !!*this || !!aOther;
}
return mChunk != aOther.mChunk || mOffsetInChunk != aOther.mOffsetInChunk;
return !(*this == aOther);
}
[[nodiscard]] Byte operator*() const {
MOZ_ASSERT(!!mChunk);
MOZ_ASSERT(!IsNull());
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
return mChunk->ByteAt(mOffsetInChunk);
}
InChunkPointer& operator++() {
MOZ_ASSERT(!!mChunk);
MOZ_ASSERT(!IsNull());
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
if (MOZ_UNLIKELY(++mOffsetInChunk == mChunk->OffsetPastLastBlock())) {
mOffsetInChunk = 0;
@ -255,7 +325,7 @@ class InChunkPointer {
private:
void GoToNextChunk() {
MOZ_ASSERT(!!mChunk);
MOZ_ASSERT(!IsNull());
const ProfileBufferIndex expectedNextRangeStart =
mChunk->RangeStart() + mChunk->BufferBytes();
@ -303,6 +373,32 @@ class InChunkPointer {
}
}
// Check if the current position is likely to point at a valid block.
// (Size should be reasonable, and block should fully fit inside buffer.)
// MOZ_ASSERTs on failure, to catch incorrect uses of block indices (which
// should only point at valid blocks if still in range). Non-asserting
// builds must still handle the `false` return value.
[[nodiscard]] bool ShouldPointAtValidBlock() const {
if (IsNull()) {
// Pointer is null, no blocks here.
MOZ_ASSERT(false, "ShouldPointAtValidBlock - null pointer");
return false;
}
// Use a copy, so we don't modify `*this`.
InChunkPointer pointer = *this;
// Try to read the entry size.
Length entrySize = pointer.ReadEntrySize();
if (entrySize == 0) {
// Entry size of zero means we read 0 or a way-too-big value.
MOZ_ASSERT(false, "ShouldPointAtValidBlock - invalid size");
return false;
}
// See if the last byte of the entry is still inside the buffer.
pointer += entrySize - 1;
MOZ_ASSERT(!pointer.IsNull(), "ShouldPointAtValidBlock - past end of buffer");
return !pointer.IsNull();
}
const ProfileBufferChunk* mChunk;
const ProfileBufferChunk* mNextChunkGroup;
Length mOffsetInChunk;
@ -327,7 +423,7 @@ class InChunkPointer {
// ```
// ProfileChunkedBuffer cb(...);
// cb.ReserveAndPut([]() { return sizeof(123); },
// [&](ProfileBufferEntryWriter* aEW) {
// [&](Maybe<ProfileBufferEntryWriter>& aEW) {
// if (aEW) { aEW->WriteObject(123); }
// });
// ```
@ -502,7 +598,7 @@ class ProfileChunkedBuffer {
// Reserve a block that can hold an entry of the given `aCallbackEntryBytes()`
// size, write the entry size (ULEB128-encoded), and invoke and return
// `aCallback(ProfileBufferEntryWriter*)`.
// `aCallback(Maybe<ProfileBufferEntryWriter>&)`.
// Note: `aCallbackEntryBytes` is a callback instead of a simple value, to
// delay this potentially-expensive computation until after we've checked that
// we're in-session; use `Put(Length, Callback)` below if you know the size
@ -511,7 +607,7 @@ class ProfileChunkedBuffer {
auto ReserveAndPut(CallbackEntryBytes&& aCallbackEntryBytes,
Callback&& aCallback)
-> decltype(std::forward<Callback>(aCallback)(
std::declval<ProfileBufferEntryWriter*>())) {
std::declval<Maybe<ProfileBufferEntryWriter>&>())) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
// This can only be read in the 2nd lambda below after it has been written
@ -524,12 +620,12 @@ class ProfileChunkedBuffer {
MOZ_ASSERT(entryBytes != 0, "Empty entries are not allowed");
return ULEB128Size(entryBytes) + entryBytes;
},
[&](ProfileBufferEntryWriter* aEntryWriter) {
if (aEntryWriter) {
aEntryWriter->WriteULEB128(entryBytes);
MOZ_ASSERT(aEntryWriter->RemainingBytes() == entryBytes);
[&](Maybe<ProfileBufferEntryWriter>& aMaybeEntryWriter) {
if (aMaybeEntryWriter.isSome()) {
aMaybeEntryWriter->WriteULEB128(entryBytes);
MOZ_ASSERT(aMaybeEntryWriter->RemainingBytes() == entryBytes);
}
return std::forward<Callback>(aCallback)(aEntryWriter);
return std::forward<Callback>(aCallback)(aMaybeEntryWriter);
},
lock);
}
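
Each block written by `ReserveAndPut` is laid out as a ULEB128-encoded entry size followed by the entry bytes. As a hedged stand-alone sketch of that variable-length prefix (the real encoder lives in mozilla/leb128iterator.h; this `WriteULEB128` is a simplified illustration):

#include <cstdint>
#include <cstdio>
#include <vector>

// Minimal ULEB128 encoder: 7 bits per byte, high bit set on all but the
// last byte.
size_t WriteULEB128(std::vector<uint8_t>& aOut, uint32_t aValue) {
  size_t written = 0;
  do {
    uint8_t byte = aValue & 0x7f;
    aValue >>= 7;
    if (aValue != 0) {
      byte |= 0x80;  // More bytes follow.
    }
    aOut.push_back(byte);
    ++written;
  } while (aValue != 0);
  return written;
}

int main() {
  std::vector<uint8_t> block;
  const uint32_t entryBytes = 300;          // Entry size to encode.
  WriteULEB128(block, entryBytes);          // Size prefix: 2 bytes for 300.
  block.resize(block.size() + entryBytes);  // Space for the entry payload.
  // Layout matches `blockBytes = ULEB128Size(entryBytes) + entryBytes`.
  printf("block bytes = %zu\n", block.size());  // 302
}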
@ -544,12 +640,12 @@ class ProfileChunkedBuffer {
ProfileBufferBlockIndex PutFrom(const void* aSrc, Length aBytes) {
return ReserveAndPut(
[aBytes]() { return aBytes; },
[aSrc, aBytes](ProfileBufferEntryWriter* aEntryWriter) {
if (!aEntryWriter) {
[aSrc, aBytes](Maybe<ProfileBufferEntryWriter>& aMaybeEntryWriter) {
if (aMaybeEntryWriter.isNothing()) {
return ProfileBufferBlockIndex{};
}
aEntryWriter->WriteBytes(aSrc, aBytes);
return aEntryWriter->CurrentBlockIndex();
aMaybeEntryWriter->WriteBytes(aSrc, aBytes);
return aMaybeEntryWriter->CurrentBlockIndex();
});
}
@ -561,12 +657,12 @@ class ProfileChunkedBuffer {
"PutObjects must be given at least one object.");
return ReserveAndPut(
[&]() { return ProfileBufferEntryWriter::SumBytes(aTs...); },
[&](ProfileBufferEntryWriter* aEntryWriter) {
if (!aEntryWriter) {
[&](Maybe<ProfileBufferEntryWriter>& aMaybeEntryWriter) {
if (aMaybeEntryWriter.isNothing()) {
return ProfileBufferBlockIndex{};
}
aEntryWriter->WriteObjects(aTs...);
return aEntryWriter->CurrentBlockIndex();
aMaybeEntryWriter->WriteObjects(aTs...);
return aMaybeEntryWriter->CurrentBlockIndex();
});
}
@ -854,7 +950,7 @@ class ProfileChunkedBuffer {
"ReadEach callback must take ProfileBufferEntryReader& and "
"optionally a ProfileBufferBlockIndex");
detail::InChunkPointer p{aChunks0, aChunks1};
while (p) {
while (!p.IsNull()) {
// The position right before an entry size *is* a block index.
const ProfileBufferBlockIndex blockIndex =
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
@ -911,7 +1007,7 @@ class ProfileChunkedBuffer {
std::is_invocable_v<Callback, Maybe<ProfileBufferEntryReader>&&>,
"ReadAt callback must take a Maybe<ProfileBufferEntryReader>&&");
Maybe<ProfileBufferEntryReader> maybeEntryReader;
if (detail::InChunkPointer p{aChunks0, aChunks1}; p) {
if (detail::InChunkPointer p{aChunks0, aChunks1}; !p.IsNull()) {
// If the pointer position is before the given position, try to advance.
if (p.GlobalRangePosition() >=
aMinimumBlockIndex.ConvertToProfileBufferIndex() ||
@ -971,16 +1067,17 @@ class ProfileChunkedBuffer {
if (failed) {
return;
}
failed = !Put(aER.RemainingBytes(), [&](ProfileBufferEntryWriter* aEW) {
if (!aEW) {
return false;
}
if (!firstBlockIndex) {
firstBlockIndex = aEW->CurrentBlockIndex();
}
aEW->WriteFromReader(aER, aER.RemainingBytes());
return true;
});
failed =
!Put(aER.RemainingBytes(), [&](Maybe<ProfileBufferEntryWriter>& aEW) {
if (aEW.isNothing()) {
return false;
}
if (!firstBlockIndex) {
firstBlockIndex = aEW->CurrentBlockIndex();
}
aEW->WriteFromReader(aER, aER.RemainingBytes());
return true;
});
});
return failed ? nullptr : firstBlockIndex;
}
@ -1250,8 +1347,8 @@ class ProfileChunkedBuffer {
}
// Reserve a block of `aCallbackBlockBytes()` size, and invoke and return
// `aCallback(ProfileBufferEntryWriter*)`. Note that this is the "raw" version
// that doesn't write the entry size at the beginning of the block.
// `aCallback(Maybe<ProfileBufferEntryWriter>&)`. Note that this is the "raw"
// version that doesn't write the entry size at the beginning of the block.
// Note: `aCallbackBlockBytes` is a callback instead of a simple value, to
// delay this potentially-expensive computation until after we've checked that
// we're in-session; use `Put(Length, Callback)` below if you know the size
@ -1261,6 +1358,10 @@ class ProfileChunkedBuffer {
Callback&& aCallback,
baseprofiler::detail::BaseProfilerMaybeAutoLock& aLock,
uint64_t aBlockCount = 1) {
// The entry writer that will point into one or two chunks to write
// into, empty by default (failure).
Maybe<ProfileBufferEntryWriter> maybeEntryWriter;
// The current chunk will be filled if we need to write more than its
// remaining space.
bool currentChunkFilled = false;
@ -1269,16 +1370,69 @@ class ProfileChunkedBuffer {
// chunk!
bool nextChunkInitialized = false;
// The entry writer that will point into one or two chunks to write
// into, empty by default (failure).
ProfileBufferEntryWriter entryWriter;
if (MOZ_LIKELY(mChunkManager)) {
// In-session.
if (ProfileBufferChunk* current = GetOrCreateCurrentChunk(aLock);
MOZ_LIKELY(current)) {
const Length blockBytes =
std::forward<CallbackBlockBytes>(aCallbackBlockBytes)();
if (blockBytes <= current->RemainingBytes()) {
// Block fits in current chunk with only one span.
currentChunkFilled = blockBytes == current->RemainingBytes();
const auto [mem0, blockIndex] = current->ReserveBlock(blockBytes);
MOZ_ASSERT(mem0.LengthBytes() == blockBytes);
maybeEntryWriter.emplace(
mem0, blockIndex,
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
blockIndex.ConvertToProfileBufferIndex() + blockBytes));
MOZ_ASSERT(maybeEntryWriter->RemainingBytes() == blockBytes);
mRangeEnd += blockBytes;
mPushedBlockCount += aBlockCount;
} else {
// Block doesn't fit fully in current chunk, it needs to overflow into
// the next one.
// Make sure the next chunk is available (from a previous request),
// otherwise create one on the spot.
if (ProfileBufferChunk* next = GetOrCreateNextChunk(aLock);
MOZ_LIKELY(next)) {
// Here, we know we have a current and a next chunk.
// Reserve head of block at the end of the current chunk.
const auto [mem0, blockIndex] =
current->ReserveBlock(current->RemainingBytes());
MOZ_ASSERT(mem0.LengthBytes() < blockBytes);
MOZ_ASSERT(current->RemainingBytes() == 0);
// Set the next chunk range, and reserve the needed space for the
// tail of the block.
next->SetRangeStart(mNextChunkRangeStart);
mNextChunkRangeStart += next->BufferBytes();
const auto mem1 = next->ReserveInitialBlockAsTail(
blockBytes - mem0.LengthBytes());
MOZ_ASSERT(next->RemainingBytes() != 0);
currentChunkFilled = true;
nextChunkInitialized = true;
// Block is split in two spans.
maybeEntryWriter.emplace(
mem0, mem1, blockIndex,
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
blockIndex.ConvertToProfileBufferIndex() + blockBytes));
MOZ_ASSERT(maybeEntryWriter->RemainingBytes() == blockBytes);
mRangeEnd += blockBytes;
mPushedBlockCount += aBlockCount;
}
}
} // end of `MOZ_LIKELY(current)`
} // end of `if (MOZ_LIKELY(mChunkManager))`
// Here, we either have a `Nothing` (failure), or a non-empty entry writer
// pointing at the start of the block.
// After we invoke the callback and return, we may need to handle the
// current chunk being filled.
auto handleFilledChunk = MakeScopeExit([&]() {
// If an entry writer was created, the callback *must* have filled the
// whole entry.
MOZ_ASSERT(entryWriter.RemainingBytes() == 0);
MOZ_ASSERT(!maybeEntryWriter || maybeEntryWriter->RemainingBytes() == 0);
if (currentChunkFilled) {
// Extract current (now filled) chunk.
@ -1313,81 +1467,12 @@ class ProfileChunkedBuffer {
}
});
if (MOZ_LIKELY(mChunkManager)) {
// In-session.
if (ProfileBufferChunk* current = GetOrCreateCurrentChunk(aLock);
MOZ_LIKELY(current)) {
const Length blockBytes =
std::forward<CallbackBlockBytes>(aCallbackBlockBytes)();
if (blockBytes <= current->RemainingBytes()) {
// Block fits in current chunk with only one span.
currentChunkFilled = blockBytes == current->RemainingBytes();
const auto [mem0, blockIndex] = current->ReserveBlock(blockBytes);
MOZ_ASSERT(mem0.LengthBytes() == blockBytes);
entryWriter.Set(
mem0, blockIndex,
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
blockIndex.ConvertToProfileBufferIndex() + blockBytes));
} else {
// Block doesn't fit fully in current chunk, it needs to overflow into
// the next one.
// Make sure the next chunk is available (from a previous request),
// otherwise create one on the spot.
if (ProfileBufferChunk* next = GetOrCreateNextChunk(aLock);
MOZ_LIKELY(next)) {
// Here, we know we have a current and a next chunk.
// Reserve head of block at the end of the current chunk.
const auto [mem0, blockIndex] =
current->ReserveBlock(current->RemainingBytes());
MOZ_ASSERT(mem0.LengthBytes() < blockBytes);
MOZ_ASSERT(current->RemainingBytes() == 0);
// Set the next chunk range, and reserve the needed space for the
// tail of the block.
next->SetRangeStart(mNextChunkRangeStart);
mNextChunkRangeStart += next->BufferBytes();
const auto mem1 = next->ReserveInitialBlockAsTail(
blockBytes - mem0.LengthBytes());
MOZ_ASSERT(next->RemainingBytes() != 0);
currentChunkFilled = true;
nextChunkInitialized = true;
// Block is split in two spans.
entryWriter.Set(
mem0, mem1, blockIndex,
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
blockIndex.ConvertToProfileBufferIndex() + blockBytes));
}
}
// Here, we either have an empty `entryWriter` (failure), or a non-empty
// writer pointing at the start of the block.
// Remember that following the final `return` below, `handleFilledChunk`
// will take care of releasing the current chunk, and cycling to the
// next one, if needed.
if (MOZ_LIKELY(entryWriter.RemainingBytes() != 0)) {
// `entryWriter` is not empty, record some stats and let the user
// write their data in the entry.
MOZ_ASSERT(entryWriter.RemainingBytes() == blockBytes);
mRangeEnd += blockBytes;
mPushedBlockCount += aBlockCount;
return std::forward<Callback>(aCallback)(&entryWriter);
}
// If we're here, `entryWriter` was empty (probably because we couldn't
// get a next chunk to write into), we just fall back to the `nullptr`
// case below...
} // end of `if (current)`
} // end of `if (mChunkManager)`
return std::forward<Callback>(aCallback)(nullptr);
return std::forward<Callback>(aCallback)(maybeEntryWriter);
}
// Reserve a block of `aBlockBytes` size, and invoke and return
// `aCallback(ProfileBufferEntryWriter*)`. Note that this is the "raw" version
// that doesn't write the entry size at the beginning of the block.
// `aCallback(Maybe<ProfileBufferEntryWriter>&)`. Note that this is the "raw"
// version that doesn't write the entry size at the beginning of the block.
template <typename Callback>
auto ReserveAndPutRaw(Length aBlockBytes, Callback&& aCallback,
uint64_t aBlockCount) {
@ -1607,8 +1692,8 @@ struct ProfileBufferEntryReader::Deserializer<ProfileChunkedBuffer> {
// Copy bytes into the buffer.
aBuffer.ReserveAndPutRaw(
len,
[&](ProfileBufferEntryWriter* aEW) {
MOZ_RELEASE_ASSERT(aEW);
[&](Maybe<ProfileBufferEntryWriter>& aEW) {
MOZ_RELEASE_ASSERT(aEW.isSome());
aEW->WriteFromReader(aER, len);
},
0);

View File

@ -850,11 +850,12 @@ static void TestChunkedBuffer() {
MOZ_RELEASE_ASSERT(false);
return 1;
},
[](ProfileBufferEntryWriter* aEW) { return aEW ? 2 : 3; });
[](Maybe<ProfileBufferEntryWriter>& aEW) { return aEW ? 2 : 3; });
MOZ_RELEASE_ASSERT(result == 3);
result = 0;
result = cb.Put(1, [](ProfileBufferEntryWriter* aEW) { return aEW ? 1 : 2; });
result = cb.Put(
1, [](Maybe<ProfileBufferEntryWriter>& aEW) { return aEW ? 1 : 2; });
MOZ_RELEASE_ASSERT(result == 2);
blockIndex = cb.PutFrom(&result, 1);
@ -912,7 +913,7 @@ static void TestChunkedBuffer() {
blockIndex = nullptr;
bool success = cb.ReserveAndPut(
[]() { return sizeof(test); },
[&](ProfileBufferEntryWriter* aEW) {
[&](Maybe<ProfileBufferEntryWriter>& aEW) {
ran = true;
if (!aEW) {
return false;
@ -1171,7 +1172,7 @@ static void TestChunkedBuffer() {
// to store an int), and write an increasing int.
const bool success =
cb.Put(std::max(aThreadNo, int(sizeof(push))),
[&](ProfileBufferEntryWriter* aEW) {
[&](Maybe<ProfileBufferEntryWriter>& aEW) {
if (!aEW) {
return false;
}
@ -1205,10 +1206,11 @@ static void TestChunkedBuffer() {
MOZ_RELEASE_ASSERT(false);
return 1;
},
[](ProfileBufferEntryWriter* aEW) { return !!aEW; });
[](Maybe<ProfileBufferEntryWriter>& aEW) { return !!aEW; });
MOZ_RELEASE_ASSERT(!success);
success = cb.Put(1, [](ProfileBufferEntryWriter* aEW) { return !!aEW; });
success =
cb.Put(1, [](Maybe<ProfileBufferEntryWriter>& aEW) { return !!aEW; });
MOZ_RELEASE_ASSERT(!success);
blockIndex = cb.PutFrom(&success, 1);
@ -1707,8 +1709,8 @@ void TestBlocksRingBufferAPI() {
// Push `2` through ReserveAndPut, check output ProfileBufferBlockIndex.
auto bi2 = rb.ReserveAndPut([]() { return sizeof(uint32_t); },
[](ProfileBufferEntryWriter* aEW) {
MOZ_RELEASE_ASSERT(!!aEW);
[](Maybe<ProfileBufferEntryWriter>& aEW) {
MOZ_RELEASE_ASSERT(aEW.isSome());
aEW->WriteObject(uint32_t(2));
return aEW->CurrentBlockIndex();
});
@ -1790,12 +1792,13 @@ void TestBlocksRingBufferAPI() {
// Push `3` through Put, check writer output
// is returned to the initial caller.
auto put3 = rb.Put(sizeof(uint32_t), [&](ProfileBufferEntryWriter* aEW) {
MOZ_RELEASE_ASSERT(!!aEW);
aEW->WriteObject(uint32_t(3));
MOZ_RELEASE_ASSERT(aEW->CurrentBlockIndex() == bi2Next);
return float(aEW->CurrentBlockIndex().ConvertToProfileBufferIndex());
});
auto put3 =
rb.Put(sizeof(uint32_t), [&](Maybe<ProfileBufferEntryWriter>& aEW) {
MOZ_RELEASE_ASSERT(aEW.isSome());
aEW->WriteObject(uint32_t(3));
MOZ_RELEASE_ASSERT(aEW->CurrentBlockIndex() == bi2Next);
return float(aEW->CurrentBlockIndex().ConvertToProfileBufferIndex());
});
static_assert(std::is_same<decltype(put3), float>::value,
"Expect float as returned by callback.");
MOZ_RELEASE_ASSERT(put3 == 11.0);
@ -1847,11 +1850,12 @@ void TestBlocksRingBufferAPI() {
// Push 5 through Put, no returns.
// This will clear the second entry.
// Check that the EntryWriter can access bi4 but not bi2.
auto bi5 = rb.Put(sizeof(uint32_t), [&](ProfileBufferEntryWriter* aEW) {
MOZ_RELEASE_ASSERT(!!aEW);
aEW->WriteObject(uint32_t(5));
return aEW->CurrentBlockIndex();
});
auto bi5 =
rb.Put(sizeof(uint32_t), [&](Maybe<ProfileBufferEntryWriter>& aEW) {
MOZ_RELEASE_ASSERT(aEW.isSome());
aEW->WriteObject(uint32_t(5));
return aEW->CurrentBlockIndex();
});
auto bi6 = rb.GetState().mRangeEnd;
// 16 17 18 19 20 21 22 23 24 25 26 11 12 13 14 15 (16)
// [4 | int(4) ] [4 | int(5) ]E ? S[4 | int(3) ]
@ -2031,8 +2035,8 @@ void TestBlocksRingBufferUnderlyingBufferChanges() {
MOZ_RELEASE_ASSERT(state.mClearedBlockCount == 0);
// `Put()` functions run the callback with `Nothing`.
int32_t ran = 0;
rb.Put(1, [&](ProfileBufferEntryWriter* aMaybeEntryWriter) {
MOZ_RELEASE_ASSERT(!aMaybeEntryWriter);
rb.Put(1, [&](Maybe<ProfileBufferEntryWriter>& aMaybeEntryWriter) {
MOZ_RELEASE_ASSERT(aMaybeEntryWriter.isNothing());
++ran;
});
MOZ_RELEASE_ASSERT(ran == 1);
@ -2103,12 +2107,13 @@ void TestBlocksRingBufferUnderlyingBufferChanges() {
}
int32_t ran = 0;
// The following three `Put...` will write three int32_t of value 1.
bi = rb.Put(sizeof(ran), [&](ProfileBufferEntryWriter* aMaybeEntryWriter) {
MOZ_RELEASE_ASSERT(!!aMaybeEntryWriter);
++ran;
aMaybeEntryWriter->WriteObject(ran);
return aMaybeEntryWriter->CurrentBlockIndex();
});
bi = rb.Put(sizeof(ran),
[&](Maybe<ProfileBufferEntryWriter>& aMaybeEntryWriter) {
MOZ_RELEASE_ASSERT(aMaybeEntryWriter.isSome());
++ran;
aMaybeEntryWriter->WriteObject(ran);
return aMaybeEntryWriter->CurrentBlockIndex();
});
MOZ_RELEASE_ASSERT(ran == 1);
MOZ_RELEASE_ASSERT(rb.PutFrom(&ran, sizeof(ran)) !=
ProfileBufferBlockIndex{});
@ -2253,8 +2258,8 @@ void TestBlocksRingBufferThreading() {
// Reserve as many bytes as the thread number (but at least enough
// to store an int), and write an increasing int.
rb.Put(std::max(aThreadNo, int(sizeof(push))),
[&](ProfileBufferEntryWriter* aEW) {
MOZ_RELEASE_ASSERT(!!aEW);
[&](Maybe<ProfileBufferEntryWriter>& aEW) {
MOZ_RELEASE_ASSERT(aEW.isSome());
aEW->WriteObject(aThreadNo * 1000000 + push);
*aEW += aEW->RemainingBytes();
});

File diff suppressed because it is too large

View File

@ -765,24 +765,17 @@ class RemoteSettingsClient extends EventEmitter {
options = {}
) {
const { retry = false } = options;
const since = retry || !localTimestamp ? undefined : `${localTimestamp}`;
// Fetch collection metadata and list of changes from server
// (or all records on retry).
const client = this.httpClient();
const [
// Fetch collection metadata and list of changes from server.
console.debug(
`Fetch changes from server (expected=${expectedTimestamp}, since=${since})`
);
const {
metadata,
{ data: remoteRecords, last_modified: remoteTimestamp },
] = await Promise.all([
client.getData({
query: { _expected: expectedTimestamp },
}),
client.listRecords({
filters: {
_expected: expectedTimestamp,
},
since: retry || !localTimestamp ? undefined : `${localTimestamp}`,
}),
]);
remoteTimestamp,
remoteRecords,
} = await this._fetchChangeset(expectedTimestamp, since);
// We build a sync result, based on remote changes.
const syncResult = {
@ -893,6 +886,36 @@ class RemoteSettingsClient extends EventEmitter {
return syncResult;
}
/**
* Fetch information from changeset endpoint.
*
* @param expectedTimestamp cache-busting value
* @param since timestamp of last sync (optional)
*/
async _fetchChangeset(expectedTimestamp, since) {
const client = this.httpClient();
const {
metadata,
timestamp: remoteTimestamp,
changes: remoteRecords,
} = await client.execute(
{
path: `/buckets/${this.bucketName}/collections/${this.collectionName}/changeset`,
},
{
query: {
_expected: expectedTimestamp,
_since: since,
},
}
);
return {
remoteTimestamp,
metadata,
remoteRecords,
};
}
/**
* Use the filter func to filter the lists of changes obtained from synchronization,
* and return them along with the filtered list of local records.

View File

@ -69,11 +69,7 @@ function run_test() {
handleResponse
);
server.registerPathHandler(
"/v1/buckets/main/collections/password-fields",
handleResponse
);
server.registerPathHandler(
"/v1/buckets/main/collections/password-fields/records",
"/v1/buckets/main/collections/password-fields/changeset",
handleResponse
);
server.registerPathHandler(
@ -81,15 +77,11 @@ function run_test() {
handleResponse
);
server.registerPathHandler(
"/v1/buckets/main/collections/language-dictionaries/records",
"/v1/buckets/main/collections/language-dictionaries/changeset",
handleResponse
);
server.registerPathHandler(
"/v1/buckets/main/collections/with-local-fields",
handleResponse
);
server.registerPathHandler(
"/v1/buckets/main/collections/with-local-fields/records",
"/v1/buckets/main/collections/with-local-fields/changeset",
handleResponse
);
server.registerPathHandler("/fake-x5u", handleResponse);
@ -910,26 +902,6 @@ function getSampleResponse(req, port) {
],
},
},
"GET:/v1/buckets/main/collections/password-fields": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
"Content-Type: application/json; charset=UTF-8",
"Server: waitress",
'Etag: "1234"',
],
status: { status: 200, statusText: "OK" },
responseBody: JSON.stringify({
data: {
id: "password-fields",
last_modified: 1234,
signature: {
signature: "abcdef",
x5u: `http://localhost:${port}/fake-x5u`,
},
},
}),
},
"GET:/fake-x5u": {
sampleHeaders: ["Content-Type: application/octet-stream"],
status: { status: 200, statusText: "OK" },
@ -940,7 +912,7 @@ ZARKjbu1TuYQHf0fs+GwID8zeLc2zJL7UzcHFwwQ6Nda9OJN4uPAuC/BKaIpxCLL
wNuvFqc=
-----END CERTIFICATE-----`,
},
"GET:/v1/buckets/main/collections/password-fields/records?_expected=2000&_sort=-last_modified": {
"GET:/v1/buckets/main/collections/password-fields/changeset?_expected=2000": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
@ -950,7 +922,16 @@ wNuvFqc=
],
status: { status: 200, statusText: "OK" },
responseBody: {
data: [
timestamp: 3000,
metadata: {
id: "password-fields",
last_modified: 1234,
signature: {
signature: "abcdef",
x5u: `http://localhost:${port}/fake-x5u`,
},
},
changes: [
{
id: "9d500963-d80e-3a91-6e74-66f3811b99cc",
last_modified: 3000,
@ -960,7 +941,7 @@ wNuvFqc=
],
},
},
"GET:/v1/buckets/main/collections/password-fields/records?_expected=3001&_sort=-last_modified&_since=3000": {
"GET:/v1/buckets/main/collections/password-fields/changeset?_expected=3001&_since=3000": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
@ -970,7 +951,9 @@ wNuvFqc=
],
status: { status: 200, statusText: "OK" },
responseBody: {
data: [
metadata: {},
timestamp: 4000,
changes: [
{
id: "aabad965-e556-ffe7-4191-074f5dee3df3",
last_modified: 4000,
@ -986,7 +969,7 @@ wNuvFqc=
],
},
},
"GET:/v1/buckets/main/collections/password-fields/records?_expected=4001&_sort=-last_modified&_since=4000": {
"GET:/v1/buckets/main/collections/password-fields/changeset?_expected=4001&_since=4000": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
@ -996,7 +979,9 @@ wNuvFqc=
],
status: { status: 200, statusText: "OK" },
responseBody: {
data: [
metadata: {},
timestamp: 5000,
changes: [
{
id: "aabad965-e556-ffe7-4191-074f5dee3df3",
deleted: true,
@ -1004,7 +989,7 @@ wNuvFqc=
],
},
},
"GET:/v1/buckets/main/collections/password-fields/records?_expected=10000&_sort=-last_modified&_since=9999": {
"GET:/v1/buckets/main/collections/password-fields/changeset?_expected=10000&_since=9999": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
@ -1018,7 +1003,7 @@ wNuvFqc=
error: "Service Unavailable",
},
},
"GET:/v1/buckets/main/collections/password-fields/records?_expected=10001&_sort=-last_modified&_since=10000": {
"GET:/v1/buckets/main/collections/password-fields/changeset?_expected=10001&_since=10000": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
@ -1029,7 +1014,7 @@ wNuvFqc=
status: { status: 200, statusText: "OK" },
responseBody: "<invalid json",
},
"GET:/v1/buckets/main/collections/password-fields/records?_expected=11001&_sort=-last_modified&_since=11000": {
"GET:/v1/buckets/main/collections/password-fields/changeset?_expected=11001&_since=11000": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
@ -1038,7 +1023,7 @@ wNuvFqc=
],
status: { status: 503, statusText: "Service Unavailable" },
responseBody: {
data: [
changes: [
{
id: "c4f021e3-f68c-4269-ad2a-d4ba87762b35",
last_modified: 4000,
@ -1083,7 +1068,7 @@ wNuvFqc=
],
},
},
"GET:/v1/buckets/main/collections/password-fields/records?_expected=1337&_sort=-last_modified": {
"GET:/v1/buckets/main/collections/password-fields/changeset?_expected=1337": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
@ -1093,7 +1078,9 @@ wNuvFqc=
],
status: { status: 200, statusText: "OK" },
responseBody: {
data: [
metadata: {},
timestamp: 3000,
changes: [
{
id: "312cc78d-9c1f-4291-a4fa-a1be56f6cc69",
last_modified: 3000,
@ -1123,7 +1110,7 @@ wNuvFqc=
},
}),
},
"GET:/v1/buckets/main/collections/language-dictionaries/records": {
"GET:/v1/buckets/main/collections/language-dictionaries/changeset": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
@ -1133,7 +1120,16 @@ wNuvFqc=
],
status: { status: 200, statusText: "OK" },
responseBody: {
data: [
timestamp: 5000000000000,
metadata: {
id: "language-dictionaries",
last_modified: 1234,
signature: {
signature: "xyz",
x5u: `http://localhost:${port}/fake-x5u`,
},
},
changes: [
{
id: "xx",
last_modified: 5000000000000,
@ -1152,27 +1148,7 @@ wNuvFqc=
],
},
},
"GET:/v1/buckets/main/collections/with-local-fields": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
"Content-Type: application/json; charset=UTF-8",
"Server: waitress",
'Etag: "1234"',
],
status: { status: 200, statusText: "OK" },
responseBody: JSON.stringify({
data: {
id: "with-local-fields",
last_modified: 1234,
signature: {
signature: "xyz",
x5u: `http://localhost:${port}/fake-x5u`,
},
},
}),
},
"GET:/v1/buckets/main/collections/with-local-fields/records?_expected=2000&_sort=-last_modified": {
"GET:/v1/buckets/main/collections/with-local-fields/changeset?_expected=2000": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
@ -1182,7 +1158,16 @@ wNuvFqc=
],
status: { status: 200, statusText: "OK" },
responseBody: {
data: [
timestamp: 2000,
metadata: {
id: "with-local-fields",
last_modified: 1234,
signature: {
signature: "xyz",
x5u: `http://localhost:${port}/fake-x5u`,
},
},
changes: [
{
id: "c74279ce-fb0a-42a6-ae11-386b567a6119",
last_modified: 2000,
@ -1190,7 +1175,7 @@ wNuvFqc=
],
},
},
"GET:/v1/buckets/main/collections/with-local-fields/records?_expected=3000&_sort=-last_modified&_since=2000": {
"GET:/v1/buckets/main/collections/with-local-fields/changeset?_expected=3000&_since=2000": {
sampleHeaders: [
"Access-Control-Allow-Origin: *",
"Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff",
@ -1200,7 +1185,9 @@ wNuvFqc=
],
status: { status: 200, statusText: "OK" },
responseBody: {
data: [
timestamp: 3000,
metadata: {},
changes: [
{
id: "1f5c98b9-6d93-4c13-aa26-978b38695096",
last_modified: 3000,

View File

@ -114,40 +114,11 @@ add_task(async function test_check_signatures() {
add_task(async function test_check_synchronization_with_signatures() {
const port = server.identity.primaryPort;
const x5u = `http://localhost:${port}/test_remote_settings_signatures/test_cert_chain.pem`;
// Telemetry reports.
const TELEMETRY_HISTOGRAM_KEY = client.identifier;
// a response to give the client when the cert chain is expected
function makeMetaResponseBody(lastModified, signature) {
return {
data: {
id: "signed",
last_modified: lastModified,
signature: {
x5u: `http://localhost:${port}/test_remote_settings_signatures/test_cert_chain.pem`,
public_key: "fake",
"content-signature": `x5u=http://localhost:${port}/test_remote_settings_signatures/test_cert_chain.pem;p384ecdsa=${signature}`,
signature_encoding: "rs_base64url",
signature,
hash_algorithm: "sha384",
ref: "1yryrnmzou5rf31ou80znpnq8n",
},
},
};
}
function makeMetaResponse(eTag, body, comment) {
return {
comment,
sampleHeaders: [
"Content-Type: application/json; charset=UTF-8",
`ETag: \"${eTag}\"`,
],
status: { status: 200, statusText: "OK" },
responseBody: JSON.stringify(body),
};
}
function registerHandlers(responses) {
function handleResponse(serverTimeMillis, request, response) {
const key = `${request.method}:${request.path}?${request.queryString}`;
@ -292,35 +263,28 @@ add_task(async function test_check_synchronization_with_signatures() {
'ETag: "1000"',
],
status: { status: 200, statusText: "OK" },
responseBody: JSON.stringify({ data: [] }),
responseBody: JSON.stringify({
timestamp: 1000,
metadata: {
signature: {
x5u,
signature:
"vxuAg5rDCB-1pul4a91vqSBQRXJG_j7WOYUTswxRSMltdYmbhLRH8R8brQ9YKuNDF56F-w6pn4HWxb076qgKPwgcEBtUeZAO_RtaHXRkRUUgVzAr86yQL4-aJTbv3D6u",
},
},
changes: [],
}),
};
// Valid signature for empty collection.
const RESPONSE_BODY_META_EMPTY_SIG = makeMetaResponseBody(
1000,
"vxuAg5rDCB-1pul4a91vqSBQRXJG_j7WOYUTswxRSMltdYmbhLRH8R8brQ9YKuNDF56F-w6pn4HWxb076qgKPwgcEBtUeZAO_RtaHXRkRUUgVzAr86yQL4-aJTbv3D6u"
);
// The collection metadata containing the signature for the empty
// collection.
const RESPONSE_META_EMPTY_SIG = makeMetaResponse(
1000,
RESPONSE_BODY_META_EMPTY_SIG,
"RESPONSE_META_EMPTY_SIG"
);
// Here, we map request method and path to the available responses
const emptyCollectionResponses = {
"GET:/test_remote_settings_signatures/test_cert_chain.pem?": [
RESPONSE_CERT_CHAIN,
],
"GET:/v1/?": [RESPONSE_SERVER_SETTINGS],
"GET:/v1/buckets/main/collections/signed/records?_expected=1000&_sort=-last_modified": [
"GET:/v1/buckets/main/collections/signed/changeset?_expected=1000": [
RESPONSE_EMPTY_INITIAL,
],
"GET:/v1/buckets/main/collections/signed?_expected=1000": [
RESPONSE_META_EMPTY_SIG,
],
};
//
@ -362,28 +326,23 @@ add_task(async function test_check_synchronization_with_signatures() {
'ETag: "3000"',
],
status: { status: 200, statusText: "OK" },
responseBody: JSON.stringify({ data: [RECORD2, RECORD1] }),
responseBody: JSON.stringify({
timestamp: 3000,
metadata: {
signature: {
x5u,
signature:
"dwhJeypadNIyzGj3QdI0KMRTPnHhFPF_j73mNrsPAHKMW46S2Ftf4BzsPMvPMB8h0TjDus13wo_R4l432DHe7tYyMIWXY0PBeMcoe5BREhFIxMxTsh9eGVXBD1e3UwRy",
},
},
changes: [RECORD2, RECORD1],
}),
};
const RESPONSE_BODY_META_TWO_ITEMS_SIG = makeMetaResponseBody(
3000,
"dwhJeypadNIyzGj3QdI0KMRTPnHhFPF_j73mNrsPAHKMW46S2Ftf4BzsPMvPMB8h0TjDus13wo_R4l432DHe7tYyMIWXY0PBeMcoe5BREhFIxMxTsh9eGVXBD1e3UwRy"
);
// A signature response for the collection containing RECORD1 and RECORD2
const RESPONSE_META_TWO_ITEMS_SIG = makeMetaResponse(
3000,
RESPONSE_BODY_META_TWO_ITEMS_SIG,
"RESPONSE_META_TWO_ITEMS_SIG"
);
const twoItemsResponses = {
"GET:/v1/buckets/main/collections/signed/records?_expected=3000&_sort=-last_modified&_since=1000": [
"GET:/v1/buckets/main/collections/signed/changeset?_expected=3000&_since=1000": [
RESPONSE_TWO_ADDED,
],
"GET:/v1/buckets/main/collections/signed?_expected=3000": [
RESPONSE_META_TWO_ITEMS_SIG,
],
};
registerHandlers(twoItemsResponses);
await client.maybeSync(3000);
@ -397,6 +356,8 @@ add_task(async function test_check_synchronization_with_signatures() {
//
// Check the collection with one addition and one removal has a valid
// signature
const THREE_ITEMS_SIG =
"MIEmNghKnkz12UodAAIc3q_Y4a3IJJ7GhHF4JYNYmm8avAGyPM9fYU7NzVo94pzjotG7vmtiYuHyIX2rTHTbT587w0LdRWxipgFd_PC1mHiwUyjFYNqBBG-kifYk7kEw";
// Remove RECORD1, add RECORD3
const RESPONSE_ONE_ADDED_ONE_REMOVED = {
@ -406,28 +367,22 @@ add_task(async function test_check_synchronization_with_signatures() {
'ETag: "4000"',
],
status: { status: 200, statusText: "OK" },
responseBody: JSON.stringify({ data: [RECORD3, RECORD1_DELETION] }),
responseBody: JSON.stringify({
timestamp: 4000,
metadata: {
signature: {
x5u,
signature: THREE_ITEMS_SIG,
},
},
changes: [RECORD3, RECORD1_DELETION],
}),
};
const RESPONSE_BODY_META_THREE_ITEMS_SIG = makeMetaResponseBody(
4000,
"MIEmNghKnkz12UodAAIc3q_Y4a3IJJ7GhHF4JYNYmm8avAGyPM9fYU7NzVo94pzjotG7vmtiYuHyIX2rTHTbT587w0LdRWxipgFd_PC1mHiwUyjFYNqBBG-kifYk7kEw"
);
// signature response for the collection containing RECORD2 and RECORD3
const RESPONSE_META_THREE_ITEMS_SIG = makeMetaResponse(
4000,
RESPONSE_BODY_META_THREE_ITEMS_SIG,
"RESPONSE_META_THREE_ITEMS_SIG"
);
const oneAddedOneRemovedResponses = {
"GET:/v1/buckets/main/collections/signed/records?_expected=4000&_sort=-last_modified&_since=3000": [
"GET:/v1/buckets/main/collections/signed/changeset?_expected=4000&_since=3000": [
RESPONSE_ONE_ADDED_ONE_REMOVED,
],
"GET:/v1/buckets/main/collections/signed?_expected=4000": [
RESPONSE_META_THREE_ITEMS_SIG,
],
};
registerHandlers(oneAddedOneRemovedResponses);
await client.maybeSync(4000);
@ -449,22 +404,29 @@ add_task(async function test_check_synchronization_with_signatures() {
'ETag: "4000"',
],
status: { status: 200, statusText: "OK" },
responseBody: JSON.stringify({ data: [] }),
responseBody: JSON.stringify({
timestamp: 4000,
metadata: {
signature: {
x5u,
signature: THREE_ITEMS_SIG,
},
},
changes: [],
}),
};
const noOpResponses = {
"GET:/v1/buckets/main/collections/signed/records?_expected=4100&_sort=-last_modified&_since=4000": [
"GET:/v1/buckets/main/collections/signed/changeset?_expected=4100&_since=4000": [
RESPONSE_EMPTY_NO_UPDATE,
],
"GET:/v1/buckets/main/collections/signed?_expected=4100": [
RESPONSE_META_THREE_ITEMS_SIG,
],
};
registerHandlers(noOpResponses);
await client.maybeSync(4100);
equal((await client.get()).length, 2);
console.info("---------------------------------------------------------");
//
// 5.
// - collection: [RECORD2, RECORD3] -> [RECORD2, RECORD3]
@ -484,38 +446,42 @@ add_task(async function test_check_synchronization_with_signatures() {
'ETag: "4000"',
],
status: { status: 200, statusText: "OK" },
responseBody: JSON.stringify({ data: [RECORD2, RECORD3] }),
responseBody: JSON.stringify({
timestamp: 4000,
metadata: {
signature: {
x5u,
signature: THREE_ITEMS_SIG,
},
},
changes: [RECORD2, RECORD3],
}),
};
// Prepare a (deliberately) bad signature to check the collection state is
// reset if something is inconsistent
const RESPONSE_BODY_META_BAD_SIG = makeMetaResponseBody(
4000,
"aW52YWxpZCBzaWduYXR1cmUK"
);
const RESPONSE_META_BAD_SIG = makeMetaResponse(
4000,
RESPONSE_BODY_META_BAD_SIG,
"RESPONSE_META_BAD_SIG"
);
const RESPONSE_EMPTY_NO_UPDATE_BAD_SIG = {
...RESPONSE_EMPTY_NO_UPDATE,
responseBody: JSON.stringify({
timestamp: 4000,
metadata: {
signature: {
x5u,
signature: "aW52YWxpZCBzaWduYXR1cmUK",
},
},
changes: [],
}),
};
const badSigGoodSigResponses = {
// In this test, we deliberately serve a bad signature initially. The
// subsequent signature returned is a valid one for the three item
// collection.
"GET:/v1/buckets/main/collections/signed?_expected=5000": [
RESPONSE_META_BAD_SIG,
RESPONSE_META_THREE_ITEMS_SIG,
],
// The first collection state is the three item collection (since
// there's a sync with no updates) - but, since the signature is wrong,
// there was a sync with no updates before) - but, since the signature is wrong,
// another request will be made...
"GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=-last_modified&_since=4000": [
RESPONSE_EMPTY_NO_UPDATE,
"GET:/v1/buckets/main/collections/signed/changeset?_expected=5000&_since=4000": [
RESPONSE_EMPTY_NO_UPDATE_BAD_SIG,
],
// The next request is for the full collection. This will be checked against the valid signature
// - so the sync should succeed.
"GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=-last_modified": [
// The subsequent signature returned is a valid one for the three item
// collection.
"GET:/v1/buckets/main/collections/signed/changeset?_expected=5000": [
RESPONSE_COMPLETE_INITIAL,
],
};
@ -558,23 +524,16 @@ add_task(async function test_check_synchronization_with_signatures() {
// - Sync will be no-op since local is equal to server, no changes to emit.
const badSigGoodOldResponses = {
// In this test, we deliberately serve a bad signature initially. The
// subsequent signature returned is a valid one for the empty
// collection.
"GET:/v1/buckets/main/collections/signed?_expected=5000": [
RESPONSE_META_BAD_SIG,
RESPONSE_META_EMPTY_SIG,
],
// The first collection state is the current state (since there's no update
// - but, since the signature is wrong, another request will be made)
"GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=-last_modified&_since=4000": [
RESPONSE_EMPTY_NO_UPDATE,
"GET:/v1/buckets/main/collections/signed/changeset?_expected=5000&_since=4000": [
RESPONSE_EMPTY_NO_UPDATE_BAD_SIG,
],
// The next request is for the full collection. This will be
// checked against the valid signature and last_modified times will be
// compared. Sync should be a no-op, even though the signature is good,
// because the local collection is newer.
"GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=-last_modified": [
"GET:/v1/buckets/main/collections/signed/changeset?_expected=5000": [
RESPONSE_EMPTY_INITIAL,
],
};
@ -603,17 +562,23 @@ add_task(async function test_check_synchronization_with_signatures() {
// Check that a tampered local DB will be overwritten and that the
// sync event contains the appropriate data.
const RESPONSE_COMPLETE_BAD_SIG = {
...RESPONSE_EMPTY_NO_UPDATE,
responseBody: JSON.stringify({
timestamp: 5000,
metadata: {
signature: {
x5u,
signature: "aW52YWxpZCBzaWduYXR1cmUK",
},
},
changes: [RECORD2, RECORD3],
}),
};
const badLocalContentGoodSigResponses = {
// In this test, we deliberately serve a bad signature initially. The
// subsequent signature returned is a valid one for the three item
// collection.
"GET:/v1/buckets/main/collections/signed?_expected=5000": [
RESPONSE_META_BAD_SIG,
RESPONSE_META_THREE_ITEMS_SIG,
],
// The next request is for the full collection. This will be checked
// against the valid signature - so the sync should succeed.
"GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=-last_modified": [
"GET:/v1/buckets/main/collections/signed/changeset?_expected=5000": [
RESPONSE_COMPLETE_BAD_SIG,
RESPONSE_COMPLETE_INITIAL,
],
};
@ -659,7 +624,7 @@ add_task(async function test_check_synchronization_with_signatures() {
// Check that a failing signature throws after retry, and that sync changes
// are not applied.
const RESPONSE_ONLY_RECORD4 = {
const RESPONSE_ONLY_RECORD4_BAD_SIG = {
comment: "Delete RECORD3, create RECORD4",
sampleHeaders: [
"Content-Type: application/json; charset=UTF-8",
@ -667,7 +632,14 @@ add_task(async function test_check_synchronization_with_signatures() {
],
status: { status: 200, statusText: "OK" },
responseBody: JSON.stringify({
data: [
timestamp: 6000,
metadata: {
signature: {
x5u,
signature: "wrong-sig-here-too",
},
},
changes: [
{
id: "f765df30-b2f1-42f6-9803-7bd5a07b5098",
last_modified: 6000,
@ -676,19 +648,11 @@ add_task(async function test_check_synchronization_with_signatures() {
}),
};
const allBadSigResponses = {
// In this test, we deliberately serve only a bad signature.
"GET:/v1/buckets/main/collections/signed?_expected=6000": [
RESPONSE_META_BAD_SIG,
"GET:/v1/buckets/main/collections/signed/changeset?_expected=6000&_since=4000": [
RESPONSE_EMPTY_NO_UPDATE_BAD_SIG,
],
// The first collection state is the three item collection (since
// there's a sync with no updates) - but, since the signature is wrong,
// another request will be made...
"GET:/v1/buckets/main/collections/signed/records?_expected=6000&_sort=-last_modified&_since=4000": [
RESPONSE_EMPTY_NO_UPDATE,
],
// The next request is for the full collection.
"GET:/v1/buckets/main/collections/signed/records?_expected=6000&_sort=-last_modified": [
RESPONSE_ONLY_RECORD4,
"GET:/v1/buckets/main/collections/signed/changeset?_expected=6000": [
RESPONSE_ONLY_RECORD4_BAD_SIG,
],
};
@ -747,24 +711,26 @@ add_task(async function test_check_synchronization_with_signatures() {
//
// Check that we don't apply changes when signature is missing in remote.
const RESPONSE_META_NO_SIG = {
const RESPONSE_NO_SIG = {
sampleHeaders: [
"Content-Type: application/json; charset=UTF-8",
`ETag: \"123456\"`,
],
status: { status: 200, statusText: "OK" },
responseBody: JSON.stringify({
data: {
metadata: {
last_modified: 123456,
},
changes: [],
timestamp: 123456,
}),
};
const missingSigResponses = {
// In this test, we deliberately serve metadata without the signature
// attribute, as if the collection was not signed.
"GET:/v1/buckets/main/collections/signed?_expected=6000": [
RESPONSE_META_NO_SIG,
"GET:/v1/buckets/main/collections/signed/changeset?_expected=6000": [
RESPONSE_NO_SIG,
],
};

View File

@ -15,7 +15,7 @@ kind-dependencies:
job-defaults:
description: Pushes Flatpaks onto Flathub
run-on-projects: [] # to make sure this never runs as part of CI
shipping-phase: push
shipping-phase: ship
treeherder:
platform: linux64/opt
kind: build

View File

@ -0,0 +1,38 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
---
loader: taskgraph.loader.transform:loader
transforms:
- taskgraph.transforms.release_deps:transforms
- taskgraph.transforms.release_flatpak_push:transforms
- taskgraph.transforms.task:transforms
kind-dependencies:
- release-flatpak-repackage
job-defaults:
description: Pushes Flatpaks onto Flathub
run-on-projects: [] # to make sure this never runs as part of CI
shipping-phase: ship # ship-rc phase
treeherder:
platform: linux64/opt
kind: build
tier: 2
worker-type:
by-release-level:
production: scriptworker-k8s/gecko-3-pushflatpak
staging: scriptworker-k8s/gecko-1-pushflatpak
worker:
implementation: push-flatpak
channel:
by-release-type:
release-rc: "beta"
default: "mock"
jobs:
firefox-rc:
shipping-product: firefox
treeherder:
symbol: Flatpak(push-beta)

View File

@ -89,16 +89,10 @@ name=org.mozilla.firefox
runtime=org.freedesktop.Platform/${ARCH}/${FREEDESKTOP_VERSION}
sdk=org.freedesktop.Sdk/${ARCH}/${FREEDESKTOP_VERSION}
base=app/org.mozilla.Firefox.BaseApp/${ARCH}/${FIREFOX_BASEAPP_CHANNEL}
[Extension org.mozilla.firefox.Locale]
directory=share/runtime/langpack
autodelete=true
locale-subset=true
[Extension org.freedesktop.Platform.ffmpeg-full]
directory=lib/ffmpeg
add-ld-path=.
version=19.08
EOF
cat <<EOF > build/metadata.locale

View File

@ -329,6 +329,11 @@ release-secondary-snap-push
Performs the same function as `release-snap-push`, except for the beta channel as part of RC
Releases.
release-secondary-flatpak-push
------------------------------
Performs the same function as `release-flatpak-push`, except for the beta channel as part of RC
Releases.
release-notify-av-announce
--------------------------
Notify anti-virus vendors when a release is likely shipping.

View File

@ -33,8 +33,7 @@ const FIRST_ORDERED_NODE_TYPE = 9;
const ELEMENT_NODE = 1;
const DOCUMENT_NODE = 9;
const XBLNS = "http://www.mozilla.org/xbl";
const XULNS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
const XUL_NS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
/** XUL elements that support checked property. */
const XUL_CHECKED_ELS = new Set(["button", "checkbox", "toolbarbutton"]);
@ -1238,13 +1237,13 @@ element.isDOMElement = function(node) {
};
/**
* Ascertains whether <var>el</var> is a XUL- or XBL element.
* Ascertains whether <var>el</var> is a XUL element.
*
* @param {*} node
* Element thought to be a XUL- or XBL element.
* Element to check.
*
* @return {boolean}
* True if <var>node</var> is a XULElement or XBLElement,
* True if <var>node</var> is a XULElement,
* false otherwise.
*/
element.isXULElement = function(node) {
@ -1253,7 +1252,7 @@ element.isXULElement = function(node) {
node !== null &&
"nodeType" in node &&
node.nodeType === node.ELEMENT_NODE &&
[XBLNS, XULNS].includes(node.namespaceURI)
node.namespaceURI === XUL_NS
);
};
@ -1610,7 +1609,7 @@ class ChromeWebElement extends WebElement {
if (!(ChromeWebElement.Identifier in json)) {
throw new InvalidArgumentError(
"Expected chrome element reference " +
pprint`for XUL/XBL element, got: ${json}`
pprint`for XUL element, got: ${json}`
);
}
let uuid = json[ChromeWebElement.Identifier];

View File

@ -14,10 +14,9 @@ const { InvalidArgumentError } = ChromeUtils.import(
"chrome://marionette/content/error.js"
);
const SVGNS = "http://www.w3.org/2000/svg";
const XBLNS = "http://www.mozilla.org/xbl";
const XHTMLNS = "http://www.w3.org/1999/xhtml";
const XULNS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
const SVG_NS = "http://www.w3.org/2000/svg";
const XHTML_NS = "http://www.w3.org/1999/xhtml";
const XUL_NS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
class Element {
constructor(tagName, attrs = {}) {
@ -49,7 +48,7 @@ class DOMElement extends Element {
super(tagName, attrs);
if (typeof this.namespaceURI == "undefined") {
this.namespaceURI = XHTMLNS;
this.namespaceURI = XHTML_NS;
}
if (typeof this.ownerDocument == "undefined") {
this.ownerDocument = { designMode: "off" };
@ -83,28 +82,20 @@ class DOMElement extends Element {
class SVGElement extends Element {
constructor(tagName, attrs = {}) {
super(tagName, attrs);
this.namespaceURI = SVGNS;
this.namespaceURI = SVG_NS;
}
}
class XULElement extends Element {
constructor(tagName, attrs = {}) {
super(tagName, attrs);
this.namespaceURI = XULNS;
}
}
class XBLElement extends XULElement {
constructor(tagName, attrs = {}) {
super(tagName, attrs);
this.namespaceURI = XBLNS;
this.namespaceURI = XUL_NS;
}
}
const domEl = new DOMElement("p");
const svgEl = new SVGElement("rect");
const xulEl = new XULElement("browser");
const xblEl = new XBLElement("framebox");
class WindowProxy {
get parent() {
@ -192,7 +183,6 @@ add_test(function test_isDOMElement() {
add_test(function test_isXULElement() {
ok(element.isXULElement(xulEl));
ok(element.isXULElement(xblEl));
ok(!element.isXULElement(domEl));
ok(!element.isXULElement(svgEl));
ok(!element.isDOMElement(domWin));

View File

@ -5,10 +5,9 @@ const { evaluate } = ChromeUtils.import(
"chrome://marionette/content/evaluate.js"
);
const SVGNS = "http://www.w3.org/2000/svg";
const XBLNS = "http://www.mozilla.org/xbl";
const XHTMLNS = "http://www.w3.org/1999/xhtml";
const XULNS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
const SVG_NS = "http://www.w3.org/2000/svg";
const XHTML_NS = "http://www.w3.org/1999/xhtml";
const XUL_NS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
class Element {
constructor(tagName, attrs = {}) {
@ -31,35 +30,27 @@ class Element {
class DOMElement extends Element {
constructor(tagName, attrs = {}) {
super(tagName, attrs);
this.namespaceURI = XHTMLNS;
this.namespaceURI = XHTML_NS;
}
}
class SVGElement extends Element {
constructor(tagName, attrs = {}) {
super(tagName, attrs);
this.namespaceURI = SVGNS;
this.namespaceURI = SVG_NS;
}
}
class XULElement extends Element {
constructor(tagName, attrs = {}) {
super(tagName, attrs);
this.namespaceURI = XULNS;
}
}
class XBLElement extends XULElement {
constructor(tagName, attrs = {}) {
super(tagName, attrs);
this.namespaceURI = XBLNS;
this.namespaceURI = XUL_NS;
}
}
const domEl = new DOMElement("p");
const svgEl = new SVGElement("rect");
const xulEl = new XULElement("browser");
const xblEl = new XBLElement("framebox");
const seenEls = new element.Store();
@ -80,7 +71,6 @@ add_test(function test_toJSON_types() {
ok(evaluate.toJSON(domEl, seenEls) instanceof WebElement);
ok(evaluate.toJSON(svgEl, seenEls) instanceof WebElement);
ok(evaluate.toJSON(xulEl, seenEls) instanceof WebElement);
ok(evaluate.toJSON(xblEl, seenEls) instanceof WebElement);
// toJSON
equal(

View File

@ -26,7 +26,7 @@ TooltipTextProvider.prototype = {
}
const XLinkNS = "http://www.w3.org/1999/xlink";
const XULNS =
const XUL_NS =
"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
var titleText = null;
@ -109,7 +109,7 @@ TooltipTextProvider.prototype = {
XULtooltiptextText == null
) {
if (tipElement.nodeType == defView.Node.ELEMENT_NODE) {
if (tipElement.namespaceURI == XULNS) {
if (tipElement.namespaceURI == XUL_NS) {
XULtooltiptextText = tipElement.hasAttribute("tooltiptext")
? tipElement.getAttribute("tooltiptext")
: null;

View File

@ -935,10 +935,10 @@
* nsIDOMXULSelectControlItemElement
*/
get label() {
const XULNS =
const XUL_NS =
"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
return Array.from(
this.getElementsByTagNameNS(XULNS, "label"),
this.getElementsByTagNameNS(XUL_NS, "label"),
label => label.value
).join(" ");
}

View File

@ -325,9 +325,9 @@
return;
}
if (this.parentNode.parentNode.enableColumnDrag) {
var xulns =
var XUL_NS =
"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
var cols = this.parentNode.getElementsByTagNameNS(xulns, "treecol");
var cols = this.parentNode.getElementsByTagNameNS(XUL_NS, "treecol");
// only start column drag operation if there are at least 2 visible columns
var visible = 0;

View File

@ -14,6 +14,9 @@
"resource://gre/modules/Services.jsm"
);
const XUL_NS =
"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
// Note: MozWizard currently supports adding, but not removing MozWizardPage
// children.
class MozWizard extends MozXULElement {
@ -172,9 +175,7 @@
}
get wizardPages() {
const xulns =
"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
return this.getElementsByTagNameNS(xulns, "wizardpage");
return this.getElementsByTagNameNS(XUL_NS, "wizardpage");
}
set currentPage(val) {
@ -646,10 +647,8 @@
}
get defaultButton() {
const kXULNS =
"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
let buttons = this._wizardButtonDeck.selectedPanel.getElementsByTagNameNS(
kXULNS,
XUL_NS,
"button"
);
for (let i = 0; i < buttons.length; i++) {

View File

@ -15,39 +15,27 @@
using namespace mozilla;
// 65536 bytes should be plenty for a single backtrace.
static constexpr auto WorkerBufferBytes = MakePowerOfTwo32<65536>();
ProfileBuffer::ProfileBuffer(BlocksRingBuffer& aBuffer, PowerOfTwo32 aCapacity)
: mEntries(aBuffer),
mWorkerBuffer(
MakeUnique<BlocksRingBuffer::Byte[]>(WorkerBufferBytes.Value())) {
// Only ProfileBuffer should control this buffer, and it should be empty when
// there is no ProfileBuffer using it.
MOZ_ASSERT(!mEntries.IsInSession());
// Allocate the requested capacity.
mEntries.Set(aCapacity);
}
ProfileBuffer::ProfileBuffer(BlocksRingBuffer& aBuffer) : mEntries(aBuffer) {
// Assume the given buffer is not empty.
ProfileBuffer::ProfileBuffer(ProfileChunkedBuffer& aBuffer)
: mEntries(aBuffer) {
// Assume the given buffer is in-session.
MOZ_ASSERT(mEntries.IsInSession());
}
ProfileBuffer::~ProfileBuffer() {
// Only ProfileBuffer controls this buffer, and it should be empty when there
// is no ProfileBuffer using it.
mEntries.Reset();
mEntries.ResetChunkManager();
MOZ_ASSERT(!mEntries.IsInSession());
}
/* static */
ProfileBufferBlockIndex ProfileBuffer::AddEntry(
BlocksRingBuffer& aBlocksRingBuffer, const ProfileBufferEntry& aEntry) {
ProfileChunkedBuffer& aProfileChunkedBuffer,
const ProfileBufferEntry& aEntry) {
switch (aEntry.GetKind()) {
#define SWITCH_KIND(KIND, TYPE, SIZE) \
case ProfileBufferEntry::Kind::KIND: { \
return aBlocksRingBuffer.PutFrom(&aEntry, 1 + (SIZE)); \
#define SWITCH_KIND(KIND, TYPE, SIZE) \
case ProfileBufferEntry::Kind::KIND: { \
return aProfileChunkedBuffer.PutFrom(&aEntry, 1 + (SIZE)); \
}
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(SWITCH_KIND)
@ -66,8 +54,9 @@ uint64_t ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
/* static */
ProfileBufferBlockIndex ProfileBuffer::AddThreadIdEntry(
BlocksRingBuffer& aBlocksRingBuffer, int aThreadId) {
return AddEntry(aBlocksRingBuffer, ProfileBufferEntry::ThreadId(aThreadId));
ProfileChunkedBuffer& aProfileChunkedBuffer, int aThreadId) {
return AddEntry(aProfileChunkedBuffer,
ProfileBufferEntry::ThreadId(aThreadId));
}
uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) {
@ -164,7 +153,8 @@ void ProfileBuffer::CollectOverheadStats(TimeDuration aSamplingTime,
ProfilerBufferInfo ProfileBuffer::GetProfilerBufferInfo() const {
return {BufferRangeStart(),
BufferRangeEnd(),
mEntries.BufferLength()->Value() / 8, // 8 bytes per entry.
static_cast<uint32_t>(*mEntries.BufferLength() /
8), // 8 bytes per entry.
mIntervalsNs,
mOverheadsNs,
mLockingsNs,

View File

@ -9,25 +9,21 @@
#include "GeckoProfiler.h"
#include "ProfileBufferEntry.h"
#include "mozilla/BlocksRingBuffer.h"
#include "mozilla/Maybe.h"
#include "mozilla/PowerOfTwo.h"
#include "mozilla/ProfileBufferChunkManagerSingle.h"
#include "mozilla/ProfileChunkedBuffer.h"
// Class storing most profiling data in a BlocksRingBuffer.
// Class storing most profiling data in a ProfileChunkedBuffer.
//
// This class is used as a queue of entries which, after construction, never
// allocates. This makes it safe to use in the profiler's "critical section".
class ProfileBuffer final {
public:
// ProfileBuffer constructor
// @param aBuffer The empty BlocksRingBuffer to use as buffer manager.
// @param aCapacity The capacity of the buffer.
ProfileBuffer(mozilla::BlocksRingBuffer& aBuffer,
mozilla::PowerOfTwo32 aCapacity);
// ProfileBuffer constructor
// @param aBuffer The pre-filled BlocksRingBuffer to use as buffer manager.
explicit ProfileBuffer(mozilla::BlocksRingBuffer& aBuffer);
// @param aBuffer The in-session ProfileChunkedBuffer to use as buffer
// manager.
explicit ProfileBuffer(mozilla::ProfileChunkedBuffer& aBuffer);
~ProfileBuffer();
@ -91,27 +87,27 @@ class ProfileBuffer final {
void DiscardSamplesBeforeTime(double aTime);
// Read an entry in the buffer. Slow!
// Read an entry in the buffer.
ProfileBufferEntry GetEntry(uint64_t aPosition) const {
ProfileBufferEntry entry;
mEntries.Read([&](mozilla::BlocksRingBuffer::Reader* aReader) {
// BlocksRingBuffer cannot be out-of-session when sampler is running.
MOZ_ASSERT(aReader);
const auto itEnd = aReader->end();
for (auto it = aReader->begin(); it != itEnd; ++it) {
if (it.CurrentBlockIndex().ConvertToProfileBufferIndex() > aPosition) {
// Passed the block. (We need a precise position.)
return;
}
if (it.CurrentBlockIndex().ConvertToProfileBufferIndex() == aPosition) {
mozilla::ProfileBufferEntryReader er = *it;
MOZ_RELEASE_ASSERT(er.RemainingBytes() <= sizeof(entry));
er.ReadBytes(&entry, er.RemainingBytes());
return;
}
}
});
return entry;
return mEntries.ReadAt(
mozilla::ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
aPosition),
[&](mozilla::Maybe<mozilla::ProfileBufferEntryReader>&& aMER) {
ProfileBufferEntry entry;
if (aMER.isSome()) {
if (aMER->CurrentBlockIndex().ConvertToProfileBufferIndex() ==
aPosition) {
// If we're here, it means `aPosition` pointed at a valid block.
MOZ_RELEASE_ASSERT(aMER->RemainingBytes() <= sizeof(entry));
aMER->ReadBytes(&entry, aMER->RemainingBytes());
} else {
// EntryReader at the wrong position, pretend to have read
// everything.
aMER->SetRemainingBytes(0);
}
}
return entry;
});
}
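// Editor's sketch (not part of this patch): typical use of GetEntry(),
// mirroring the gtest further down in this diff. Assumes `chunkedBuffer`
// is an in-session ProfileChunkedBuffer (hypothetical name).
#if 0
  ProfileBuffer pb(chunkedBuffer);
  pb.AddEntry(ProfileBufferEntry::Time(123.1));
  ProfileBufferEntry entry = pb.GetEntry(pb.BufferRangeStart());
  MOZ_ASSERT(entry.IsTime());
#endif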
size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
@ -126,22 +122,22 @@ class ProfileBuffer final {
ProfilerBufferInfo GetProfilerBufferInfo() const;
private:
// Add |aEntry| to the provided BlocksRingBuffer.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// Add |aEntry| to the provided ProfileChunkedBuffer.
// `static` because it may be used to add an entry to a `ProfileChunkedBuffer`
// that is not attached to a `ProfileBuffer`.
static mozilla::ProfileBufferBlockIndex AddEntry(
mozilla::BlocksRingBuffer& aBlocksRingBuffer,
mozilla::ProfileChunkedBuffer& aProfileChunkedBuffer,
const ProfileBufferEntry& aEntry);
// Add a sample start (ThreadId) entry for aThreadId to the provided
// BlocksRingBuffer. Returns the position of the entry.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// ProfileChunkedBuffer. Returns the position of the entry.
// `static` because it may be used to add an entry to a `ProfileChunkedBuffer`
// that is not attached to a `ProfileBuffer`.
static mozilla::ProfileBufferBlockIndex AddThreadIdEntry(
mozilla::BlocksRingBuffer& aBlocksRingBuffer, int aThreadId);
mozilla::ProfileChunkedBuffer& aProfileChunkedBuffer, int aThreadId);
// The circular-ring storage in which this ProfileBuffer stores its data.
mozilla::BlocksRingBuffer& mEntries;
// The storage in which this ProfileBuffer stores its entries.
mozilla::ProfileChunkedBuffer& mEntries;
public:
// `BufferRangeStart()` and `BufferRangeEnd()` return `uint64_t` values
@ -157,19 +153,22 @@ class ProfileBuffer final {
// - It is safe to try and read entries at any index strictly less than
// `BufferRangeEnd()` -- but note that these reads may fail by the time you
// request them, as old entries get overwritten by new ones.
uint64_t BufferRangeStart() const {
return mEntries.GetState().mRangeStart.ConvertToProfileBufferIndex();
}
uint64_t BufferRangeEnd() const {
return mEntries.GetState().mRangeEnd.ConvertToProfileBufferIndex();
}
uint64_t BufferRangeStart() const { return mEntries.GetState().mRangeStart; }
uint64_t BufferRangeEnd() const { return mEntries.GetState().mRangeEnd; }
private:
// Pre-allocated (to avoid spurious mallocs) temporary buffer used when:
// 65536 bytes should be plenty for a single backtrace.
static constexpr auto WorkerBufferBytes = mozilla::MakePowerOfTwo32<65536>();
// Single pre-allocated chunk (to avoid spurious mallocs), used when:
// - Duplicating sleeping stacks.
// - Adding JIT info.
// - Streaming stacks to JSON.
mozilla::UniquePtr<mozilla::BlocksRingBuffer::Byte[]> mWorkerBuffer;
// Mutable because it's accessed from non-multithreaded const methods.
mutable mozilla::ProfileBufferChunkManagerSingle mWorkerChunkManager{
mozilla::ProfileBufferChunk::Create(
mozilla::ProfileBufferChunk::SizeofChunkMetadata() +
WorkerBufferBytes.Value())};
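// Editor's sketch (not part of this patch): the intended use pattern for
// mWorkerChunkManager, as seen in ProfileBuffer.cpp in this diff; borrow
// the single chunk through a temporary buffer, then give it back:
#if 0
  mozilla::ProfileChunkedBuffer tempBuffer(
      mozilla::ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
      mWorkerChunkManager);
  // ... write entries into tempBuffer ...
  mWorkerChunkManager.Reset(tempBuffer.GetAllChunks());  // recycle the chunk
#endif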
double mFirstSamplingTimeNs = 0.0;
double mLastSamplingTimeNs = 0.0;

View File

@ -566,7 +566,7 @@ static void WriteSample(SpliceableJSONWriter& aWriter,
class EntryGetter {
public:
explicit EntryGetter(BlocksRingBuffer::Reader& aReader,
explicit EntryGetter(ProfileChunkedBuffer::Reader& aReader,
uint64_t aInitialReadPos = 0)
: mBlockIt(
aReader.At(ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
@ -597,7 +597,7 @@ class EntryGetter {
}
}
BlocksRingBuffer::BlockIterator Iterator() const { return mBlockIt; }
ProfileChunkedBuffer::BlockIterator Iterator() const { return mBlockIt; }
ProfileBufferBlockIndex CurBlockIndex() const {
return mBlockIt.CurrentBlockIndex();
@ -619,26 +619,27 @@ class EntryGetter {
return true;
}
// Read the entry "kind", which is always at the start of all entries.
ProfileBufferEntryReader aER = *mBlockIt;
ProfileBufferEntryReader er = *mBlockIt;
auto type = static_cast<ProfileBufferEntry::Kind>(
aER.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
er.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) <
static_cast<ProfileBufferEntry::KindUnderlyingType>(
ProfileBufferEntry::Kind::MODERN_LIMIT));
if (type >= ProfileBufferEntry::Kind::LEGACY_LIMIT) {
er.SetRemainingBytes(0);
return false;
}
// Here, we have a legacy item; we need to read it from the start.
// Because the above `ReadObject` moved the reader, we need to reset it to
// the start of the entry before reading the whole entry.
aER = *mBlockIt;
aER.ReadBytes(&mEntry, aER.RemainingBytes());
er = *mBlockIt;
er.ReadBytes(&mEntry, er.RemainingBytes());
return true;
}
ProfileBufferEntry mEntry;
BlocksRingBuffer::BlockIterator mBlockIt;
const BlocksRingBuffer::BlockIterator mBlockItEnd;
ProfileChunkedBuffer::BlockIterator mBlockIt;
const ProfileChunkedBuffer::BlockIterator mBlockItEnd;
};
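// Editor's sketch (not part of this patch): the reader contract used
// throughout this file. An entry's bytes must be fully consumed; when an
// entry is recognized but not wanted, the remainder is discarded
// explicitly. `wantedKind` is a hypothetical placeholder.
#if 0
  ProfileBufferEntryReader er = *it;
  auto kind = er.ReadObject<ProfileBufferEntry::Kind>();
  if (kind != wantedKind) {
    er.SetRemainingBytes(0);  // pretend to have read everything
  }
#endif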
// The following grammar shows legal sequences of profile buffer entries.
@ -789,10 +790,10 @@ void ProfileBuffer::StreamSamplesToJSON(SpliceableJSONWriter& aWriter,
UniqueStacks& aUniqueStacks) const {
UniquePtr<char[]> dynStrBuf = MakeUnique<char[]>(kMaxFrameKeyLength);
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is running");
mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"ProfileChunkedBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader);
@ -1019,7 +1020,7 @@ void ProfileBuffer::StreamSamplesToJSON(SpliceableJSONWriter& aWriter,
Maybe<double> unresponsiveDuration;
BlocksRingBuffer::BlockIterator it = e.Iterator();
ProfileChunkedBuffer::BlockIterator it = e.Iterator();
for (;;) {
++it;
if (it.IsAtEnd()) {
@ -1036,13 +1037,13 @@ void ProfileBuffer::StreamSamplesToJSON(SpliceableJSONWriter& aWriter,
}
if (kind == ProfileBufferEntry::Kind::CompactStack) {
BlocksRingBuffer tempBuffer(
BlocksRingBuffer::ThreadSafety::WithoutMutex,
mWorkerBuffer.get(), WorkerBufferBytes);
ProfileChunkedBuffer tempBuffer(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
mWorkerChunkManager);
er.ReadIntoObject(tempBuffer);
tempBuffer.Read([&](BlocksRingBuffer::Reader* aReader) {
tempBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"Local BlocksRingBuffer cannot be out-of-session");
"Local ProfileChunkedBuffer cannot be out-of-session");
EntryGetter stackEntryGetter(*aReader);
if (stackEntryGetter.Has()) {
ReadStack(stackEntryGetter,
@ -1050,12 +1051,14 @@ void ProfileBuffer::StreamSamplesToJSON(SpliceableJSONWriter& aWriter,
unresponsiveDuration);
}
});
mWorkerChunkManager.Reset(tempBuffer.GetAllChunks());
break;
}
MOZ_ASSERT(kind >= ProfileBufferEntry::Kind::LEGACY_LIMIT,
"There should be no legacy entries between "
"TimeBeforeCompactStack and CompactStack");
er.SetRemainingBytes(0);
}
e.Next();
@ -1079,10 +1082,10 @@ void ProfileBuffer::AddJITInfoForRange(uint64_t aRangeStart, int aThreadId,
// Find all JitReturnAddr entries in the given range for the given
// thread, and call aJITAddressConsumer with those addresses.
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"BlocksRingBuffer cannot be out-of-session when sampler "
"is running");
"ProfileChunkedBuffer cannot be out-of-session when "
"sampler is running");
EntryGetter e(*aReader, aRangeStart);
@ -1115,7 +1118,7 @@ void ProfileBuffer::AddJITInfoForRange(uint64_t aRangeStart, int aThreadId,
}
} else if (e.Has() && e.Get().IsTimeBeforeCompactStack()) {
// Compact stack.
BlocksRingBuffer::BlockIterator it = e.Iterator();
ProfileChunkedBuffer::BlockIterator it = e.Iterator();
for (;;) {
++it;
if (it.IsAtEnd()) {
@ -1125,14 +1128,14 @@ void ProfileBuffer::AddJITInfoForRange(uint64_t aRangeStart, int aThreadId,
ProfileBufferEntry::Kind kind =
er.ReadObject<ProfileBufferEntry::Kind>();
if (kind == ProfileBufferEntry::Kind::CompactStack) {
BlocksRingBuffer tempBuffer(
BlocksRingBuffer::ThreadSafety::WithoutMutex,
mWorkerBuffer.get(), WorkerBufferBytes);
ProfileChunkedBuffer tempBuffer(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
mWorkerChunkManager);
er.ReadIntoObject(tempBuffer);
tempBuffer.Read([&](BlocksRingBuffer::Reader* aReader) {
tempBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"Local BlocksRingBuffer cannot be out-of-session");
"Local ProfileChunkedBuffer cannot be out-of-session");
EntryGetter stackEntryGetter(*aReader);
while (stackEntryGetter.Has()) {
if (stackEntryGetter.Get().IsJitReturnAddr()) {
@ -1141,11 +1144,14 @@ void ProfileBuffer::AddJITInfoForRange(uint64_t aRangeStart, int aThreadId,
stackEntryGetter.Next();
}
});
mWorkerChunkManager.Reset(tempBuffer.GetAllChunks());
break;
}
MOZ_ASSERT(kind >= ProfileBufferEntry::Kind::LEGACY_LIMIT,
"There should be no legacy entries between "
"TimeBeforeCompactStack and CompactStack");
er.SetRemainingBytes(0);
}
e.Next();
@ -1193,6 +1199,8 @@ void ProfileBuffer::StreamMarkersToJSON(SpliceableJSONWriter& aWriter,
}
}
aWriter.EndArray();
} else {
aER.SetRemainingBytes(0);
}
});
}
@ -1200,10 +1208,10 @@ void ProfileBuffer::StreamMarkersToJSON(SpliceableJSONWriter& aWriter,
void ProfileBuffer::StreamProfilerOverheadToJSON(
SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime,
double aSinceTime) const {
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is running");
mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"ProfileChunkedBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader);
@ -1353,10 +1361,10 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
// error indicates a bug in the ProfileBuffer writing or the parser itself,
// or possibly flaky hardware.
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is running");
mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"ProfileChunkedBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader);
@ -1536,10 +1544,10 @@ static void AddPausedRange(SpliceableJSONWriter& aWriter, const char* aReason,
void ProfileBuffer::StreamPausedRangesToJSON(SpliceableJSONWriter& aWriter,
double aSinceTime) const {
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is running");
mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"ProfileChunkedBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader);
@ -1581,13 +1589,16 @@ bool ProfileBuffer::DuplicateLastSample(int aThreadId,
return false;
}
BlocksRingBuffer tempBuffer(BlocksRingBuffer::ThreadSafety::WithoutMutex,
mWorkerBuffer.get(), WorkerBufferBytes);
ProfileChunkedBuffer tempBuffer(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex, mWorkerChunkManager);
const bool ok = mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(
aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is running");
auto retrieveWorkerChunk = MakeScopeExit(
[&]() { mWorkerChunkManager.Reset(tempBuffer.GetAllChunks()); });
const bool ok = mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"ProfileChunkedBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader, *aLastSample);
@ -1632,7 +1643,7 @@ bool ProfileBuffer::DuplicateLastSample(int aThreadId,
// immediately follow `TimeBeforeCompactStack` (e.g., some markers
// could be written in-between), so we need to look for it in the
// following entries.
BlocksRingBuffer::BlockIterator it = e.Iterator();
ProfileChunkedBuffer::BlockIterator it = e.Iterator();
for (;;) {
++it;
if (it.IsAtEnd()) {
@ -1650,15 +1661,17 @@ bool ProfileBuffer::DuplicateLastSample(int aThreadId,
er = *it;
auto bytes = er.RemainingBytes();
MOZ_ASSERT(bytes < 65536);
tempBuffer.Put(bytes, [&](ProfileBufferEntryWriter* aEW) {
MOZ_ASSERT(aEW, "tempBuffer cannot be out-of-session");
tempBuffer.Put(bytes, [&](Maybe<ProfileBufferEntryWriter>& aEW) {
MOZ_ASSERT(aEW.isSome(), "tempBuffer cannot be out-of-session");
aEW->WriteFromReader(er, bytes);
});
break;
}
MOZ_ASSERT(kind >= ProfileBufferEntry::Kind::LEGACY_LIMIT,
"There should be no legacy entries between "
"TimeBeforeCompactStack and CompactStack");
er.SetRemainingBytes(0);
// Here, we have encountered a non-legacy entry that was not the
// CompactStack we're looking for; just continue the search...
}
@ -1749,56 +1762,9 @@ bool ProfileBuffer::DuplicateLastSample(int aThreadId,
}
void ProfileBuffer::DiscardSamplesBeforeTime(double aTime) {
const ProfileBufferBlockIndex firstBlockToKeep =
mEntries.Read([&](BlocksRingBuffer::Reader* aReader) {
MOZ_ASSERT(aReader,
"BlocksRingBuffer cannot be out-of-session when sampler is "
"running");
EntryGetter e(*aReader);
const ProfileBufferBlockIndex bufferStartPos = e.CurBlockIndex();
for (;;) {
// This block skips entries until we find the start of the next
// sample. This is useful in three situations.
//
// - The circular buffer overwrites old entries, so when we start
// parsing we might be in the middle of a sample, and we must skip
// forward to the start of the next sample.
//
// - We skip samples that don't have an appropriate ThreadId or Time.
//
// - We skip range Pause, Resume, CollectionStart, Marker, and
// CollectionEnd entries between samples.
while (e.Has()) {
if (e.Get().IsThreadId()) {
break;
}
e.Next();
}
if (!e.Has()) {
return bufferStartPos;
}
MOZ_RELEASE_ASSERT(e.Get().IsThreadId());
const ProfileBufferBlockIndex sampleStartPos = e.CurBlockIndex();
e.Next();
if (e.Has() &&
(e.Get().IsTime() || e.Get().IsTimeBeforeCompactStack())) {
double sampleTime = e.Get().GetDouble();
if (sampleTime >= aTime) {
// This is the first sample within the window of time that we want
// to keep. Throw away all samples before sampleStartPos and
// return.
return sampleStartPos;
}
}
}
});
mEntries.ClearBefore(firstBlockToKeep);
// This function does nothing!
// The duration limit will be removed from Firefox, see bug 1632365.
Unused << aTime;
}
// END ProfileBuffer

View File

@ -55,9 +55,9 @@ class ProfileBufferEntry {
// stored in a `ProfileBufferEntry`, as per the list in
// `FOR_EACH_PROFILE_BUFFER_ENTRY_KIND`.
//
// This byte is also used to identify entries in BlocksRingBuffer blocks, for
// both "legacy" entries that do contain a `ProfileBufferEntry`, and for new
// types of entries that may carry more data of different types.
// This byte is also used to identify entries in ProfileChunkedBuffer blocks,
// for both "legacy" entries that do contain a `ProfileBufferEntry`, and for
// new types of entries that may carry more data of different types.
// TODO: Eventually each type of "legacy" entry should be replaced with newer,
// more efficient kinds of entries (e.g., stack frames could be stored in one
// bigger entry, instead of multiple `ProfileBufferEntry`s); then we could

View File

@ -13,21 +13,22 @@
ProfilerBacktrace::ProfilerBacktrace(
const char* aName, int aThreadId,
UniquePtr<mozilla::BlocksRingBuffer> aBlocksRingBuffer,
UniquePtr<mozilla::ProfileChunkedBuffer> aProfileChunkedBuffer,
mozilla::UniquePtr<ProfileBuffer> aProfileBuffer)
: mName(strdup(aName)),
mThreadId(aThreadId),
mBlocksRingBuffer(std::move(aBlocksRingBuffer)),
mProfileChunkedBuffer(std::move(aProfileChunkedBuffer)),
mProfileBuffer(std::move(aProfileBuffer)) {
MOZ_COUNT_CTOR(ProfilerBacktrace);
MOZ_ASSERT(
!!mBlocksRingBuffer,
"ProfilerBacktrace only takes a non-null UniquePtr<BlocksRingBuffer>");
MOZ_ASSERT(!!mProfileChunkedBuffer,
"ProfilerBacktrace only takes a non-null "
"UniquePtr<ProfileChunkedBuffer>");
MOZ_ASSERT(
!!mProfileBuffer,
"ProfilerBacktrace only takes a non-null UniquePtr<ProfileBuffer>");
MOZ_ASSERT(!mBlocksRingBuffer->IsThreadSafe(),
"ProfilerBacktrace only takes a non-thread-safe BlocksRingBuffer");
MOZ_ASSERT(
!mProfileChunkedBuffer->IsThreadSafe(),
"ProfilerBacktrace only takes a non-thread-safe ProfileChunkedBuffer");
}
ProfilerBacktrace::~ProfilerBacktrace() { MOZ_COUNT_DTOR(ProfilerBacktrace); }

View File

@ -18,16 +18,17 @@ class ThreadInfo;
class UniqueStacks;
namespace mozilla {
class BlocksRingBuffer;
class ProfileChunkedBuffer;
class TimeStamp;
} // namespace mozilla
// ProfilerBacktrace encapsulates a synchronous sample.
class ProfilerBacktrace {
public:
ProfilerBacktrace(const char* aName, int aThreadId,
UniquePtr<mozilla::BlocksRingBuffer> aBlocksRingBuffer,
mozilla::UniquePtr<ProfileBuffer> aProfileBuffer);
ProfilerBacktrace(
const char* aName, int aThreadId,
UniquePtr<mozilla::ProfileChunkedBuffer> aProfileChunkedBuffer,
mozilla::UniquePtr<ProfileBuffer> aProfileBuffer);
~ProfilerBacktrace();
// ProfilerBacktraces' stacks are deduplicated in the context of the
@ -47,9 +48,9 @@ class ProfilerBacktrace {
mozilla::UniqueFreePtr<char> mName;
int mThreadId;
// `BlocksRingBuffer` in which `mProfileBuffer` stores its data; must be
// `ProfileChunkedBuffer` in which `mProfileBuffer` stores its data; must be
// located before `mProfileBuffer` so that it's destroyed after.
UniquePtr<mozilla::BlocksRingBuffer> mBlocksRingBuffer;
UniquePtr<mozilla::ProfileChunkedBuffer> mProfileChunkedBuffer;
mozilla::UniquePtr<ProfileBuffer> mProfileBuffer;
};
@ -63,7 +64,7 @@ struct ProfileBufferEntryWriter::Serializer<ProfilerBacktrace> {
if (!aBacktrace.mProfileBuffer) {
return ULEB128Size<Length>(0);
}
auto bufferBytes = SumBytes(*aBacktrace.mBlocksRingBuffer);
auto bufferBytes = SumBytes(*aBacktrace.mProfileChunkedBuffer);
if (bufferBytes == 0) {
return ULEB128Size<Length>(0);
}
@ -74,11 +75,11 @@ struct ProfileBufferEntryWriter::Serializer<ProfilerBacktrace> {
static void Write(ProfileBufferEntryWriter& aEW,
const ProfilerBacktrace& aBacktrace) {
if (!aBacktrace.mProfileBuffer ||
SumBytes(*aBacktrace.mBlocksRingBuffer) == 0) {
SumBytes(*aBacktrace.mProfileChunkedBuffer) == 0) {
aEW.WriteULEB128(0u);
return;
}
aEW.WriteObject(*aBacktrace.mBlocksRingBuffer);
aEW.WriteObject(*aBacktrace.mProfileChunkedBuffer);
aEW.WriteObject(aBacktrace.mThreadId);
aEW.WriteObject(WrapProfileBufferUnownedCString(aBacktrace.mName.get()));
}
@ -113,18 +114,19 @@ struct ProfileBufferEntryReader::Deserializer<
}
static UniquePtr<ProfilerBacktrace, Destructor> Read(
ProfileBufferEntryReader& aER) {
auto blocksRingBuffer = aER.ReadObject<UniquePtr<BlocksRingBuffer>>();
if (!blocksRingBuffer) {
auto profileChunkedBuffer =
aER.ReadObject<UniquePtr<ProfileChunkedBuffer>>();
if (!profileChunkedBuffer) {
return nullptr;
}
MOZ_ASSERT(
!blocksRingBuffer->IsThreadSafe(),
"ProfilerBacktrace only stores non-thread-safe BlocksRingBuffers");
!profileChunkedBuffer->IsThreadSafe(),
"ProfilerBacktrace only stores non-thread-safe ProfileChunkedBuffers");
int threadId = aER.ReadObject<int>();
std::string name = aER.ReadObject<std::string>();
auto profileBuffer = MakeUnique<ProfileBuffer>(*blocksRingBuffer);
auto profileBuffer = MakeUnique<ProfileBuffer>(*profileChunkedBuffer);
return UniquePtr<ProfilerBacktrace, Destructor>{new ProfilerBacktrace(
name.c_str(), threadId, std::move(blocksRingBuffer),
name.c_str(), threadId, std::move(profileChunkedBuffer),
std::move(profileBuffer))};
}
};

View File

@ -51,6 +51,9 @@
#include "mozilla/ExtensionPolicyService.h"
#include "mozilla/extensions/WebExtensionPolicy.h"
#include "mozilla/Printf.h"
#include "mozilla/ProfileBufferChunkManagerSingle.h"
#include "mozilla/ProfileBufferChunkManagerWithLocalLimit.h"
#include "mozilla/ProfileChunkedBuffer.h"
#include "mozilla/SchedulerGroup.h"
#include "mozilla/Services.h"
#include "mozilla/StackWalk.h"
@ -383,7 +386,7 @@ class CorePS {
// functions guarded by gPSMutex as well as others without safety (e.g.,
// profiler_add_marker). It is *not* used inside the critical section of
// the sampler, because mutexes cannot be used there.
mCoreBlocksRingBuffer(BlocksRingBuffer::ThreadSafety::WithMutex)
mCoreBuffer(ProfileChunkedBuffer::ThreadSafety::WithMutex)
#ifdef USE_LUL_STACKWALK
,
mLul(nullptr)
@ -443,7 +446,7 @@ class CorePS {
PS_GET_LOCKLESS(TimeStamp, ProcessStartTime)
// No PSLockRef is needed for this field because it's thread-safe.
PS_GET_LOCKLESS(BlocksRingBuffer&, CoreBlocksRingBuffer)
PS_GET_LOCKLESS(ProfileChunkedBuffer&, CoreBuffer)
PS_GET(const Vector<UniquePtr<RegisteredThread>>&, RegisteredThreads)
@ -552,16 +555,16 @@ class CorePS {
// The time that the process started.
const TimeStamp mProcessStartTime;
// The thread-safe blocks-oriented ring buffer into which all profiling data
// is recorded.
// The thread-safe blocks-oriented buffer into which all profiling data is
// recorded.
// ActivePS controls the lifetime of the underlying contents buffer: When
// ActivePS does not exist, mCoreBlocksRingBuffer is empty and rejects all
// reads&writes; see ActivePS for further details.
// ActivePS does not exist, mCoreBuffer is empty and rejects all reads&writes;
// see ActivePS for further details.
// Note: This needs to live here outside of ActivePS, because some producers
// are indirectly controlled (e.g., by atomic flags) and therefore may still
// attempt to write some data shortly after ActivePS has shutdown and deleted
// the underlying buffer in memory.
BlocksRingBuffer mCoreBlocksRingBuffer;
ProfileChunkedBuffer mCoreBuffer;
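// Editor's sketch (not part of this patch): producers append to this
// buffer with PutObjects(), mirroring call sites later in this diff; when
// no ActivePS exists, the buffer is out-of-session and writes are
// rejected. The value below is hypothetical.
#if 0
  double unresponsiveDurationMs = 42.0;
  CorePS::CoreBuffer().PutObjects(
      ProfileBufferEntry::Kind::UnresponsiveDurationMs,
      unresponsiveDurationMs);
#endif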
// Info on all the registered threads.
// ThreadIds in mRegisteredThreads are unique.
@ -625,6 +628,26 @@ class ActivePS {
return aFeatures;
}
constexpr static uint32_t bytesPerEntry = 8;
// We need to decide how many chunks of what size we want to fit in the given
// total maximum capacity for this process, in the (likely) context of
// multiple processes making the same choice and having an inter-process
// mechanism to control the overall memory limit.
// Ideally we want at least 2 unreleased chunks to work with (1 current and 1
// next), and 2 released chunks (so that one can be recycled when old, leaving
// one with some data).
constexpr static uint32_t minimumNumberOfChunks = 4;
// And we want to limit chunks to a maximum size, which is a compromise
// between:
// - A big size, which helps with reducing the rate of allocations and IPCs.
// - A small size, which helps with equalizing the duration of recorded data
// (as the inter-process controller will discard the oldest chunks in all
// Firefox processes).
constexpr static uint32_t maximumChunkSize = 1024 * 1024;
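// Editor's sketch (not part of this patch): how the constructor below
// derives the chunk size from these constants, for a hypothetical capacity
// of 1M entries:
#if 0
  uint32_t capacityEntries = 1u << 20;                     // hypothetical
  uint32_t bufferBytes = capacityEntries * bytesPerEntry;  // 8 MiB total
  uint32_t chunkSize =
      std::min(bufferBytes / minimumNumberOfChunks, maximumChunkSize);
  // 8 MiB / 4 = 2 MiB, clamped to the 1 MiB maximumChunkSize.
#endif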
ActivePS(PSLockRef aLock, PowerOfTwo32 aCapacity, double aInterval,
uint32_t aFeatures, const char** aFilters, uint32_t aFilterCount,
uint64_t aActiveBrowsingContextID, const Maybe<double>& aDuration)
@ -634,9 +657,14 @@ class ActivePS {
mInterval(aInterval),
mFeatures(AdjustFeatures(aFeatures, aFilterCount)),
mActiveBrowsingContextID(aActiveBrowsingContextID),
// 8 bytes per entry.
mProfileBuffer(CorePS::CoreBlocksRingBuffer(),
PowerOfTwo32(aCapacity.Value() * 8)),
mProfileBufferChunkManager(
aCapacity.Value() * bytesPerEntry,
std::min(aCapacity.Value() * bytesPerEntry / minimumNumberOfChunks,
maximumChunkSize)),
mProfileBuffer([this]() -> ProfileChunkedBuffer& {
CorePS::CoreBuffer().SetChunkManager(mProfileBufferChunkManager);
return CorePS::CoreBuffer();
}()),
// The new sampler thread doesn't start sampling immediately because the
// main loop within Run() is blocked until this function's caller
// unlocks gPSMutex.
@ -694,6 +722,7 @@ class ActivePS {
}
}
#endif
CorePS::CoreBuffer().ResetChunkManager();
}
bool ThreadSelected(const char* aThreadName) {
@ -893,6 +922,11 @@ class ActivePS {
PS_GET(const Vector<std::string>&, Filters)
static void FulfillChunkRequests(PSLockRef) {
MOZ_ASSERT(sInstance);
sInstance->mProfileBufferChunkManager.FulfillChunkRequests();
}
static ProfileBuffer& Buffer(PSLockRef) {
MOZ_ASSERT(sInstance);
return sInstance->mProfileBuffer;
@ -1081,8 +1115,9 @@ class ActivePS {
// |============| <-- Buffer range full and sliding.
// ^ mGeckoIndexWhenBaseProfileAdded < Start TRUE! -> Discard it
if (sInstance->mBaseProfileThreads &&
sInstance->mGeckoIndexWhenBaseProfileAdded <
CorePS::CoreBlocksRingBuffer().GetState().mRangeStart) {
sInstance->mGeckoIndexWhenBaseProfileAdded
.ConvertToProfileBufferIndex() <
CorePS::CoreBuffer().GetState().mRangeStart) {
DEBUG_LOG("ClearExpiredExitProfiles() - Discarding base profile %p",
sInstance->mBaseProfileThreads.get());
sInstance->mBaseProfileThreads.reset();
@ -1101,7 +1136,8 @@ class ActivePS {
DEBUG_LOG("AddBaseProfileThreads(%p)", aBaseProfileThreads.get());
sInstance->mBaseProfileThreads = std::move(aBaseProfileThreads);
sInstance->mGeckoIndexWhenBaseProfileAdded =
CorePS::CoreBlocksRingBuffer().GetState().mRangeEnd;
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
CorePS::CoreBuffer().GetState().mRangeEnd);
}
static UniquePtr<char[]> MoveBaseProfileThreads(PSLockRef aLock) {
@ -1183,6 +1219,9 @@ class ActivePS {
// get the ID.
const uint64_t mActiveBrowsingContextID;
// The chunk manager used by `mProfileBuffer` below.
ProfileBufferChunkManagerWithLocalLimit mProfileBufferChunkManager;
// The buffer into which all samples are recorded.
ProfileBuffer mProfileBuffer;
@ -2361,14 +2400,10 @@ static JS::ProfilingCategoryPair InferJavaCategory(nsACString& aName) {
return JS::ProfilingCategoryPair::OTHER;
}
static UniquePtr<ProfileBuffer> CollectJavaThreadProfileData(
BlocksRingBuffer& bufferManager) {
static void CollectJavaThreadProfileData(ProfileBuffer& aProfileBuffer) {
// locked_profiler_start uses a sample count of 1000 for the Java thread.
// This entry size is enough for now, but we might have to estimate it
// if we can customize it.
auto buffer = MakeUnique<ProfileBuffer>(bufferManager,
MakePowerOfTwo32<8 * 1024 * 1024>());
int sampleId = 0;
while (true) {
// Gets the data from the java main thread only.
@ -2377,8 +2412,8 @@ static UniquePtr<ProfileBuffer> CollectJavaThreadProfileData(
break;
}
buffer->AddThreadIdEntry(0);
buffer->AddEntry(ProfileBufferEntry::Time(sampleTime));
aProfileBuffer.AddThreadIdEntry(0);
aProfileBuffer.AddEntry(ProfileBufferEntry::Time(sampleTime));
int frameId = 0;
while (true) {
jni::String::LocalRef frameName =
@ -2389,12 +2424,12 @@ static UniquePtr<ProfileBuffer> CollectJavaThreadProfileData(
nsCString frameNameString = frameName->ToCString();
auto categoryPair = InferJavaCategory(frameNameString);
buffer->CollectCodeLocation("", frameNameString.get(), 0, 0, Nothing(),
Nothing(), Some(categoryPair));
aProfileBuffer.CollectCodeLocation("", frameNameString.get(), 0, 0,
Nothing(), Nothing(),
Some(categoryPair));
}
sampleId++;
}
return buffer;
}
#endif
@ -2474,17 +2509,22 @@ static void locked_profiler_stream_json_for_this_process(
if (ActivePS::FeatureJava(aLock)) {
java::GeckoJavaSampler::Pause();
BlocksRingBuffer bufferManager(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
UniquePtr<ProfileBuffer> javaBuffer =
CollectJavaThreadProfileData(bufferManager);
// locked_profiler_start uses a sample count of 1000 for the Java thread.
// This entry size is enough for now, but we might have to estimate it
// if we can customize it.
mozilla::ProfileBufferChunkManagerWithLocalLimit chunkManager(
8 * 1024 * 1024, 1024 * 1024);
ProfileChunkedBuffer bufferManager(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex, chunkManager);
ProfileBuffer javaBuffer(bufferManager);
CollectJavaThreadProfileData(javaBuffer);
// Thread id of java Main thread is 0, if we support profiling of other
// java thread, we have to get thread id and name via JNI.
RefPtr<ThreadInfo> threadInfo = new ThreadInfo(
"Java Main Thread", 0, false, CorePS::ProcessStartTime());
ProfiledThreadData profiledThreadData(threadInfo, nullptr);
profiledThreadData.StreamJSON(*javaBuffer.get(), nullptr, aWriter,
profiledThreadData.StreamJSON(javaBuffer, nullptr, aWriter,
CorePS::ProcessName(aLock),
CorePS::ProcessStartTime(), aSinceTime,
ActivePS::FeatureJSTracer(aLock), nullptr);
@ -2879,17 +2919,17 @@ void SamplerThread::Run() {
return ActivePS::FeatureNoStackSampling(lock);
}();
// Use local BlocksRingBuffer&ProfileBuffer to capture the stack.
// (This is to avoid touching the CorePS::BlocksRingBuffer lock while
// a thread is suspended, because that thread could be working with
// the CorePS::BlocksRingBuffer as well.)
BlocksRingBuffer localBlocksRingBuffer(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
ProfileBuffer localProfileBuffer(localBlocksRingBuffer,
MakePowerOfTwo32<65536>());
// Use local ProfileBuffer and underlying buffer to capture the stack.
// (This is to avoid touching the CorePS::CoreBuffer lock while a thread is
// suspended, because that thread could be working with the CorePS::CoreBuffer
// as well.)
mozilla::ProfileBufferChunkManagerSingle localChunkManager(65536);
ProfileChunkedBuffer localBuffer(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex, localChunkManager);
ProfileBuffer localProfileBuffer(localBuffer);
// Will be kept between collections, to know what each collection does.
auto previousState = localBlocksRingBuffer.GetState();
auto previousState = localBuffer.GetState();
// This will be positive if we are running behind schedule (sampling less
// frequently than desired) and negative if we are ahead of schedule.
@ -3206,7 +3246,7 @@ void SamplerThread::Run() {
// Note: It is not stored inside the CompactStack so that it doesn't
// get incorrectly duplicated when the thread is sleeping.
if (unresponsiveDuration_ms.isSome()) {
CorePS::CoreBlocksRingBuffer().PutObjects(
CorePS::CoreBuffer().PutObjects(
ProfileBufferEntry::Kind::UnresponsiveDurationMs,
*unresponsiveDuration_ms);
}
@ -3217,40 +3257,33 @@ void SamplerThread::Run() {
// sample from `DoPeriodicSample` is complete, copy it into the
// global buffer, otherwise add an empty one to satisfy the parser
// that expects one.
auto state = localBlocksRingBuffer.GetState();
auto state = localBuffer.GetState();
if (NS_WARN_IF(state.mClearedBlockCount !=
previousState.mClearedBlockCount)) {
LOG("Stack sample too big for local storage, needed %u bytes",
unsigned(
state.mRangeEnd.ConvertToProfileBufferIndex() -
previousState.mRangeEnd.ConvertToProfileBufferIndex()));
unsigned(state.mRangeEnd - previousState.mRangeEnd));
// There *must* be a CompactStack after a TimeBeforeCompactStack,
// even an empty one.
CorePS::CoreBlocksRingBuffer().PutObjects(
CorePS::CoreBuffer().PutObjects(
ProfileBufferEntry::Kind::CompactStack,
UniquePtr<BlocksRingBuffer>(nullptr));
} else if (state.mRangeEnd.ConvertToProfileBufferIndex() -
previousState.mRangeEnd
.ConvertToProfileBufferIndex() >=
CorePS::CoreBlocksRingBuffer().BufferLength()->Value()) {
UniquePtr<ProfileChunkedBuffer>(nullptr));
} else if (state.mRangeEnd - previousState.mRangeEnd >=
*CorePS::CoreBuffer().BufferLength()) {
LOG("Stack sample too big for profiler storage, needed %u bytes",
unsigned(
state.mRangeEnd.ConvertToProfileBufferIndex() -
previousState.mRangeEnd.ConvertToProfileBufferIndex()));
unsigned(state.mRangeEnd - previousState.mRangeEnd));
// There *must* be a CompactStack after a TimeBeforeCompactStack,
// even an empty one.
CorePS::CoreBlocksRingBuffer().PutObjects(
CorePS::CoreBuffer().PutObjects(
ProfileBufferEntry::Kind::CompactStack,
UniquePtr<BlocksRingBuffer>(nullptr));
UniquePtr<ProfileChunkedBuffer>(nullptr));
} else {
CorePS::CoreBlocksRingBuffer().PutObjects(
ProfileBufferEntry::Kind::CompactStack,
localBlocksRingBuffer);
CorePS::CoreBuffer().PutObjects(
ProfileBufferEntry::Kind::CompactStack, localBuffer);
}
// Clean up for the next run.
localBlocksRingBuffer.Clear();
previousState = localBlocksRingBuffer.GetState();
localBuffer.Clear();
previousState = localBuffer.GetState();
}
} else {
samplingState = SamplingState::NoStackSamplingCompleted;
@ -3266,6 +3299,11 @@ void SamplerThread::Run() {
#endif
TimeStamp threadsSampled = TimeStamp::NowUnfuzzed();
{
AUTO_PROFILER_STATS(Sampler_FulfillChunkRequests);
ActivePS::FulfillChunkRequests(lock);
}
buffer.CollectOverheadStats(delta, lockAcquired - sampleStart,
expiredMarkersCleaned - lockAcquired,
countersSampled - expiredMarkersCleaned,
@ -4773,10 +4811,10 @@ UniqueProfilerBacktrace profiler_get_backtrace() {
#endif
// 65536 bytes should be plenty for a single backtrace.
auto bufferManager = MakeUnique<BlocksRingBuffer>(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
auto buffer =
MakeUnique<ProfileBuffer>(*bufferManager, MakePowerOfTwo32<65536>());
auto bufferManager = MakeUnique<ProfileChunkedBuffer>(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
MakeUnique<ProfileBufferChunkManagerSingle>(65536));
auto buffer = MakeUnique<ProfileBuffer>(*bufferManager);
DoSyncSample(lock, *registeredThread, now, regs, *buffer.get());
@ -4812,7 +4850,7 @@ static void racy_profiler_add_marker(const char* aMarkerName,
? aPayload->GetStartTime()
: TimeStamp::NowUnfuzzed();
TimeDuration delta = origin - CorePS::ProcessStartTime();
CorePS::CoreBlocksRingBuffer().PutObjects(
CorePS::CoreBuffer().PutObjects(
ProfileBufferEntry::Kind::MarkerData, racyRegisteredThread->ThreadId(),
WrapProfileBufferUnownedCString(aMarkerName),
static_cast<uint32_t>(aCategoryPair), aPayload, delta.ToMilliseconds());
@ -4933,7 +4971,7 @@ void profiler_add_marker_for_thread(int aThreadId,
? aPayload->GetStartTime()
: TimeStamp::NowUnfuzzed();
TimeDuration delta = origin - CorePS::ProcessStartTime();
CorePS::CoreBlocksRingBuffer().PutObjects(
CorePS::CoreBuffer().PutObjects(
ProfileBufferEntry::Kind::MarkerData, aThreadId,
WrapProfileBufferUnownedCString(aMarkerName),
static_cast<uint32_t>(aCategoryPair), aPayload, delta.ToMilliseconds());

View File

@ -1320,6 +1320,8 @@ TEST(GeckoProfiler, Markers)
EXPECT_EQ(GTestMarkerPayload::sNumDestroyed, 10 + 10 + 0 + 10);
}
// The duration limit will be removed from Firefox, see bug 1632365.
#if 0
TEST(GeckoProfiler, DurationLimit)
{
uint32_t features = ProfilerFeature::StackWalk;
@ -1345,12 +1347,13 @@ TEST(GeckoProfiler, DurationLimit)
// Both markers created, serialized, destroyed; only the first marker should
// have been deserialized, streamed, and destroyed again.
ASSERT_EQ(GTestMarkerPayload::sNumCreated, 2);
ASSERT_EQ(GTestMarkerPayload::sNumSerialized, 2);
ASSERT_EQ(GTestMarkerPayload::sNumDeserialized, 1);
ASSERT_EQ(GTestMarkerPayload::sNumStreamed, 1);
ASSERT_EQ(GTestMarkerPayload::sNumDestroyed, 3);
EXPECT_EQ(GTestMarkerPayload::sNumCreated, 2);
EXPECT_EQ(GTestMarkerPayload::sNumSerialized, 2);
EXPECT_EQ(GTestMarkerPayload::sNumDeserialized, 1);
EXPECT_EQ(GTestMarkerPayload::sNumStreamed, 1);
EXPECT_EQ(GTestMarkerPayload::sNumDestroyed, 3);
}
#endif
#define COUNTER_NAME "TestCounter"
#define COUNTER_DESCRIPTION "Test of counters in profiles"

View File

@ -4,21 +4,24 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ProfileBufferEntry.h"
#include "ProfileBuffer.h"
#include "ThreadInfo.h"
#include "mozilla/PowerOfTwo.h"
#include "mozilla/ProfileBufferChunkManagerWithLocalLimit.h"
#include "mozilla/ProfileChunkedBuffer.h"
#include "gtest/gtest.h"
// Make sure we can record one entry and read it
TEST(ThreadProfile, InsertOneEntry)
{
mozilla::BlocksRingBuffer blocksRingBuffer(
BlocksRingBuffer::ThreadSafety::WithMutex);
auto pb = MakeUnique<ProfileBuffer>(
blocksRingBuffer,
mozilla::PowerOfTwo32(2 * (1 + uint32_t(sizeof(ProfileBufferEntry)))));
mozilla::ProfileBufferChunkManagerWithLocalLimit chunkManager(
2 * (1 + uint32_t(sizeof(ProfileBufferEntry))) * 4,
2 * (1 + uint32_t(sizeof(ProfileBufferEntry))));
mozilla::ProfileChunkedBuffer profileChunkedBuffer(
mozilla::ProfileChunkedBuffer::ThreadSafety::WithMutex, chunkManager);
auto pb = mozilla::MakeUnique<ProfileBuffer>(profileChunkedBuffer);
pb->AddEntry(ProfileBufferEntry::Time(123.1));
ProfileBufferEntry entry = pb->GetEntry(pb->BufferRangeStart());
ASSERT_TRUE(entry.IsTime());
@ -28,11 +31,12 @@ TEST(ThreadProfile, InsertOneEntry)
// See if we can insert some entries
TEST(ThreadProfile, InsertEntriesNoWrap)
{
mozilla::BlocksRingBuffer blocksRingBuffer(
BlocksRingBuffer::ThreadSafety::WithMutex);
auto pb = MakeUnique<ProfileBuffer>(
blocksRingBuffer,
mozilla::PowerOfTwo32(100 * (1 + uint32_t(sizeof(ProfileBufferEntry)))));
mozilla::ProfileBufferChunkManagerWithLocalLimit chunkManager(
100 * (1 + uint32_t(sizeof(ProfileBufferEntry))),
100 * (1 + uint32_t(sizeof(ProfileBufferEntry))) / 4);
mozilla::ProfileChunkedBuffer profileChunkedBuffer(
mozilla::ProfileChunkedBuffer::ThreadSafety::WithMutex, chunkManager);
auto pb = mozilla::MakeUnique<ProfileBuffer>(profileChunkedBuffer);
const int test_size = 50;
for (int i = 0; i < test_size; i++) {
pb->AddEntry(ProfileBufferEntry::Time(i));

View File

@ -9,6 +9,7 @@
#include "mozilla/layers/SynchronousTask.h"
#include "mozilla/StaticPtr.h"
#include "mtransport/runnable_utils.h"
#include "mozilla/StaticPrefs_apz.h"
#if WINVER < 0x0602
# define WS_EX_NOREDIRECTIONBITMAP 0x00200000L
@ -158,9 +159,14 @@ WinCompositorWnds WinCompositorWindowThread::CreateCompositorWindow() {
nullptr, WS_POPUP | WS_DISABLED, 0, 0, 1, 1,
nullptr, 0, GetModuleHandle(nullptr), 0);
DWORD extendedStyle = WS_EX_NOPARENTNOTIFY | WS_EX_NOREDIRECTIONBITMAP;
if (!StaticPrefs::apz_windows_force_disable_direct_manipulation()) {
extendedStyle |= WS_EX_LAYERED;
}
compositorWnd = ::CreateWindowEx(
WS_EX_LAYERED | WS_EX_NOPARENTNOTIFY | WS_EX_NOREDIRECTIONBITMAP,
kClassNameCompositor, nullptr,
extendedStyle, kClassNameCompositor, nullptr,
WS_CHILDWINDOW | WS_DISABLED | WS_VISIBLE, 0, 0, 1, 1,
initialParentWnd, 0, GetModuleHandle(nullptr), 0);
});