Merge m-c to b2g-inbound.

This commit is contained in:
Ryan VanderMeulen 2014-04-22 13:08:18 -07:00
commit b9fc64ae11
99 changed files with 2982 additions and 783 deletions

View File

@ -663,13 +663,7 @@ pref("plugins.click_to_play", true);
pref("plugins.hideMissingPluginsNotification", false);
#ifdef RELEASE_BUILD
// For now, plugins other than Java and Flash are enabled in beta/release
// and click-to-activate in earlier channels.
pref("plugin.default.state", 2);
#else
pref("plugin.default.state", 1);
#endif
// Plugins bundled in XPIs are enabled by default.
pref("plugin.defaultXpi.state", 2);
@ -679,6 +673,124 @@ pref("plugin.defaultXpi.state", 2);
pref("plugin.state.flash", 2);
pref("plugin.state.java", 1);
// Whitelist Requests
// Unity player, bug 979849
#ifdef XP_WIN
pref("plugin.state.npunity3d", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.unity web player", 2);
#endif
// Cisco Jabber SDK, bug 980133
#ifdef XP_WIN
pref("plugin.state.npciscowebcommunicator", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.ciscowebcommunicator", 2);
#endif
// McAfee Security Scanner detection plugin, bug 980772
#ifdef XP_WIN
pref("plugin.state.npmcafeemss", 2);
#endif
// Cisco VGConnect for directv.com, bug 981403
#ifdef XP_WIN
pref("plugin.state.npplayerplugin", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.playerplugin", 2);
#endif
// Cisco Jabber Client, bug 981905
#ifdef XP_WIN
pref("plugin.state.npchip", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.cisco jabber guest plug-in", 2);
#endif
// Estonian ID-card plugin, bug 982045
#ifdef XP_WIN
pref("plugin.state.npesteid-firefox-plugin", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.esteidfirefoxplugin", 2);
#endif
#ifdef UNIX_BUT_NOT_MAC
pref("plugin.state.npesteid-firefox-plugin", 2);
#endif
// coupons.com, bug 984441
#ifdef XP_WIN
pref("plugin.state.npmozcouponprinter", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.couponprinter-firefox_v", 2);
#endif
// Nexus Personal BankID, bug 987056
pref("plugin.state.npbispbrowser", 2);
// Gradecam, bug 988119
#ifdef XP_WIN
pref("plugin.state.npgcplugin", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.gcplugin", 2);
#endif
// Smart Card Plugin, bug 988781
#ifdef XP_WIN
pref("plugin.state.npwebcard", 2);
#endif
// Cisco WebEx, bug 989096
#ifdef XP_WIN
pref("plugin.state.npatgpc", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.webex", 2);
#endif
#ifdef UNIX_BUT_NOT_MAC
pref("plugin.state.npatgpc", 2);
#endif
// Skype, bug 990067
#ifdef XP_WIN
pref("plugin.state.npskypewebplugin", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.skypewebplugin", 2);
#endif
// Facebook video calling, bug 990068
#ifdef XP_WIN
pref("plugin.state.npfacebookvideocalling", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.facebookvideocalling", 2);
#endif
// MS Office Lync plugin, bug 990069
#ifdef XP_WIN
pref("plugin.state.npmeetingjoinpluginoc", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.lwaplugin", 2);
#endif
// VidyoWeb, bug 990286
#ifdef XP_WIN
pref("plugin.state.npvidyoweb", 2);
#endif
#ifdef XP_MACOSX
pref("plugin.state.npvidyoweb", 2);
pref("plugin.state.vidyoweb", 2);
#endif
// display door hanger if flash not installed
pref("plugins.notifyMissingFlash", true);
@ -1286,7 +1398,7 @@ pref("devtools.eyedropper.zoom", 6);
// - keymap: which keymap to use (can be 'default', 'emacs' or 'vim')
// - autoclosebrackets: whether to permit automatic bracket/quote closing.
// - detectindentation: whether to detect the indentation from the file
pref("devtools.editor.tabsize", 4);
pref("devtools.editor.tabsize", 2);
pref("devtools.editor.expandtab", true);
pref("devtools.editor.keymap", "default");
pref("devtools.editor.autoclosebrackets", true);

View File

@ -122,36 +122,25 @@ tabbrowser {
visibility: hidden;
}
.tab-background,
.tab-close-button,
.tab-label {
.tab-background {
/* Explicitly set the visibility to override the value (collapsed)
* we inherit from #TabsToolbar[collapsed] upon opening a browser window. */
visibility: visible;
}
.tab-close-button[fadein],
.tab-background[fadein] {
/* This transition is only wanted for opening tabs. */
transition: visibility 0ms 25ms;
}
.tab-close-button:not([fadein]),
.tab-background:not([fadein]) {
visibility: hidden;
}
.tab-close-button[fadein],
.tab-label[fadein] {
/* This transition is only wanted for opening tabs. */
transition: opacity 70ms 230ms,
visibility 0ms 230ms;
}
.tab-close-button:not([fadein]),
.tab-label:not([fadein]) {
visibility: collapse;
opacity: .6;
}
.tab-label:not([fadein]),
.tab-throbber:not([fadein]),
.tab-icon-image:not([fadein]) {
display: none;

View File

@ -27,6 +27,6 @@ add_task(function() {
is(newWinBookmarksToolbarPlaceholder.getAttribute("wrap"), "true",
"Button in new window should have 'wrap' attribute");
yield newWin.PanelUI.hide();
newWin.close();
yield promiseWindowClosed(newWin);
CustomizableUI.reset();
});

View File

@ -133,7 +133,6 @@ PlacesController.prototype = {
case "cmd_paste":
case "cmd_delete":
case "placesCmd_delete":
case "placesCmd_moveBookmarks":
case "cmd_paste":
case "placesCmd_paste":
case "placesCmd_new:folder":

View File

@ -23,27 +23,38 @@ var gMoveBookmarksDialog = {
},
onOK: function MBD_onOK(aEvent) {
var selectedNode = this.foldersTree.selectedNode;
NS_ASSERT(selectedNode,
"selectedNode must be set in a single-selection tree with initial selection set");
var selectedFolderID = PlacesUtils.getConcreteItemId(selectedNode);
let selectedNode = this.foldersTree.selectedNode;
let selectedFolderId = PlacesUtils.getConcreteItemId(selectedNode);
var transactions = [];
for (var i=0; i < this._nodes.length; i++) {
// Nothing to do if the node is already under the selected folder
if (this._nodes[i].parent.itemId == selectedFolderID)
continue;
if (!PlacesUIUtils.useAsyncTransactions) {
let transactions = [];
for (var i=0; i < this._nodes.length; i++) {
// Nothing to do if the node is already under the selected folder
if (this._nodes[i].parent.itemId == selectedFolderId)
continue;
let txn = new PlacesMoveItemTransaction(this._nodes[i].itemId,
selectedFolderID,
PlacesUtils.bookmarks.DEFAULT_INDEX);
transactions.push(txn);
let txn = new PlacesMoveItemTransaction(this._nodes[i].itemId,
selectedFolderId,
PlacesUtils.bookmarks.DEFAULT_INDEX);
transactions.push(txn);
}
if (transactions.length != 0) {
let txn = new PlacesAggregatedTransaction("Move Items", transactions);
PlacesUtils.transactionManager.doTransaction(txn);
}
return;
}
if (transactions.length != 0) {
let txn = new PlacesAggregatedTransaction("Move Items", transactions);
PlacesUtils.transactionManager.doTransaction(txn);
}
PlacesTransactions.transact(function* () {
let newParentGUID = yield PlacesUtils.promiseItemGUID(selectedFolderId);
for (let node of this._nodes) {
// Nothing to do if the node is already under the selected folder.
if (node.parent.itemId == selectedFolderId)
continue;
yield PlacesTransactions.MoveItem({ GUID: node.bookmarkGuid
, newParentGUID: newParentGUID });
}
}.bind(this)).then(null, Components.utils.reportError);
},
newFolder: function MBD_newFolder() {

View File

@ -52,7 +52,7 @@ function test(){
}
function testSourceIsUgly() {
ok(!gEditor.getText().contains("\n "),
ok(!gEditor.getText().contains("\n "),
"The source shouldn't be pretty printed yet.");
}
@ -93,7 +93,7 @@ function testAutoPrettyPrintOff(){
}
function testSourceIsPretty() {
ok(gEditor.getText().contains("\n "),
ok(gEditor.getText().contains("\n "),
"The source should be pretty printed.")
}

View File

@ -63,7 +63,7 @@ function test(){
}
function testSourceIsUgly() {
ok(!gEditor.getText().contains("\n "),
ok(!gEditor.getText().contains("\n "),
"The source shouldn't be pretty printed yet.");
}
@ -96,7 +96,7 @@ function disableAutoPrettyPrint(){
}
function testSourceIsPretty() {
ok(gEditor.getText().contains("\n "),
ok(gEditor.getText().contains("\n "),
"The source should be pretty printed.")
}

View File

@ -38,7 +38,7 @@ function test() {
}
function testSourceIsUgly() {
ok(!gEditor.getText().contains("\n "),
ok(!gEditor.getText().contains("\n "),
"The source shouldn't be pretty printed yet.");
}
@ -52,7 +52,7 @@ function testProgressBarShown() {
}
function testSourceIsPretty() {
ok(gEditor.getText().contains("\n "),
ok(gEditor.getText().contains("\n "),
"The source should be pretty printed.")
}
@ -66,7 +66,7 @@ function testSourceIsStillPretty() {
const { source } = gSources.selectedItem.attachment;
gDebugger.DebuggerController.SourceScripts.getText(source).then(([, text]) => {
ok(text.contains("\n "),
ok(text.contains("\n "),
"Subsequent calls to getText return the pretty printed source.");
deferred.resolve();
});

View File

@ -43,7 +43,7 @@ function selectContextMenuItem() {
}
function testSourceIsPretty() {
ok(gEditor.getText().contains("\n "),
ok(gEditor.getText().contains("\n "),
"The source should be pretty printed.")
}

View File

@ -37,7 +37,7 @@ function test() {
}
function testSourceIsUgly() {
ok(!gEditor.getText().contains("\n "),
ok(!gEditor.getText().contains("\n "),
"The source shouldn't be pretty printed yet.");
}
@ -46,7 +46,7 @@ function clickPrettyPrintButton() {
}
function testSourceIsPretty() {
ok(gEditor.getText().contains("\n "),
ok(gEditor.getText().contains("\n "),
"The source should be pretty printed.")
}

View File

@ -41,7 +41,7 @@ function test() {
}
function testSourceIsUgly() {
ok(!gEditor.getText().contains("\n "),
ok(!gEditor.getText().contains("\n "),
"The source shouldn't be pretty printed yet.");
}
@ -55,7 +55,7 @@ function testProgressBarShown() {
}
function testSourceIsPretty() {
ok(gEditor.getText().contains("\n "),
ok(gEditor.getText().contains("\n "),
"The source should be pretty printed.")
}
@ -69,7 +69,7 @@ function testSourceIsStillPretty() {
const { source } = gSources.selectedItem.attachment;
gDebugger.DebuggerController.SourceScripts.getText(source).then(([, text]) => {
ok(text.contains("\n "),
ok(text.contains("\n "),
"Subsequent calls to getText return the pretty printed source.");
deferred.resolve();
});

View File

@ -28,7 +28,6 @@ support-files =
# [browser_inspector_bug_831693_searchbox_panel_navigation.js]
# Disabled for too many intermittent failures (bug 851349)
[browser_inspector_bug_840156_destroy_after_navigation.js]
[browser_inspector_changes.js]
[browser_inspector_cmd_inspect.js]
[browser_inspector_dead_node_exception.js]
[browser_inspector_destroyselection.js]

View File

@ -1,156 +0,0 @@
/* -*- Mode: Javascript; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
let doc;
let testDiv;
function test() {
let inspector;
function createDocument()
{
doc.body.innerHTML = '<div id="testdiv">Test div!</div>';
doc.title = "Inspector Change Test";
openInspector(runInspectorTests);
}
function getInspectorRuleProp(aName)
{
let ruleview = inspector.sidebar.getWindowForTab("ruleview").ruleview.view;
let inlineStyles = ruleview._elementStyle.rules[0];
for (let key in inlineStyles.textProps) {
let prop = inlineStyles.textProps[key];
if (prop.name == aName) {
return prop;
}
}
return null;
}
function runInspectorTests(aInspector)
{
inspector = aInspector;
waitForView("computedview", () => {
info("Computed View ready");
inspector.sidebar.select("computedview");
testDiv = doc.getElementById("testdiv");
testDiv.style.fontSize = "10px";
// Start up the style inspector panel...
inspector.once("computed-view-refreshed", () => {
executeSoon(computedStylePanelTests);
});
inspector.selection.setNode(testDiv);
});
}
function computedStylePanelTests()
{
let computedview = inspector.sidebar.getWindowForTab("computedview").computedview;
ok(computedview, "Style Panel has a cssHtmlTree");
let fontSize = getComputedPropertyValue("font-size");
is(fontSize, "10px", "Style inspector should be showing the correct font size.");
testDiv.style.cssText = "font-size: 15px; color: red;";
// Wait until layout-change fires from mutation to skip earlier refresh event
inspector.once("layout-change", () => {
inspector.once("computed-view-refreshed", () => {
executeSoon(computedStylePanelAfterChange);
});
});
}
function computedStylePanelAfterChange()
{
let fontSize = getComputedPropertyValue("font-size");
is(fontSize, "15px", "Style inspector should be showing the new font size.");
let color = getComputedPropertyValue("color");
is(color, "#F00", "Style inspector should be showing the new color.");
computedStylePanelNotActive();
}
function computedStylePanelNotActive()
{
// Tests changes made while the style panel is not active.
inspector.sidebar.select("ruleview");
testDiv.style.cssText = "font-size: 20px; color: blue; text-align: center";
inspector.once("computed-view-refreshed", () => {
executeSoon(computedStylePanelAfterSwitch);
});
}
function computedStylePanelAfterSwitch()
{
let fontSize = getComputedPropertyValue("font-size");
is(fontSize, "20px", "Style inspector should be showing the new font size.");
let color = getComputedPropertyValue("color");
is(color, "#00F", "Style inspector should be showing the new color.");
let textAlign = getComputedPropertyValue("text-align");
is(textAlign, "center", "Style inspector should be showing the new text align.");
rulePanelTests();
}
function rulePanelTests()
{
inspector.sidebar.select("ruleview");
let ruleview = inspector.sidebar.getWindowForTab("ruleview").ruleview;
ok(ruleview, "Style Panel has a ruleview");
let propView = getInspectorRuleProp("text-align");
is(propView.value, "center", "Style inspector should be showing the new text align.");
testDiv.style.cssText = "font-size: 3em; color: lightgoldenrodyellow; text-align: right; text-transform: uppercase";
inspector.once("rule-view-refreshed", () => {
executeSoon(rulePanelAfterChange);
});
}
function rulePanelAfterChange()
{
let propView = getInspectorRuleProp("text-align");
is(propView.value, "right", "Style inspector should be showing the new text align.");
let propView = getInspectorRuleProp("color");
is(propView.value, "lightgoldenrodyellow", "Style inspector should be showing the new color.")
let propView = getInspectorRuleProp("font-size");
is(propView.value, "3em", "Style inspector should be showing the new font size.");
let propView = getInspectorRuleProp("text-transform");
is(propView.value, "uppercase", "Style inspector should be showing the new text transform.");
finishTest();
}
function finishTest()
{
gBrowser.removeCurrentTab();
finish();
}
waitForExplicitFinish();
gBrowser.selectedTab = gBrowser.addTab();
gBrowser.selectedBrowser.addEventListener("load", function() {
gBrowser.selectedBrowser.removeEventListener("load", arguments.callee, true);
doc = content.document;
waitForFocus(createDocument, content);
}, true);
content.location = "data:text/html;charset=utf-8,browser_inspector_changes.js";
}

View File

@ -169,7 +169,25 @@ function onEditorKeypress({ ed, Editor }, event) {
return;
}
if ((event.ctrlKey || event.metaKey) && event.keyCode == event.DOM_VK_SPACE) {
// When Ctrl/Cmd + Space is pressed, two simultaneous keypresses are emitted
// first one for just the Ctrl/Cmd and second one for combo. The first one
// leave the private.doNotAutocomplete as true, so we have to make it false
private.doNotAutocomplete = false;
return;
}
if (event.ctrlKey || event.metaKey || event.altKey) {
private.doNotAutocomplete = true;
private.popup.hidePopup();
return;
}
switch (event.keyCode) {
case event.DOM_VK_RETURN:
private.doNotAutocomplete = true;
break;
case event.DOM_VK_ESCAPE:
if (private.popup.isOpen)
event.preventDefault();

View File

@ -62,26 +62,26 @@ function test() {
waitForExplicitFinish();
setup((ed, win) => {
is(ed.getOption("indentUnit"), 4,
"4 spaces before code added");
is(ed.getOption("indentUnit"), 2,
"2 spaces before code added");
is(ed.getOption("indentWithTabs"), false,
"spaces is default");
ed.setText(TWO_SPACES_CODE);
is(ed.getOption("indentUnit"), 2,
"2 spaces detected in 2 space code");
is(ed.getOption("indentWithTabs"), false,
"spaces detected in 2 space code");
ed.setText(FOUR_SPACES_CODE);
is(ed.getOption("indentUnit"), 4,
"4 spaces detected in 4 space code");
is(ed.getOption("indentWithTabs"), false,
"spaces detected in 4 space code");
ed.setText(TWO_SPACES_CODE);
is(ed.getOption("indentUnit"), 2,
"2 spaces detected in 2 space code");
is(ed.getOption("indentWithTabs"), false,
"spaces detected in 2 space code");
ed.setText(TABS_CODE);
is(ed.getOption("indentUnit"), 4,
"4 space indentation unit");
is(ed.getOption("indentUnit"), 2,
"2 space indentation unit");
is(ed.getOption("indentWithTabs"), true,
"tabs detected in majority tabs code");

View File

@ -31,6 +31,8 @@ support-files =
[browser_computedview_media-queries.js]
[browser_computedview_no-results-placeholder.js]
[browser_computedview_original-source-link.js]
[browser_computedview_refresh-on-style-change_01.js]
[browser_computedview_refresh-on-style-change_02.js]
[browser_computedview_search-filter.js]
[browser_computedview_select-and-copy-styles.js]
[browser_computedview_style-editor-link.js]
@ -77,6 +79,7 @@ skip-if = os == "win" && debug # bug 963492
[browser_ruleview_pseudo-element.js]
[browser_ruleview_refresh-on-attribute-change_01.js]
[browser_ruleview_refresh-on-attribute-change_02.js]
[browser_ruleview_refresh-on-style-change.js]
[browser_ruleview_select-and-copy-styles.js]
[browser_ruleview_style-editor-link.js]
[browser_ruleview_urls-clickable.js]

View File

@ -0,0 +1,35 @@
/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
"use strict";
// Test that the computed view refreshes when the current node has its style
// changed
const TESTCASE_URI = 'data:text/html;charset=utf-8,' +
'<div id="testdiv" style="font-size:10px;">Test div!</div>';
let test = asyncTest(function*() {
yield addTab(TESTCASE_URI);
info("Getting the test node");
let div = getNode("#testdiv");
info("Opening the computed view and selecting the test node");
let {toolbox, inspector, view} = yield openComputedView();
yield selectNode(div, inspector);
let fontSize = getComputedViewPropertyValue(view, "font-size");
is(fontSize, "10px", "The computed view shows the right font-size");
info("Changing the node's style and waiting for the update");
let onUpdated = inspector.once("computed-view-refreshed");
div.style.cssText = "font-size: 15px; color: red;";
yield onUpdated;
fontSize = getComputedViewPropertyValue(view, "font-size");
is(fontSize, "15px", "The computed view shows the updated font-size");
let color = getComputedViewPropertyValue(view, "color");
is(color, "#F00", "The computed view also shows the color now");
});

View File

@ -0,0 +1,40 @@
/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
"use strict";
// Test that the computed view refreshes when the current node has its style
// changed, even if the view is not the active one
const TESTCASE_URI = 'data:text/html;charset=utf-8,' +
'<div id="testdiv" style="font-size:10px;">Test div!</div>';
let test = asyncTest(function*() {
yield addTab(TESTCASE_URI);
info("Getting the test node");
let div = getNode("#testdiv");
info("Opening the computed view and selecting the test node");
let {toolbox, inspector, view} = yield openComputedView();
yield selectNode(div, inspector);
let fontSize = getComputedViewPropertyValue(view, "font-size");
is(fontSize, "10px", "The computed view shows the right font-size");
info("Now switching to the rule view");
yield openRuleView();
info("Changing the node's style and waiting for the update");
let onUpdated = inspector.once("computed-view-refreshed");
div.style.cssText = "font-size: 20px; color: blue; text-align: center";
yield onUpdated;
fontSize = getComputedViewPropertyValue(view, "font-size");
is(fontSize, "20px", "The computed view shows the updated font-size");
let color = getComputedViewPropertyValue(view, "color");
is(color, "#00F", "The computed view also shows the color now");
let textAlign = getComputedViewPropertyValue(view, "text-align");
is(textAlign, "center", "The computed view also shows the text-align now");
});

View File

@ -0,0 +1,41 @@
/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
"use strict";
// Test that the rule view refreshes when the current node has its style
// changed
const TESTCASE_URI = 'data:text/html;charset=utf-8,' +
'<div id="testdiv" style="font-size:10px;">Test div!</div>';
let test = asyncTest(function*() {
yield addTab(TESTCASE_URI);
Services.prefs.setCharPref("devtools.defaultColorUnit", "name");
info("Getting the test node");
let div = getNode("#testdiv");
info("Opening the rule view and selecting the test node");
let {toolbox, inspector, view} = yield openRuleView();
yield selectNode(div, inspector);
let fontSize = getRuleViewPropertyValue(view, "element", "font-size");
is(fontSize, "10px", "The rule view shows the right font-size");
info("Changing the node's style and waiting for the update");
let onUpdated = inspector.once("rule-view-refreshed");
div.style.cssText = "font-size: 3em; color: lightgoldenrodyellow; text-align: right; text-transform: uppercase";
yield onUpdated;
let textAlign = getRuleViewPropertyValue(view, "element", "text-align");
is(textAlign, "right", "The rule view shows the new text align.");
let color = getRuleViewPropertyValue(view, "element", "color");
is(color, "lightgoldenrodyellow", "The rule view shows the new color.")
let fontSize = getRuleViewPropertyValue(view, "element", "font-size");
is(fontSize, "3em", "The rule view shows the new font size.");
let textTransform = getRuleViewPropertyValue(view, "element", "text-transform");
is(textTransform, "uppercase", "The rule view shows the new text transform.");
});

View File

@ -34,9 +34,14 @@ registerCleanupFunction(() => {
}
});
// Uncomment to log events
// Services.prefs.setBoolPref("devtools.dump.emit", true);
// Clean-up all prefs that might have been changed during a test run
// (safer here because if the test fails, then the pref is never reverted)
registerCleanupFunction(() => {
Services.prefs.clearUserPref("devtools.dump.emit");
Services.prefs.clearUserPref("devtools.defaultColorUnit");
});
/**
@ -477,7 +482,7 @@ function hasSideBarTab(inspector, id) {
function getRuleViewRule(view, selectorText) {
let rule;
for (let r of view.doc.querySelectorAll(".ruleview-rule")) {
let selector = r.querySelector(".ruleview-selector-matched");
let selector = r.querySelector(".ruleview-selector, .ruleview-selector-matched");
if (selector && selector.textContent === selectorText) {
rule = r;
break;
@ -515,6 +520,20 @@ function getRuleViewProperty(view, selectorText, propertyName) {
return prop;
}
/**
* Get the text value of the property corresponding to a given selector and name
* in the rule-view
* @param {CssRuleView} view The instance of the rule-view panel
* @param {String} selectorText The selector in the rule-view to look for the
* property in
* @param {String} propertyName The name of the property
* @return {String} The property value
*/
function getRuleViewPropertyValue(view, selectorText, propertyName) {
return getRuleViewProperty(view, selectorText, propertyName)
.valueSpan.textContent;
}
/**
* Simulate a color change in a given color picker tooltip, and optionally wait
* for a given element in the page to have its style changed as a result
@ -655,6 +674,18 @@ function getComputedViewProperty(view, name) {
return prop;
}
/**
* Get the text value of the property corresponding to a given name in the
* computed-view
* @param {CssHtmlTree} view The instance of the computed view panel
* @param {String} name The name of the property to retrieve
* @return {String} The property value
*/
function getComputedViewPropertyValue(view, selectorText, propertyName) {
return getComputedViewProperty(view, selectorText, propertyName)
.valueSpan.textContent;
}
/**
* Expand a given property, given its index in the current property list of
* the computed view

View File

@ -51,17 +51,20 @@
padding-bottom: 1px;
}
#nav-bar {
background-image: linear-gradient(@toolbarHighlight@, rgba(255,255,255,0));
box-shadow: 0 1px 0 @toolbarHighlight@ inset;
#TabsToolbar:not([collapsed="true"]) + #nav-bar {
margin-top: -@tabToolbarNavbarOverlap@; /* Move up into the TabsToolbar */
padding-top: 2px;
padding-bottom: 2px;
/* Position the toolbar above the bottom of background tabs */
position: relative;
z-index: 1;
}
#nav-bar {
background-image: linear-gradient(@toolbarHighlight@, rgba(255,255,255,0));
box-shadow: 0 1px 0 @toolbarHighlight@ inset;
padding-top: 2px;
padding-bottom: 2px;
}
#nav-bar-overflow-button {
-moz-image-region: rect(-5px, 12px, 11px, -4px);
}

View File

@ -109,6 +109,13 @@ toolbarseparator {
background: url(chrome://browser/skin/Toolbar-background-noise.png) hsl(0,0%,83%);
}
#TabsToolbar:not([collapsed="true"]) + #nav-bar {
margin-top: -@tabToolbarNavbarOverlap@; /* Move up into the TabsToolbar */
/* Position the toolbar above the bottom of background tabs */
position: relative;
z-index: 1;
}
#nav-bar {
-moz-appearance: none;
background: url(chrome://browser/skin/Toolbar-background-noise.png),
@ -124,10 +131,6 @@ toolbarseparator {
background-position: 0 1px, 0 0;
box-shadow: inset 0 1px 0 hsla(0,0%,100%,.4);
margin-top: -@tabToolbarNavbarOverlap@;
/* Position the toolbar above the bottom of background tabs */
position: relative;
z-index: 1;
}
@media (min-resolution: 2dppx) {

View File

@ -99,6 +99,10 @@
background-position: top 5px left 4px;
}
.subviewbutton[checked="true"]:-moz-locale-dir(rtl) {
background-position: top 5px right 4px;
}
.subviewbutton:not(:-moz-any([image],[targetURI],.cui-withicon, .bookmark-item)) > .menu-iconic-left {
display: none;
}

View File

@ -1068,6 +1068,10 @@ toolbaritem[overflowedItem=true],
background: url("chrome://global/skin/menu/shared-menu-check.png") center left 7px / 11px 11px no-repeat transparent;
}
.subviewbutton[checked="true"]:-moz-locale-dir(rtl) {
background-position: center right 7px;
}
.subviewbutton > .menu-iconic-left {
-moz-appearance: none;
-moz-margin-end: 3px;

View File

@ -283,15 +283,18 @@
}
}
#nav-bar {
background-image: linear-gradient(@toolbarHighlight@, rgba(255,255,255,0));
box-shadow: 0 1px 0 @toolbarHighlight@ inset;
#TabsToolbar:not([collapsed="true"]) + #nav-bar {
margin-top: -@tabToolbarNavbarOverlap@; /* Move up into the TabsToolbar */
/* Position the toolbar above the bottom of background tabs */
position: relative;
z-index: 1;
}
#nav-bar {
background-image: linear-gradient(@toolbarHighlight@, rgba(255,255,255,0));
box-shadow: 0 1px 0 @toolbarHighlight@ inset;
}
#personal-bookmarks {
min-height: 24px;
}

View File

@ -35,6 +35,7 @@ gyp_vars = {
'arm_neon_optional': 1,
'moz_widget_toolkit_gonk': 0,
'moz_omx_encoder': 0,
# (for vp8) chromium sets to 0 also
'use_temporal_layers': 0,
@ -61,6 +62,8 @@ elif os == 'Android':
if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gonk':
gyp_vars['build_with_gonk'] = 1
gyp_vars['moz_widget_toolkit_gonk'] = 1
if CONFIG['MOZ_OMX_ENCODER']:
gyp_vars['moz_omx_encoder'] = 1
else:
gyp_vars.update(
gtest_target_type='executable',

View File

@ -1103,6 +1103,7 @@ vpx/vpx_decoder.h
vpx/vpx_encoder.h
vpx/vp8cx.h
vpx/vp8dx.h
vpx_mem/vpx_mem.h
#endif
#ifdef GKMEDIAS_SHARED_LIBRARY
vpx/vpx_codec.h
@ -1110,6 +1111,7 @@ vpx/vpx_decoder.h
vpx/vpx_encoder.h
vpx/vp8cx.h
vpx/vp8dx.h
vpx_mem/vpx_mem.h
vorbis/codec.h
theora/theoradec.h
tremor/ivorbiscodec.h

View File

@ -5397,6 +5397,8 @@ if test -n "$MOZ_VPX"; then
AC_DEFINE(MOZ_VPX_ERROR_CONCEALMENT)
fi
_SAVE_CFLAGS=$CFLAGS
_SAVE_LIBS=$LIBS
if test -n "$MOZ_NATIVE_LIBVPX"; then
dnl ============================
dnl === libvpx Version check ===
@ -5404,14 +5406,24 @@ if test -n "$MOZ_VPX"; then
dnl Check to see if we have a system libvpx package.
PKG_CHECK_MODULES(MOZ_LIBVPX, vpx >= 1.3.0)
CFLAGS="$CFLAGS $MOZ_LIBVPX_CFLAGS"
LIBS="$LIBS $MOZ_LIBVPX_LIBS"
MOZ_CHECK_HEADER([vpx/vpx_decoder.h], [],
[AC_MSG_ERROR([Couldn't find vpx/vpx_decoder.h which is required for build with system libvpx. Use --without-system-libvpx to build with in-tree libvpx.])])
_SAVE_LIBS=$LIBS
AC_CHECK_LIB(vpx, vpx_codec_dec_init_ver, [],
[AC_MSG_ERROR([--with-system-libvpx requested but symbol vpx_codec_dec_init_ver not found])])
LIBS=$_SAVE_LIBS
MOZ_CHECK_HEADER([vpx_mem/vpx_mem.h],
[AC_CHECK_FUNC(vpx_mem_set_functions)])
if test "$ac_cv_header_vpx_mem_vpx_mem_h" = no -o \
"$ac_cv_func_vpx_mem_set_functions" = no; then
AC_DEFINE(MOZ_VPX_NO_MEM_REPORTING)
fi
fi
CFLAGS=$_SAVE_CFLAGS
LIBS=$_SAVE_LIBS
fi
AC_SUBST(MOZ_NATIVE_LIBVPX)

View File

@ -12,8 +12,8 @@ class nsDOMAttributeMap;
class nsIContent;
#define NS_IATTRIBUTE_IID \
{ 0x8d9d7dbf, 0xc42d, 0x4715, \
{ 0x95, 0xcf, 0x7a, 0x5e, 0xd5, 0xa4, 0x47, 0x70 } }
{ 0x233a9c4d, 0xb27f, 0x4662, \
{ 0xbd, 0x90, 0xba, 0xd6, 0x2e, 0x76, 0xc8, 0xe1 } }
class nsIAttribute : public nsINode
{
@ -32,8 +32,6 @@ public:
return mNodeInfo;
}
virtual nsIContent* GetContent() const = 0;
/**
* Called when our ownerElement is moved into a new document.
* Updates the nodeinfo of this node.

View File

@ -78,7 +78,7 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(Attr)
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
NS_IMPL_CYCLE_COLLECTION_CAN_SKIP_BEGIN(Attr)
Element* ownerElement = tmp->GetContentInternal();
Element* ownerElement = tmp->GetElement();
if (tmp->IsBlack()) {
if (ownerElement) {
// The attribute owns the element via attribute map so we can
@ -129,10 +129,14 @@ Attr::SetMap(nsDOMAttributeMap *aMap)
mAttrMap = aMap;
}
nsIContent*
Attr::GetContent() const
Element*
Attr::GetElement() const
{
return GetContentInternal();
if (!mAttrMap) {
return nullptr;
}
nsIContent* content = mAttrMap->GetContent();
return content ? content->AsElement() : nullptr;
}
nsresult
@ -182,10 +186,10 @@ Attr::GetNameAtom(nsIContent* aContent)
NS_IMETHODIMP
Attr::GetValue(nsAString& aValue)
{
nsIContent* content = GetContentInternal();
if (content) {
nsCOMPtr<nsIAtom> nameAtom = GetNameAtom(content);
content->GetAttr(mNodeInfo->NamespaceID(), nameAtom, aValue);
Element* element = GetElement();
if (element) {
nsCOMPtr<nsIAtom> nameAtom = GetNameAtom(element);
element->GetAttr(mNodeInfo->NamespaceID(), nameAtom, aValue);
}
else {
aValue = mValue;
@ -197,14 +201,14 @@ Attr::GetValue(nsAString& aValue)
void
Attr::SetValue(const nsAString& aValue, ErrorResult& aRv)
{
nsIContent* content = GetContentInternal();
if (!content) {
Element* element = GetElement();
if (!element) {
mValue = aValue;
return;
}
nsCOMPtr<nsIAtom> nameAtom = GetNameAtom(content);
aRv = content->SetAttr(mNodeInfo->NamespaceID(),
nsCOMPtr<nsIAtom> nameAtom = GetNameAtom(element);
aRv = element->SetAttr(mNodeInfo->NamespaceID(),
nameAtom,
mNodeInfo->GetPrefixAtom(),
aValue,
@ -238,7 +242,7 @@ Element*
Attr::GetOwnerElement(ErrorResult& aRv)
{
OwnerDoc()->WarnOnceAbout(nsIDocument::eOwnerElement);
return GetContentInternal();
return GetElement();
}
NS_IMETHODIMP
@ -247,9 +251,9 @@ Attr::GetOwnerElement(nsIDOMElement** aOwnerElement)
NS_ENSURE_ARG_POINTER(aOwnerElement);
OwnerDoc()->WarnOnceAbout(nsIDocument::eOwnerElement);
nsIContent* content = GetContentInternal();
if (content) {
return CallQueryInterface(content, aOwnerElement);
Element* element = GetElement();
if (element) {
return CallQueryInterface(element, aOwnerElement);
}
*aOwnerElement = nullptr;
@ -293,7 +297,7 @@ Attr::Clone(nsINodeInfo *aNodeInfo, nsINode **aResult) const
already_AddRefed<nsIURI>
Attr::GetBaseURI(bool aTryUseXHRDocBaseURI) const
{
nsINode *parent = GetContentInternal();
Element* parent = GetElement();
return parent ? parent->GetBaseURI(aTryUseXHRDocBaseURI) : nullptr;
}
@ -318,16 +322,14 @@ Attr::SetTextContentInternal(const nsAString& aTextContent,
NS_IMETHODIMP
Attr::GetIsId(bool* aReturn)
{
nsIContent* content = GetContentInternal();
if (!content)
{
Element* element = GetElement();
if (!element) {
*aReturn = false;
return NS_OK;
}
nsIAtom* idAtom = content->GetIDAttributeName();
if (!idAtom)
{
nsIAtom* idAtom = element->GetIDAttributeName();
if (!idAtom) {
*aReturn = false;
return NS_OK;
}
@ -404,11 +406,5 @@ Attr::WrapObject(JSContext* aCx)
return AttrBinding::Wrap(aCx, this);
}
Element*
Attr::GetContentInternal() const
{
return mAttrMap ? mAttrMap->GetContent() : nullptr;
}
} // namespace dom
} // namespace mozilla

View File

@ -56,7 +56,7 @@ public:
// nsIAttribute interface
void SetMap(nsDOMAttributeMap *aMap) MOZ_OVERRIDE;
nsIContent *GetContent() const MOZ_OVERRIDE;
Element* GetElement() const;
nsresult SetOwnerDocument(nsIDocument* aDocument) MOZ_OVERRIDE;
// nsINode interface
@ -98,14 +98,13 @@ public:
protected:
virtual Element* GetNameSpaceElement()
{
return GetContentInternal();
return GetElement();
}
static bool sInitialized;
private:
already_AddRefed<nsIAtom> GetNameAtom(nsIContent* aContent);
Element* GetContentInternal() const;
nsString mValue;
};

View File

@ -127,6 +127,7 @@
#include "nsStyledElement.h"
#include "nsXBLService.h"
#include "nsITextControlElement.h"
#include "nsITextControlFrame.h"
#include "nsISupportsImpl.h"
#include "mozilla/dom/DocumentFragment.h"
#include "mozilla/IntegerPrintfMacros.h"
@ -201,7 +202,28 @@ nsIContent::UpdateEditableState(bool aNotify)
NS_ASSERTION(!IsElement(), "What happened here?");
nsIContent *parent = GetParent();
SetEditableFlag(parent && parent->HasFlag(NODE_IS_EDITABLE));
// Skip over unknown native anonymous content to avoid setting a flag we
// can't clear later
bool isUnknownNativeAnon = false;
if (IsInNativeAnonymousSubtree()) {
isUnknownNativeAnon = true;
nsCOMPtr<nsIContent> root = this;
while (root && !root->IsRootOfNativeAnonymousSubtree()) {
root = root->GetParent();
}
// root should always be true here, but isn't -- bug 999416
if (root) {
nsIFrame* rootFrame = root->GetPrimaryFrame();
if (rootFrame) {
nsIFrame* parentFrame = rootFrame->GetParent();
nsITextControlFrame* textCtrl = do_QueryFrame(parentFrame);
isUnknownNativeAnon = !textCtrl;
}
}
}
SetEditableFlag(parent && parent->HasFlag(NODE_IS_EDITABLE) &&
!isUnknownNativeAnon);
}
void

View File

@ -7750,7 +7750,13 @@ nsIDocument::CreateEvent(const nsAString& aEventType, ErrorResult& rv) const
rv = EventDispatcher::CreateEvent(const_cast<nsIDocument*>(this),
presContext, nullptr, aEventType,
getter_AddRefs(ev));
return ev ? dont_AddRef(ev.forget().take()->InternalDOMEvent()) : nullptr;
if (!ev) {
return nullptr;
}
WidgetEvent* e = ev->GetInternalNSEvent();
e->mFlags.mBubbles = false;
e->mFlags.mCancelable = false;
return dont_AddRef(ev.forget().take()->InternalDOMEvent());
}
void

View File

@ -809,10 +809,10 @@ nsINode::CompareDocumentPosition(nsINode& aOtherNode) const
const nsINode *node1 = &aOtherNode, *node2 = this;
// Check if either node is an attribute
const nsIAttribute* attr1 = nullptr;
const Attr* attr1 = nullptr;
if (node1->IsNodeOfType(nsINode::eATTRIBUTE)) {
attr1 = static_cast<const nsIAttribute*>(node1);
const nsIContent* elem = attr1->GetContent();
attr1 = static_cast<const Attr*>(node1);
const Element* elem = attr1->GetElement();
// If there is an owner element add the attribute
// to the chain and walk up to the element
if (elem) {
@ -821,8 +821,8 @@ nsINode::CompareDocumentPosition(nsINode& aOtherNode) const
}
}
if (node2->IsNodeOfType(nsINode::eATTRIBUTE)) {
const nsIAttribute* attr2 = static_cast<const nsIAttribute*>(node2);
const nsIContent* elem = attr2->GetContent();
const Attr* attr2 = static_cast<const Attr*>(node2);
const Element* elem = attr2->GetElement();
if (elem == node1 && attr1) {
// Both nodes are attributes on the same element.
// Compare position between the attributes.

View File

@ -7,26 +7,34 @@
namespace android {
// NAL unit start code.
static const uint8_t kNALUnitStartCode[] = { 0x00, 0x00, 0x00, 0x01 };
// This class is used to generate AVC/H.264 decoder config descriptor blob from
// the sequence parameter set(SPS) + picture parameter set(PPS) data.
// The utility functions in this file concatenate two AVC/H.264 parameter sets,
// sequence parameter set(SPS) and picture parameter set(PPS), into byte stream
// format or construct AVC decoder config descriptor blob from them.
//
// SPS + PPS format:
// --- SPS NAL unit ---
// Start code <0x00 0x00 0x00 0x01> (4 bytes)
// NAL unit type <0x07> (5 bits)
// SPS (1+ bytes)
// * NAL unit defined in ISO/IEC 14496-10 7.3.1
// * SPS defined ISO/IEC 14496-10 7.3.2.1.1
// * PPS defined in ISO/IEC 14496-10 7.3.2.2
//
// Byte stream format:
// Start code <0x00 0x00 0x00 0x01> (4 bytes)
// --- (SPS) NAL unit ---
// ... (3 bits)
// NAL unit type <0x07> (5 bits)
// SPS (3+ bytes)
// Profile (1 byte)
// Compatible profiles (1 byte)
// Level (1 byte)
// ...
// --- PPS NAL unit ---
// Start code <0x00 0x00 0x00 0x01> (4 bytes)
// NAL unit type <0x08> (5 bits)
// PPS (1+ bytes)
// --- End ---
// Start code <0x00 0x00 0x00 0x01> (4 bytes)
// --- (PPS) NAL unit ---
// ... (3 bits)
// NAL unit type <0x08> (5 bits)
// PPS (1+ bytes)
// ...
// --- End ---
//
// Descriptor format:
// Descriptor format: (defined in ISO/IEC 14496-15 5.2.4.1.1)
// --- Header (5 bytes) ---
// Version <0x01> (1 byte)
// Profile (1 byte)
@ -47,227 +55,151 @@ static const uint8_t kNALUnitStartCode[] = { 0x00, 0x00, 0x00, 0x01 };
// PPS NAL unit (1+ bytes)
// ...
// --- End ---
class AVCDecodeConfigDescMaker {
public:
// Convert SPS + PPS data to decoder config descriptor blob. aParamSets
// contains the source data, and the generated blob will be appended to
// aOutputBuf.
status_t ConvertParamSetsToDescriptorBlob(ABuffer* aParamSets,
nsTArray<uint8_t>* aOutputBuf)
{
uint8_t header[] = {
0x01, // Version.
0x00, // Will be filled with 'profile' when parsing SPS later.
0x00, // Will be filled with 'compatible profiles' when parsing SPS later.
0x00, // Will be filled with 'level' when parsing SPS later.
0xFF, // 6 bits reserved value <111111> + 2 bits NAL length type <11>
};
size_t paramSetsSize = ParseParamSets(aParamSets, header);
NS_ENSURE_TRUE(paramSetsSize > 0, ERROR_MALFORMED);
// NAL unit start code.
static const uint8_t kNALUnitStartCode[] = { 0x00, 0x00, 0x00, 0x01 };
// Extra 1 byte for number of SPS & the other for number of PPS.
aOutputBuf->SetCapacity(sizeof(header) + paramSetsSize + 2);
// 5 bytes Header.
aOutputBuf->AppendElements(header, sizeof(header));
// 3 bits <111> + 5 bits number of SPS.
uint8_t n = mSPS.Length();
aOutputBuf->AppendElement(0xE0 | n);
// SPS NAL units.
for (int i = 0; i < n; i++) {
mSPS.ElementAt(i).AppendTo(aOutputBuf);
}
// 1 byte number of PPS.
n = mPPS.Length();
aOutputBuf->AppendElement(n);
// PPS NAL units.
for (int i = 0; i < n; i++) {
mPPS.ElementAt(i).AppendTo(aOutputBuf);
}
return OK;
}
private:
// Sequence parameter set or picture parameter set.
struct AVCParamSet {
AVCParamSet(const uint8_t* aPtr, const size_t aSize)
: mPtr(aPtr)
, mSize(aSize)
{}
// Append 2 bytes length value and NAL unit bitstream to aOutputBuf.
void AppendTo(nsTArray<uint8_t>* aOutputBuf)
{
MOZ_ASSERT(mPtr && mSize > 0);
// 2 bytes length value.
uint8_t size[] = {
(mSize & 0xFF00) >> 8, // MSB.
mSize & 0x00FF, // LSB.
};
aOutputBuf->AppendElements(size, sizeof(size));
aOutputBuf->AppendElements(mPtr, mSize);
}
const uint8_t* mPtr; // Pointer to NAL unit.
const size_t mSize; // NAL unit length in bytes.
};
// NAL unit types.
enum {
kNALUnitTypeSPS = 0x07, // Value for sequence parameter set.
kNALUnitTypePPS = 0x08, // Value for picture parameter set.
};
// Search for next start code to determine the location of parameter set data
// and save the result to corresponding parameter set arrays. The search range
// is from aPtr to (aPtr + aSize - 4), and aType indicates which array to save
// the result.
// The size (in bytes) of found parameter set will be stored in
// aParameterSize.
// This function also returns the pointer to found start code that caller can
// use for the next iteration of search. If the returned pointer is beyond
// the end of search range, it means no start code is found.
uint8_t* ParseParamSet(uint8_t* aPtr, size_t aSize, uint8_t aType,
size_t* aParamSetSize)
{
MOZ_ASSERT(aPtr && aSize > 0);
MOZ_ASSERT(aType == kNALUnitTypeSPS || aType == kNALUnitTypePPS);
MOZ_ASSERT(aParamSetSize);
// Find next start code.
size_t index = 0;
size_t end = aSize - sizeof(kNALUnitStartCode);
uint8_t* nextStartCode = aPtr;
while (index <= end &&
memcmp(kNALUnitStartCode, aPtr + index, sizeof(kNALUnitStartCode))) {
++index;
}
if (index <= end) {
// Found.
nextStartCode = aPtr + index;
} else {
nextStartCode = aPtr + aSize;
}
*aParamSetSize = nextStartCode - aPtr;
NS_ENSURE_TRUE(*aParamSetSize > 0, nullptr);
AVCParamSet paramSet(aPtr, *aParamSetSize);
if (aType == kNALUnitTypeSPS) {
// SPS should have at least 4 bytes.
NS_ENSURE_TRUE(*aParamSetSize >= 4, nullptr);
mSPS.AppendElement(paramSet);
} else {
mPPS.AppendElement(paramSet);
}
return nextStartCode;
}
// Walk through SPS + PPS data and save the pointer & size of each parameter
// set to corresponding arrays. It also fills several values in aHeader.
// Will return total size of all parameter sets, or 0 if fail to parse.
size_t ParseParamSets(ABuffer* aParamSets, uint8_t* aHeader)
{
// Data starts with a start code.
// SPS and PPS are separated with start codes.
// Also, SPS must come before PPS
uint8_t type = kNALUnitTypeSPS;
bool hasSPS = false;
bool hasPPS = false;
uint8_t* ptr = aParamSets->data();
uint8_t* nextStartCode = ptr;
size_t remain = aParamSets->size();
size_t paramSetSize = 0;
size_t totalSize = 0;
// Examine
while (remain > sizeof(kNALUnitStartCode) &&
!memcmp(kNALUnitStartCode, ptr, sizeof(kNALUnitStartCode))) {
ptr += sizeof(kNALUnitStartCode);
remain -= sizeof(kNALUnitStartCode);
// NAL unit format is defined in ISO/IEC 14496-10 7.3.1:
// --- NAL unit ---
// Reserved <111> (3 bits)
// Type (5 bits)
// Parameter set (4+ bytes for SPS, 1+ bytes for PPS)
// --- End ---
type = (ptr[0] & 0x1F);
if (type == kNALUnitTypeSPS) {
// SPS must come before PPS.
NS_ENSURE_FALSE(hasPPS, 0);
if (!hasSPS) {
// SPS contains some header values.
aHeader[1] = ptr[1]; // Profile.
aHeader[2] = ptr[2]; // Compatible Profiles.
aHeader[3] = ptr[3]; // Level.
hasSPS = true;
}
nextStartCode = ParseParamSet(ptr, remain, type, &paramSetSize);
} else if (type == kNALUnitTypePPS) {
// SPS must come before PPS.
NS_ENSURE_TRUE(hasSPS, 0);
if (!hasPPS) {
hasPPS = true;
}
nextStartCode = ParseParamSet(ptr, remain, type, &paramSetSize);
} else {
// Should never contain NAL unit other than SPS or PPS.
NS_ENSURE_TRUE(false, 0);
}
NS_ENSURE_TRUE(nextStartCode, 0);
// Move to next start code.
remain -= (nextStartCode - ptr);
ptr = nextStartCode;
totalSize += (2 + paramSetSize); // 2 bytes length + NAL unit.
}
// Sanity check on the number of parameter sets.
size_t n = mSPS.Length();
NS_ENSURE_TRUE(n > 0 && n <= 0x1F, 0); // 5 bits length only.
n = mPPS.Length();
NS_ENSURE_TRUE(n > 0 && n <= 0xFF, 0); // 1 byte length only.
return totalSize;
}
nsTArray<AVCParamSet> mSPS;
nsTArray<AVCParamSet> mPPS;
// NAL unit types.
enum {
kNALUnitTypeSPS = 0x07, // Value for sequence parameter set.
kNALUnitTypePPS = 0x08, // Value for picture parameter set.
kNALUnitTypeBad = -1, // Malformed data.
};
// Blob from OMX encoder could be in descriptor format already, or sequence
// parameter set(SPS) + picture parameter set(PPS). If later, it needs to be
// parsed and converted into descriptor format.
// See MPEG4Writer::Track::makeAVCCodecSpecificData() and
// MPEG4Writer::Track::writeAvccBox() implementation in libstagefright.
status_t
GenerateAVCDescriptorBlob(ABuffer* aData, nsTArray<uint8_t>* aOutputBuf)
{
const size_t csdSize = aData->size();
const uint8_t* csd = aData->data();
MOZ_ASSERT(csdSize > sizeof(kNALUnitStartCode),
"Size of codec specific data is too short. "
"There could be a serious problem in MediaCodec.");
NS_ENSURE_TRUE(csdSize > sizeof(kNALUnitStartCode), ERROR_MALFORMED);
if (memcmp(csd, kNALUnitStartCode, sizeof(kNALUnitStartCode))) {
// Already in descriptor format. It should has at least 13 bytes.
NS_ENSURE_TRUE(csdSize >= 13, ERROR_MALFORMED);
aOutputBuf->AppendElements(aData->data(), csdSize);
} else {
// In SPS + PPS format. Generate descriptor blob from parameters sets.
AVCDecodeConfigDescMaker maker;
status_t result = maker.ConvertParamSetsToDescriptorBlob(aData, aOutputBuf);
NS_ENSURE_TRUE(result == OK, result);
// Sequence parameter set or picture parameter set.
// Non-owning view of one NAL unit (without its start code): mPtr points into
// a buffer owned by the caller, which must outlive this struct.
struct AVCParamSet {
// aPtr: start of the NAL unit payload; aSize: its length in bytes.
// Both must be non-null/non-zero (asserted below).
AVCParamSet(const uint8_t* aPtr, const size_t aSize)
: mPtr(aPtr)
, mSize(aSize)
{
MOZ_ASSERT(mPtr && mSize > 0);
}
// Total number of bytes AppendTo() will write for this parameter set.
size_t Size() {
return mSize + 2; // 2 more bytes for length value.
}
// Append 2 bytes length value and NAL unit bitstream to aOutputBuf.
// The length prefix is big-endian, as required by the avcC record layout.
void AppendTo(nsTArray<uint8_t>* aOutputBuf)
{
// 2 bytes length value.
uint8_t size[] = {
(mSize & 0xFF00) >> 8, // MSB.
mSize & 0x00FF, // LSB.
};
aOutputBuf->AppendElements(size, sizeof(size));
aOutputBuf->AppendElements(mPtr, mSize);
}
const uint8_t* mPtr; // Pointer to NAL unit.
const size_t mSize; // NAL unit length in bytes.
};
// Convert SPS and PPS data into decoder config descriptor blob. The generated
// blob will be appended to aOutputBuf.
// Precondition: both aSPS and aPPS begin with the 4-byte NAL start code
// (callers validate this via NALType() before calling here).
// NOTE(review): reading info[0..2] at data() + 5 assumes the SPS payload is at
// least 4 bytes (NAL header byte + profile + compatible profiles + level);
// NALType() only guarantees size() > 4 — confirm upstream encoders always
// emit a full SPS here.
static status_t
ConvertParamSetsToDescriptorBlob(sp<ABuffer>& aSPS, sp<ABuffer>& aPPS,
nsTArray<uint8_t>* aOutputBuf)
{
// Strip start code in the input.
AVCParamSet sps(aSPS->data() + sizeof(kNALUnitStartCode),
aSPS->size() - sizeof(kNALUnitStartCode));
AVCParamSet pps(aPPS->data() + sizeof(kNALUnitStartCode),
aPPS->size() - sizeof(kNALUnitStartCode));
size_t paramSetsSize = sps.Size() + pps.Size();
// Profile/level info in SPS.
// Offset 5 = 4-byte start code + 1-byte NAL header.
uint8_t* info = aSPS->data() + 5;
uint8_t header[] = {
0x01, // Version.
info[0], // Profile.
info[1], // Compatible profiles.
info[2], // Level.
0xFF, // 6 bits reserved value <111111> + 2 bits NAL length type <11>
};
// Reserve 1 byte for number of SPS & another 1 for number of PPS.
aOutputBuf->SetCapacity(sizeof(header) + paramSetsSize + 2);
// Build the blob. Layout follows ISO/IEC 14496-15 5.2.4.1.1 ('avcC'),
// always emitting exactly one SPS and one PPS.
aOutputBuf->AppendElements(header, sizeof(header)); // 5 bytes Header.
aOutputBuf->AppendElement(0xE0 | 1); // 3 bits <111> + 5 bits number of SPS.
sps.AppendTo(aOutputBuf); // SPS NALU data.
aOutputBuf->AppendElement(1); // 1 byte number of PPS.
pps.AppendTo(aOutputBuf); // PPS NALU data.
return OK;
}
} // namespace android
// Return the NAL unit type of aBuffer: the lower 5 bits of the first byte
// after the 4-byte start code (ISO/IEC 14496-10 7.3.1).
// Returns kNALUnitTypeBad when aBuffer is null, too short, or does not begin
// with the Annex B start code.
static int
NALType(sp<ABuffer>& aBuffer)
{
if (aBuffer == nullptr) {
return kNALUnitTypeBad;
}
// Start code?
uint8_t* data = aBuffer->data();
if (aBuffer->size() <= 4 ||
memcmp(data, kNALUnitStartCode, sizeof(kNALUnitStartCode))) {
return kNALUnitTypeBad;
}
return data[4] & 0x1F;
}
// Generate AVC/H.264 decoder config blob.
// See MPEG4Writer::Track::makeAVCCodecSpecificData() and
// MPEG4Writer::Track::writeAvccBox() implementation in libstagefright.
//
// aConfigData: output format message from MediaCodec; the parameter sets are
//              looked up under the conventional keys "csd-0" and "csd-1".
// aOutputBuf:  receives either raw SPS+PPS NAL units (AVC_NAL) or an
//              AVCDecoderConfigurationRecord blob (AVC_MP4).
// aFormat:     requested output blob format.
// Returns OK on success, ERROR_MALFORMED when either parameter set is
// missing or is not a well-formed SPS/PPS NAL unit.
status_t
GenerateAVCDescriptorBlob(sp<AMessage>& aConfigData,
                          nsTArray<uint8_t>* aOutputBuf,
                          OMXVideoEncoder::BlobFormat aFormat)
{
  // Search for parameter sets using key "csd-0" and "csd-1".
  char key[6] = "csd-";
  sp<ABuffer> sps;
  sp<ABuffer> pps;
  for (int i = 0; i < 2; i++) {
    snprintf(key + 4, 2, "%d", i);
    sp<ABuffer> paramSet;
    bool found = aConfigData->findBuffer(key, &paramSet);
    int type = NALType(paramSet);
    bool valid = ((type == kNALUnitTypeSPS) || (type == kNALUnitTypePPS));
    MOZ_ASSERT(found && valid);
    if (!found || !valid) {
      return ERROR_MALFORMED;
    }
    switch (type) {
      case kNALUnitTypeSPS:
        sps = paramSet;
        break;
      case kNALUnitTypePPS:
        pps = paramSet;
        break;
      default:
        NS_NOTREACHED("Should not get here!");
    }
  }
  MOZ_ASSERT(sps != nullptr && pps != nullptr);
  if (sps == nullptr || pps == nullptr) {
    return ERROR_MALFORMED;
  }

  if (aFormat == OMXVideoEncoder::BlobFormat::AVC_NAL) {
    // SPS + PPS, emitted verbatim in NAL byte-stream form (start codes kept).
    aOutputBuf->AppendElements(sps->data(), sps->size());
    aOutputBuf->AppendElements(pps->data(), pps->size());
    return OK;
  }

  // AVC_MP4: build the decoder config descriptor ('avcC') blob.
  // (The previous version declared an outer `status_t result = OK;` that was
  // shadowed by this one and never read on any path — removed.)
  status_t result = ConvertParamSetsToDescriptorBlob(sps, pps, aOutputBuf);
  MOZ_ASSERT(result == OK);
  return result;
}
} // namespace android

View File

@ -6,18 +6,22 @@
#ifndef OMXCodecDescriptorUtil_h_
#define OMXCodecDescriptorUtil_h_
#include <stagefright/foundation/ABuffer.h>
#include <stagefright/foundation/AMessage.h>
#include <stagefright/MediaErrors.h>
#include <nsTArray.h>
namespace android {
#include "OMXCodecWrapper.h"
// Generate decoder config descriptor (defined in ISO/IEC 14496-15 5.2.4.1.1)
// for AVC/H.264 using codec config blob from encoder.
status_t GenerateAVCDescriptorBlob(ABuffer* aData,
nsTArray<uint8_t>* aOutputBuf);
namespace android {
// Generate decoder config blob using aConfigData provided by encoder.
// The output will be stored in aOutputBuf.
// aFormat specifies the output format: AVC_MP4 is for MP4 file, and AVC_NAL is
// for RTP packet used by WebRTC.
status_t GenerateAVCDescriptorBlob(sp<AMessage>& aConfigData,
nsTArray<uint8_t>* aOutputBuf,
OMXVideoEncoder::BlobFormat aFormat);
}
#endif // OMXCodecDescriptorUtil_h_
#endif // OMXCodecDescriptorUtil_h_

View File

@ -129,8 +129,8 @@ OMXCodecWrapper::Stop()
}
// Check system property to see if we're running on emulator.
static
bool IsRunningOnEmulator()
static bool
IsRunningOnEmulator()
{
char qemu[PROPERTY_VALUE_MAX];
property_get("ro.kernel.qemu", qemu, "");
@ -138,7 +138,8 @@ bool IsRunningOnEmulator()
}
nsresult
OMXVideoEncoder::Configure(int aWidth, int aHeight, int aFrameRate)
OMXVideoEncoder::Configure(int aWidth, int aHeight, int aFrameRate,
BlobFormat aBlobFormat)
{
MOZ_ASSERT(!mStarted, "Configure() was called already.");
@ -185,6 +186,7 @@ OMXVideoEncoder::Configure(int aWidth, int aHeight, int aFrameRate)
mWidth = aWidth;
mHeight = aHeight;
mBlobFormat = aBlobFormat;
result = Start();
@ -200,8 +202,7 @@ OMXVideoEncoder::Configure(int aWidth, int aHeight, int aFrameRate)
// interpolation.
// aSource contains info about source image data, and the result will be stored
// in aDestination, whose size needs to be >= Y plane size * 3 / 2.
static
void
static void
ConvertPlanarYCbCrToNV12(const PlanarYCbCrData* aSource, uint8_t* aDestination)
{
// Fill Y plane.
@ -252,8 +253,7 @@ ConvertPlanarYCbCrToNV12(const PlanarYCbCrData* aSource, uint8_t* aDestination)
// conversion. Currently only 2 source format are supported:
// - NV21/HAL_PIXEL_FORMAT_YCrCb_420_SP (from camera preview window).
// - YV12/HAL_PIXEL_FORMAT_YV12 (from video decoder).
static
void
static void
ConvertGrallocImageToNV12(GrallocImage* aSource, uint8_t* aDestination)
{
// Get graphic buffer.
@ -373,27 +373,61 @@ status_t
OMXVideoEncoder::AppendDecoderConfig(nsTArray<uint8_t>* aOutputBuf,
ABuffer* aData)
{
// AVC/H.264 decoder config descriptor is needed to construct MP4 'avcC' box
// (defined in ISO/IEC 14496-15 5.2.4.1.1).
return GenerateAVCDescriptorBlob(aData, aOutputBuf);
// Codec already parsed aData. Using its result makes generating config blob
// much easier.
sp<AMessage> format;
mCodec->getOutputFormat(&format);
// NAL unit format is needed by WebRTC for RTP packets; AVC/H.264 decoder
// config descriptor is needed to construct MP4 'avcC' box.
status_t result = GenerateAVCDescriptorBlob(format, aOutputBuf, mBlobFormat);
mHasConfigBlob = (result == OK);
return result;
}
// Override to replace NAL unit start code with 4-bytes unit length.
// See ISO/IEC 14496-15 5.2.3.
void OMXVideoEncoder::AppendFrame(nsTArray<uint8_t>* aOutputBuf,
const uint8_t* aData, size_t aSize)
void
OMXVideoEncoder::AppendFrame(nsTArray<uint8_t>* aOutputBuf,
const uint8_t* aData, size_t aSize)
{
aOutputBuf->SetCapacity(aSize);
if (mBlobFormat == BlobFormat::AVC_NAL) {
// Append NAL format data without modification.
aOutputBuf->AppendElements(aData, aSize);
return;
}
// Replace start code with data length.
uint8_t length[] = {
(aSize >> 24) & 0xFF,
(aSize >> 16) & 0xFF,
(aSize >> 8) & 0xFF,
aSize & 0xFF,
};
aOutputBuf->SetCapacity(aSize);
aOutputBuf->AppendElements(length, sizeof(length));
aOutputBuf->AppendElements(aData + sizeof(length), aSize);
}
nsresult
OMXVideoEncoder::GetCodecConfig(nsTArray<uint8_t>* aOutputBuf)
{
MOZ_ASSERT(mHasConfigBlob, "Haven't received codec config yet.");
return AppendDecoderConfig(aOutputBuf, nullptr) == OK ? NS_OK : NS_ERROR_FAILURE;
}
nsresult
OMXVideoEncoder::SetBitrate(int32_t aKbps)
{
sp<AMessage> msg = new AMessage();
msg->setInt32("videoBitrate", aKbps * 1000 /* kbps -> bps */);
status_t result = mCodec->setParameters(msg);
MOZ_ASSERT(result == OK);
return result == OK ? NS_OK : NS_ERROR_FAILURE;
}
nsresult
OMXAudioEncoder::Configure(int aChannels, int aInputSampleRate,
int aEncodedSampleRate)

View File

@ -117,7 +117,8 @@ protected:
/**
* Construct codec specific configuration blob with given data aData generated
* by media codec and append it into aOutputBuf. Needed by MP4 container
* writer for generating decoder config box. Returns OK if succeed.
* writer for generating decoder config box, or WebRTC for generating RTP
* packets. Returns OK if succeed.
*/
virtual status_t AppendDecoderConfig(nsTArray<uint8_t>* aOutputBuf,
ABuffer* aData) = 0;
@ -240,12 +241,22 @@ private:
*/
class OMXVideoEncoder MOZ_FINAL : public OMXCodecWrapper
{
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OMXVideoEncoder)
public:
// Types of output blob format.
enum BlobFormat {
AVC_MP4, // MP4 file config descripter (defined in ISO/IEC 14496-15 5.2.4.1.1)
AVC_NAL // NAL (Network Abstract Layer) (defined in ITU-T H.264 7.4.1)
};
/**
* Configure video codec parameters and start media codec. It must be called
* before calling Encode() and GetNextEncodedFrame().
* aBlobFormat specifies output blob format provided by encoder. It can be
* AVC_MP4 or AVC_NAL.
*/
nsresult Configure(int aWidth, int aHeight, int aFrameRate);
nsresult Configure(int aWidth, int aHeight, int aFrameRate,
BlobFormat aBlobFormat = BlobFormat::AVC_MP4);
/**
* Encode a aWidth pixels wide and aHeight pixels tall video frame of
@ -256,12 +267,22 @@ public:
nsresult Encode(const mozilla::layers::Image* aImage, int aWidth, int aHeight,
int64_t aTimestamp, int aInputFlags = 0);
/** Set encoding bitrate (in kbps). */
nsresult SetBitrate(int32_t aKbps);
/**
* Get current AVC codec config blob. The output format depends on the
* aBlobFormat argument given when Configure() was called.
*/
nsresult GetCodecConfig(nsTArray<uint8_t>* aOutputBuf);
protected:
virtual status_t AppendDecoderConfig(nsTArray<uint8_t>* aOutputBuf,
ABuffer* aData) MOZ_OVERRIDE;
// AVC/H.264 encoder replaces NAL unit start code with the unit length as
// specified in ISO/IEC 14496-15 5.2.3.
// If configured to output MP4 format blob, AVC/H.264 encoder has to replace
// NAL unit start code with the unit length as specified in
// ISO/IEC 14496-15 5.2.3.
virtual void AppendFrame(nsTArray<uint8_t>* aOutputBuf,
const uint8_t* aData, size_t aSize) MOZ_OVERRIDE;
@ -276,13 +297,20 @@ private:
* CODEC_AVC_ENC.
*/
OMXVideoEncoder(CodecType aCodecType)
: OMXCodecWrapper(aCodecType), mWidth(0), mHeight(0) {}
: OMXCodecWrapper(aCodecType)
, mWidth(0)
, mHeight(0)
, mBlobFormat(BlobFormat::AVC_MP4)
, mHasConfigBlob(false)
{}
// For creator function to access hidden constructor.
friend class OMXCodecWrapper;
int mWidth;
int mHeight;
BlobFormat mBlobFormat;
bool mHasConfigBlob;
};
} // namespace android

View File

@ -538,8 +538,17 @@ nsDOMWindowUtils::SetResolution(float aXResolution, float aYResolution)
}
nsIPresShell* presShell = GetPresShell();
return presShell ? presShell->SetResolution(aXResolution, aYResolution)
: NS_ERROR_FAILURE;
if (!presShell) {
return NS_ERROR_FAILURE;
}
nsIScrollableFrame* sf = presShell->GetRootScrollFrameAsScrollable();
if (sf) {
sf->SetResolution(gfxSize(aXResolution, aYResolution));
presShell->SetResolution(aXResolution, aYResolution);
}
return NS_OK;
}
NS_IMETHODIMP
@ -550,13 +559,21 @@ nsDOMWindowUtils::GetResolution(float* aXResolution, float* aYResolution)
}
nsIPresShell* presShell = GetPresShell();
if (!presShell) {
return NS_ERROR_FAILURE;
}
if (presShell) {
nsIScrollableFrame* sf = presShell->GetRootScrollFrameAsScrollable();
if (sf) {
const gfxSize& res = sf->GetResolution();
*aXResolution = res.width;
*aYResolution = res.height;
} else {
*aXResolution = presShell->GetXResolution();
*aYResolution = presShell->GetYResolution();
return NS_OK;
}
return NS_ERROR_FAILURE;
return NS_OK;
}
NS_IMETHODIMP

View File

@ -5,7 +5,6 @@ support-files =
[test_Document-createElement-namespace.html.json]
[test_Document-createElementNS.html.json]
[test_Document-createEvent.html.json]
[test_Document-getElementsByTagName.html.json]
[test_Node-isEqualNode.xhtml.json]
[test_Node-properties.html.json]

View File

@ -1,26 +0,0 @@
{
"createEvent('CustomEvent') should be initialized correctly.": true,
"createEvent('customevent') should be initialized correctly.": true,
"createEvent('CUSTOMEVENT') should be initialized correctly.": true,
"createEvent('Event') should be initialized correctly.": true,
"createEvent('event') should be initialized correctly.": true,
"createEvent('EVENT') should be initialized correctly.": true,
"createEvent('Events') should be initialized correctly.": true,
"createEvent('events') should be initialized correctly.": true,
"createEvent('EVENTS') should be initialized correctly.": true,
"createEvent('HTMLEvents') should be initialized correctly.": true,
"createEvent('htmlevents') should be initialized correctly.": true,
"createEvent('HTMLEVENTS') should be initialized correctly.": true,
"createEvent('MouseEvent') should be initialized correctly.": true,
"createEvent('mouseevent') should be initialized correctly.": true,
"createEvent('MOUSEEVENT') should be initialized correctly.": true,
"createEvent('MouseEvents') should be initialized correctly.": true,
"createEvent('mouseevents') should be initialized correctly.": true,
"createEvent('MOUSEEVENTS') should be initialized correctly.": true,
"createEvent('UIEvent') should be initialized correctly.": true,
"createEvent('uievent') should be initialized correctly.": true,
"createEvent('UIEVENT') should be initialized correctly.": true,
"createEvent('UIEvents') should be initialized correctly.": true,
"createEvent('uievents') should be initialized correctly.": true,
"createEvent('UIEVENTS') should be initialized correctly.": true
}

View File

@ -7,8 +7,8 @@
#include "txExprResult.h"
#include "txNodeSet.h"
#include "nsError.h"
#include "mozilla/dom/Attr.h"
#include "mozilla/dom/Element.h"
#include "nsIAttribute.h"
#include "nsDOMClassInfoID.h"
#include "nsIDOMNode.h"
#include "nsIDOMDocument.h"
@ -361,8 +361,8 @@ nsXPathResult::Invalidate(const nsIContent* aChangeRoot)
static_cast<nsIContent*>(contextNode.get())
->GetBindingParent();
} else if (contextNode->IsNodeOfType(nsINode::eATTRIBUTE)) {
nsIContent* parent =
static_cast<nsIAttribute*>(contextNode.get())->GetContent();
Element* parent =
static_cast<Attr*>(contextNode.get())->GetElement();
if (parent) {
ctxBindingParent = parent->GetBindingParent();
}

View File

@ -21,10 +21,13 @@
#include "nsUnicharUtils.h"
#include "nsAttrName.h"
#include "nsTArray.h"
#include "mozilla/dom/Attr.h"
#include "mozilla/dom/Element.h"
#include <stdint.h>
#include <algorithm>
using mozilla::dom::Attr;
const uint32_t kUnknownIndex = uint32_t(-1);
txXPathTreeWalker::txXPathTreeWalker(const txXPathTreeWalker& aOther)
@ -691,7 +694,8 @@ txXPathNativeNode::createXPathNode(nsIDOMNode* aNode, bool aKeepRootAlive)
NS_ASSERTION(attr, "doesn't implement nsIAttribute");
nsINodeInfo *nodeInfo = attr->NodeInfo();
nsIContent *parent = attr->GetContent();
mozilla::dom::Element* parent =
static_cast<Attr*>(attr.get())->GetElement();
if (!parent) {
return nullptr;
}

230
gfx/doc/MozSurface.md Normal file
View File

@ -0,0 +1,230 @@
MozSurface {#mozsurface}
==========
**This document is work in progress. Some information may be missing or incomplete.**
## Goals
We need to be able to safely and efficiently render web content into surfaces that may be shared across processes.
MozSurface is a cross-process and backend-independent Surface API and not a stream API.
## Owner
Nicolas Silva
## Definitions
* Client and Host: In Gecko's compositing architecture, the client process is the producer, while the host process is the consumer side, where compositing takes place.
## Use cases
Drawing web content into a surface and share it with the compositor process to display it on the screen without copies.
## Requirement
* It must be possible to efficiently share a MozSurface with a separate thread or process through IPDL
* It must be possible to obtain read access to a MozSurface on both the client and the host side at the same time.
* The creation, update and destruction of surfaces must be safe and race-free. In particular, the ownership of the shared data must be clearly defined.
* MozSurface must be a cross-backend/cross-platform abstraction that we will use on all of the supported platforms.
* It must be possible to efficiently draw into a MozSurface using Moz2D.
* While it should be possible to share MozSurfaces across processes, it should not be limited to that. MozSurface should also be the preferred abstraction for use with surfaces that are not shared with the compositor process.
## TextureClient and TextureHost
TextureClient and TextureHost are the closest abstractions we currently have to MozSurface.
Inline documentation about TextureClient and TextureHost can be found in:
* [gfx/layers/client/TextureClient.h](http://dxr.mozilla.org/mozilla-central/source/gfx/layers/client/TextureClient.h)
* [gfx/layers/composite/TextureHost.h](http://dxr.mozilla.org/mozilla-central/source/gfx/layers/composite/TextureHost.h)
TextureClient is the client-side handle on a MozSurface, while TextureHost is the equivalent host-side representation. There can only be one TextureClient for a given TextureHost, and one TextureHost for a given TextureClient. Likewise, there can only be one shared object for a given TextureClient/TextureHost pair.
A MozSurface containing data that is shared between a client process and a host process exists in the following form:
```
.
Client process . Host process
.
________________ ______________ ______________
| | | | | |
| TextureClient +----+ <SharedData> +----+ TextureHost |
|________________| |______________| |______________|
.
.
.
Figure 1) A Surface as seen by the client and the host processes
```
The above figure is a logical representation, not a class diagram.
`<SharedData>` is a placeholder for whichever platform specific surface type we are sharing, for example a Gralloc buffer on Gonk or a D3D11 texture on Windows.
## Locking semantics
In order to access the shared surface data users of MozSurface must acquire and release a lock on the surface, specifying the open mode (read/write/read+write).
bool Lock(OpenMode aMode);
void Unlock();
This locking API has two purposes:
* Ensure that access to the shared data is race-free.
* Let the implementation do whatever is necessary for the user to have access to the data. For example it can be mapping and unmapping the surface data in memory if the underlying backend requires it.
The lock is expected to behave as a cross-process blocking read/write lock that is not reentrant.
## Immutable surfaces
In some cases we know in advance that a surface will not be modified after it has been shared. This is for example true for video frames. In this case the surface can be marked as immutable and the underlying implementation doesn't need to hold an actual blocking lock on the shared data.
Trying to acquire a write lock on a MozSurface that is marked as immutable and already shared must fail (return false).
Note that it is still required to use the Lock/Unlock API to read the data, in order for the implementation to be able to properly map and unmap the memory. This is just an optimization and a safety check.
## Drawing into a surface
In most cases we want to be able to paint directly into a surface through the Moz2D API.
A surface lets you *borrow* a DrawTarget that is only valid between Lock and Unlock.
DrawTarget* GetAsDrawTarget();
It is invalid to hold a reference to the DrawTarget after Unlock, and a different DrawTarget may be obtained during the next Lock/Unlock interval.
In some cases we want to use MozSurface without drawing into it. For instance to share video frames across processes. Some surface types may also not be accessible through a DrawTarget (for example YCbCr surfaces).
bool CanExposeDrawTarget();
helps with making sure that a Surface supports exposing a Moz2D DrawTarget.
## Using a MozSurface as a source for Compositing
To interface with the Compositor API, MozSurface gives access to TextureSource objects. TextureSource is the cross-backend representation of a texture that Compositor understands.
While MozSurface handles memory management of (potentially shared) texture data, TextureSource is only an abstraction for Compositing.
## Fence synchronization
TODO: We need to figure this out. Right now we have a Gonk specific implementation, but no cross-platform abstraction/design.
## Ownership of the shared data
MozSurface (TextureClient/TextureHost in its current form) defines ownership rules that depend on the configuration of the surface, in order to satisfy efficiency and safety requirements.
These rules rely on the fact that the underlying shared data is strictly owned by the MozSurface. This means that keeping direct references to the shared data is illegal and unsafe.
## Deallocation protocol
The shared data is accessible by both the client-side and the host-side of the MozSurface. A deallocation protocol must be defined to handle which side deallocates the data, and to ensure that it doesn't cause any race condition.
The client side, which contains the web content's logic, always "decides" when a surface is needed or not. So the lifetime of a MozSurface is driven by the reference count of its client-side handle (TextureClient).
When a TextureClient's reference count reaches zero, a "Remove" message is sent in order to let the host side know that the shared data is no longer accessible on the client side and that it is safe for it to be deleted. The host side responds with a "Delete" message.
```
client side . host side
.
(A) Client: Send Remove -. .
\ .
\ . ... can receive and send ...
\
Can receive `--> (B) Host: Receive Remove
Can't send |
.-- (C) Host: Send Delete
/
/ . ... can't receive nor send ...
/ .
(D) Client: Receive Delete <--' .
.
Figure 2) MozSurface deallocation handshake
```
This handshake protocol is twofold:
* It defines where and when it is possible to deallocate the shared data without races
* It makes it impossible for asynchronous messages to race with the destruction of the MozSurface.
### Deallocating on the host side
In the common case, the shared data is deallocated asynchronously on the host side. In this case the deallocation takes place at the point (C) of figure 2.
### Deallocating on the client side
In some rare cases, for instance if the underlying implementation requires it, the shared data must be deallocated on the client side. In such cases, deallocation happens at the point (D) of figure 2.
In some exceptional cases, this needs to happen synchronously, meaning that the client-side thread will block until the Delete message is received. This is supported but it is terrible for performance, so it should be avoided as much as possible.
Currently this is needed when shutting down a hardware-decoded video stream with libstagefright on Gonk, because libstagefright unfortunately assumes it has full ownership over the shared data (gralloc buffers) and crashes if there are still users of the buffers.
### Sharing state
The above deallocation protocol of a MozSurface applies to the common case that is when the surface is shared between two processes. A Surface can also be deallocated while it is not shared.
The sharing state of a MozSurface can be one of the following:
* (1) Uninitialized (it doesn't have any shared data)
* (2) Local (it isn't shared with another thread/process)
* (3) Shared (the state you would expect it to be most of the time)
* (4) Invalid (when for some rare cases we needed to force the deallocation of the shared data before the destruction of the TextureClient object).
Surfaces can move from state N to state N+1 and be deallocated in any of these states. It could be possible to move from Shared to Local, but we currently don't have a use case for it.
The deallocation protocol above, applies to the Shared state (3).
In the other cases:
* (1) Uninitialized: There is nothing to do.
* (2) Local: The shared data is deallocated by the client side without need for a handshake, since it is not shared with other threads.
* (4) Invalid: There is nothing to do (deallocation has already happened).
## Internal buffers / direct texturing
Some MozSurface implementations use CPU-side shared memory to share the texture data accross processes, and require a GPU texture upload when interfacing with a TextureSource. In this case we say that the surface has an internal buffer (because it is implicitly equivalent to double buffering where the shared data is the back buffer and the GPU side texture is the front buffer). We also say that it doesn't do "direct texturing" meaning that we don't draw directly into the GPU-side texture.
Examples:
* Shmem MozSurface + OpenGL TextureSource: Has an internal buffer (no direct texturing)
* Gralloc MozSurface + Gralloc TextureSource: No internal buffer (direct texturing)
While direct texturing is usually the most efficient way, it is not always available depending on the platform and the required allocation size or format. Textures with internal buffers have less restrictions around locking since the host side will only need to read from the MozSurface once per update, meaning that we can often get away with single buffering where we would need double buffering with direct texturing.
## Alternative solutions
### Sending ownership back and forth between the client and host sides through message passing, instead of sharing.
The current design of MozSurface makes the surface accessible from both sides at the same time, forcing us to do Locking and have a hand shake around deallocating the shared data, while using pure message passing and making the surface accessible only from one side at a time would avoid these complications.
Using pure message passing was actually the first approach we tried when we created the first version of TextureClient and TextureHost. This strategy failed in several places, partly because of some legacy in Gecko's architecture, and partly because of some of optimizations we do to avoid copying surfaces.
We need a given surface to be accessible on both the client and host for the following reasons:
* Gecko can at any time require read access on the client side to a surface that is shared with the host process, for example to build a temporary layer manager and generate a screenshot. This is mostly a legacy problem.
* We do some copy-on-write optimizations on surfaces that are shared with the compositor in order to keep invalid regions as small as possible. Our tiling implementation is an example of that.
* Our buffer rotation code on scrollable non-tiled layers also requires a synchronization on the client side between the front and back buffers, while the front buffer is used on the host side.
## Backends
We have MozSurface implementations (classes inheriting from TextureClient/TextureHost) for OpenGL, Software, D3D9, and D3D11 backends.
Some implementations can be used with any backend (ex. ShmemTextureClient/Host).
## Users of MozSurface
MozSurface is the mechanism used by layers to share surfaces with the compositor, but it is not limited to layers. It should be used by anything that draws into a surface that may be shared with the compositor thread.
## Testing
TODO - How can we make MozSurface more testable and what should we test?
## Future work
### Rename TextureClient/TextureHost
The current terminology is very confusing.
### Unify TextureClient and TextureHost
TextureClient and TextureHost should live under a common interface to better hide the IPC details. The base class should only expose the non-IPC-related methods such as locking, access through a DrawTarget, and access to a TextureSource.
### Using a MozSurface as a source for Drawing
MozSurface should be able to expose a borrowed Moz2D SourceSurface that is valid between Lock and Unlock similarly to how it exposes a DrawTarget.
## Comparison with other APIs
MozSurface is somewhat equivalent to Gralloc on Android/Gonk: it is a reference counted cross-process surface with locking semantics. While Gralloc can interface itself with OpenGL textures for compositing, MozSurface can interface itself to TextureSource objects.
MozSurface should not be confused with higher level APIs such as EGLStream. A swap-chain API like EGLStream can be implemented on top of MozSurface, but MozSurface's purpose is to define and manage the memory and resources of shared texture data.

View File

@ -0,0 +1,16 @@
var total = 0
x = [ [] ]
x[0].valueOf = function () {
total++;
}
function f(y) {
y != Math.abs()
}
(function() {
f()
f(x[0])
f(x[0])
})()
assertEq(total, 2)

View File

@ -2823,6 +2823,10 @@ class MToDouble
{
setResultType(MIRType_Double);
setMovable();
// An object might have "valueOf", which means it is effectful.
if (def->mightBeType(MIRType_Object))
setGuard();
}
public:
@ -2885,6 +2889,10 @@ class MToFloat32
{
setResultType(MIRType_Float32);
setMovable();
// An object might have "valueOf", which means it is effectful.
if (def->mightBeType(MIRType_Object))
setGuard();
}
public:
@ -2993,6 +3001,10 @@ class MToInt32
{
setResultType(MIRType_Int32);
setMovable();
// An object might have "valueOf", which means it is effectful.
if (def->mightBeType(MIRType_Object))
setGuard();
}
public:
@ -3047,6 +3059,10 @@ class MTruncateToInt32 : public MUnaryInstruction
{
setResultType(MIRType_Int32);
setMovable();
// An object might have "valueOf", which means it is effectful.
if (def->mightBeType(MIRType_Object))
setGuard();
}
public:

View File

@ -1843,6 +1843,29 @@ AddTransformedBoundsToRegion(const nsIntRegion& aRegion,
aDest->Or(*aDest, intRect);
}
static bool
CanOptimizeAwayThebesLayer(ThebesLayerData* aData,
FrameLayerBuilder* aLayerBuilder)
{
bool isRetained = aData->mLayer->Manager()->IsWidgetLayerManager();
if (!isRetained) {
return false;
}
// If there's no thebes layer with valid content in it that we can reuse,
// always create a color or image layer (and potentially throw away an
// existing completely invalid thebes layer).
if (aData->mLayer->GetValidRegion().IsEmpty()) {
return true;
}
// There is an existing thebes layer we can reuse. Throwing it away can make
// compositing cheaper (see bug 946952), but it might cause us to re-allocate
// the thebes layer frequently due to an animation. So we only discard it if
// we're in tree compression mode, which is triggered at a low frequency.
return aLayerBuilder->CheckInLayerTreeCompressionMode();
}
void
ContainerState::PopThebesLayerData()
{
@ -1858,9 +1881,8 @@ ContainerState::PopThebesLayerData()
nsRefPtr<Layer> layer;
nsRefPtr<ImageContainer> imageContainer = data->CanOptimizeImageLayer(mBuilder);
bool isRetained = data->mLayer->Manager()->IsWidgetLayerManager();
if (isRetained && (data->mIsSolidColorInVisibleRegion || imageContainer) &&
(data->mLayer->GetValidRegion().IsEmpty() || mLayerBuilder->CheckInLayerTreeCompressionMode())) {
if ((data->mIsSolidColorInVisibleRegion || imageContainer) &&
CanOptimizeAwayThebesLayer(data, mLayerBuilder)) {
NS_ASSERTION(!(data->mIsSolidColorInVisibleRegion && imageContainer),
"Can't be a solid color as well as an image!");
if (imageContainer) {

View File

@ -12,6 +12,7 @@
#define nsPresState_h_
#include "nsPoint.h"
#include "gfxPoint.h"
#include "nsAutoPtr.h"
class nsPresState
@ -20,6 +21,7 @@ public:
nsPresState()
: mContentData(nullptr)
, mScrollState(0, 0)
, mResolution(1.0, 1.0)
, mDisabledSet(false)
, mDisabled(false)
{}
@ -29,18 +31,28 @@ public:
mScrollState = aState;
}
nsPoint GetScrollState()
nsPoint GetScrollState() const
{
return mScrollState;
}
void SetResolution(const gfxSize& aSize)
{
mResolution = aSize;
}
gfxSize GetResolution() const
{
return mResolution;
}
void ClearNonScrollState()
{
mContentData = nullptr;
mDisabledSet = false;
}
bool GetDisabled()
bool GetDisabled() const
{
return mDisabled;
}
@ -51,12 +63,12 @@ public:
mDisabledSet = true;
}
bool IsDisabledSet()
bool IsDisabledSet() const
{
return mDisabledSet;
}
nsISupports* GetStateProperty()
nsISupports* GetStateProperty() const
{
return mContentData;
}
@ -70,6 +82,7 @@ public:
protected:
nsCOMPtr<nsISupports> mContentData;
nsPoint mScrollState;
gfxSize mResolution;
bool mDisabledSet;
bool mDisabled;
};

View File

@ -1600,6 +1600,7 @@ ScrollFrameHelper::ScrollFrameHelper(nsContainerFrame* aOuter,
, mScrollPosAtLastPaint(0, 0)
, mRestorePos(-1, -1)
, mLastPos(-1, -1)
, mResolution(1.0, 1.0)
, mScrollPosForLayerPixelAlignment(-1, -1)
, mLastUpdateImagesPos(-1, -1)
, mNeverHasVerticalScrollbar(false)
@ -2786,6 +2787,18 @@ ScrollFrameHelper::GetScrollPositionClampingScrollPortSize() const
return mScrollPort.Size();
}
gfxSize
ScrollFrameHelper::GetResolution() const
{
return mResolution;
}
void
ScrollFrameHelper::SetResolution(const gfxSize& aResolution)
{
mResolution = aResolution;
}
static void
AdjustForWholeDelta(int32_t aDelta, nscoord* aCoord)
{
@ -4486,7 +4499,7 @@ ScrollFrameHelper::GetCoordAttribute(nsIFrame* aBox, nsIAtom* aAtom,
}
nsPresState*
ScrollFrameHelper::SaveState()
ScrollFrameHelper::SaveState() const
{
nsIScrollbarMediator* mediator = do_QueryFrame(GetScrolledFrame());
if (mediator) {
@ -4511,6 +4524,7 @@ ScrollFrameHelper::SaveState()
pt = mRestorePos;
}
state->SetScrollState(pt);
state->SetResolution(mResolution);
return state;
}
@ -4520,6 +4534,11 @@ ScrollFrameHelper::RestoreState(nsPresState* aState)
mRestorePos = aState->GetScrollState();
mDidHistoryRestore = true;
mLastPos = mScrolledFrame ? GetLogicalScrollPosition() : nsPoint(0,0);
mResolution = aState->GetResolution();
if (mIsRoot) {
mOuter->PresContext()->PresShell()->SetResolution(mResolution.width, mResolution.height);
}
}
void

View File

@ -163,6 +163,9 @@ public:
// Get the scroll range assuming the scrollport has size (aWidth, aHeight).
nsRect GetScrollRange(nscoord aWidth, nscoord aHeight) const;
nsSize GetScrollPositionClampingScrollPortSize() const;
gfxSize GetResolution() const;
void SetResolution(const gfxSize& aResolution);
protected:
nsRect GetScrollRangeForClamping() const;
@ -207,7 +210,7 @@ public:
nsSize GetLineScrollAmount() const;
nsSize GetPageScrollAmount() const;
nsPresState* SaveState();
nsPresState* SaveState() const;
void RestoreState(nsPresState* aState);
nsIFrame* GetScrolledFrame() const { return mScrolledFrame; }
@ -348,6 +351,9 @@ public:
// other than trying to restore mRestorePos.
nsPoint mLastPos;
// The current resolution derived from the zoom level and device pixel ratio.
gfxSize mResolution;
nsExpirationState mActivityExpirationState;
nsCOMPtr<nsITimer> mScrollActivityTimer;
@ -577,6 +583,12 @@ public:
virtual nsSize GetScrollPositionClampingScrollPortSize() const MOZ_OVERRIDE {
return mHelper.GetScrollPositionClampingScrollPortSize();
}
virtual gfxSize GetResolution() const MOZ_OVERRIDE {
return mHelper.GetResolution();
}
virtual void SetResolution(const gfxSize& aResolution) MOZ_OVERRIDE {
return mHelper.SetResolution(aResolution);
}
virtual nsSize GetLineScrollAmount() const MOZ_OVERRIDE {
return mHelper.GetLineScrollAmount();
}
@ -887,6 +899,12 @@ public:
virtual nsSize GetScrollPositionClampingScrollPortSize() const MOZ_OVERRIDE {
return mHelper.GetScrollPositionClampingScrollPortSize();
}
virtual gfxSize GetResolution() const MOZ_OVERRIDE {
return mHelper.GetResolution();
}
virtual void SetResolution(const gfxSize& aResolution) MOZ_OVERRIDE {
return mHelper.SetResolution(aResolution);
}
virtual nsSize GetLineScrollAmount() const MOZ_OVERRIDE {
return mHelper.GetLineScrollAmount();
}

View File

@ -134,7 +134,14 @@ public:
* position.
*/
virtual nsSize GetScrollPositionClampingScrollPortSize() const = 0;
/**
* Get the element resolution.
*/
virtual gfxSize GetResolution() const = 0;
/**
* Set the element resolution.
*/
virtual void SetResolution(const gfxSize& aResolution) = 0;
/**
* Return how much we would try to scroll by in each direction if
* asked to scroll by one "line" vertically and horizontally.

View File

@ -48,3 +48,5 @@
// reflow so that that rare edge case doesn't lead to reftest
// failures.
branch.setBoolPref("layout.interruptible-reflow.enabled", false);
// Don't try to connect to the telemetry server.
branch.setBoolPref("toolkit.telemetry.enabled", false);

View File

@ -94,7 +94,7 @@ static void
sink_info_callback(pa_context * context, const pa_sink_info * info, int eol, void * u)
{
cubeb * ctx = u;
if (!eol) {
if (!eol) {
ctx->default_sink_info = malloc(sizeof(pa_sink_info));
memcpy(ctx->default_sink_info, info, sizeof(pa_sink_info));
}
@ -404,9 +404,11 @@ pulse_get_max_channel_count(cubeb * ctx, uint32_t * max_channels)
{
assert(ctx && max_channels);
WRAP(pa_threaded_mainloop_lock)(ctx->mainloop);
while (!ctx->default_sink_info) {
WRAP(pa_threaded_mainloop_wait)(ctx->mainloop);
}
WRAP(pa_threaded_mainloop_unlock)(ctx->mainloop);
*max_channels = ctx->default_sink_info->channel_map.channels;
@ -416,6 +418,8 @@ pulse_get_max_channel_count(cubeb * ctx, uint32_t * max_channels)
static int
pulse_get_preferred_sample_rate(cubeb * ctx, uint32_t * rate)
{
assert(ctx && rate);
WRAP(pa_threaded_mainloop_lock)(ctx->mainloop);
while (!ctx->default_sink_info) {
WRAP(pa_threaded_mainloop_wait)(ctx->mainloop);

View File

@ -4,6 +4,9 @@
#include <stdlib.h>
#include <cubeb/cubeb.h>
#include <assert.h>
#include <stdio.h>
#define LOG(msg) fprintf(stderr, "%s\n", msg);
int main(int argc, char * argv[])
{
@ -13,16 +16,20 @@ int main(int argc, char * argv[])
uint32_t preferred_rate;
uint32_t latency_ms;
LOG("latency_test start");
rv = cubeb_init(&ctx, "Cubeb audio test");
assert(rv == CUBEB_OK && "Cubeb init failed.");
LOG("cubeb_init ok");
rv = cubeb_get_max_channel_count(ctx, &max_channels);
assert(rv == CUBEB_OK && "Could not query the max channe count.");
assert(max_channels > 0 && "Invalid max channel count.");
LOG("cubeb_get_max_channel_count ok");
rv = cubeb_get_preferred_sample_rate(ctx, &preferred_rate);
assert(rv == CUBEB_OK && "Could not query the preferred sample rate.");
assert(preferred_rate && "Invalid preferred sample rate.");
LOG("cubeb_get_preferred_sample_rate ok");
cubeb_stream_params params = {
CUBEB_SAMPLE_FLOAT32NE,
@ -32,8 +39,10 @@ int main(int argc, char * argv[])
rv = cubeb_get_min_latency(ctx, params, &latency_ms);
assert(rv == CUBEB_OK && "Could not query the minimal latency.");
assert(latency_ms && "Invalid minimal latency.");
LOG("cubeb_get_min_latency ok");
cubeb_destroy(ctx);
LOG("cubeb_destroy ok");
return EXIT_SUCCESS;
}

View File

@ -57,6 +57,7 @@
'../../../content/media',
'../../../media/mtransport',
'../trunk',
'../trunk/webrtc',
'../trunk/webrtc/video_engine/include',
'../trunk/webrtc/voice_engine/include',
'../trunk/webrtc/modules/interface',
@ -191,6 +192,26 @@
# Conditionals
#
'conditions': [
['moz_omx_encoder==1', {
'sources': [
'./src/media-conduit/WebrtcOMXH264VideoCodec.cpp',
'./src/media-conduit/OMXVideoCodec.cpp',
],
'include_dirs': [
'../../../content/media/omx',
'../../../gfx/layers/client',
],
'cflags_mozilla': [
'-I$(ANDROID_SOURCE)/frameworks/av/include/media/stagefright',
'-I$(ANDROID_SOURCE)/frameworks/av/include',
'-I$(ANDROID_SOURCE)/frameworks/native/include/media/openmax',
'-I$(ANDROID_SOURCE)/frameworks/native/include',
'-I$(ANDROID_SOURCE)/frameworks/native/opengl/include',
],
'defines' : [
'MOZ_OMX_ENCODER'
],
}],
['build_for_test==0', {
'defines' : [
'MOZILLA_INTERNAL_API'

View File

@ -78,6 +78,12 @@ struct VideoCodecConfig
mMaxFrameRate(0),
mLoadManager(load_manager)
{
// Replace codec name here because WebRTC.org code has a whitelist of
// supported video codec in |webrtc::ViECodecImpl::CodecValid()| and will
// reject registration of those not in it.
// TODO: bug 995884 to support H.264 in WebRTC.org code.
if (mName == "H264_P0")
mName = "I420";
}
VideoCodecConfig(int type,
@ -93,6 +99,12 @@ struct VideoCodecConfig
mMaxFrameRate(max_fr),
mLoadManager(load_manager)
{
// Replace codec name here because WebRTC.org code has a whitelist of
// supported video codec in |webrtc::ViECodecImpl::CodecValid()| and will
// reject registration of those not in it.
// TODO: bug 995884 to support H.264 in WebRTC.org code.
if (mName == "H264_P0")
mName = "I420";
}

View File

@ -13,6 +13,8 @@
#include "VideoTypes.h"
#include "MediaConduitErrors.h"
#include "ImageContainer.h"
#include <vector>
namespace mozilla {
@ -44,6 +46,20 @@ public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TransportInterface)
};
/**
* This class wraps image object for VideoRenderer::RenderVideoFrame()
* callback implementation to use for rendering.
*/
class ImageHandle
{
public:
ImageHandle(layers::Image* image) : mImage(image) {}
const RefPtr<layers::Image>& GetImage() const { return mImage; }
private:
RefPtr<layers::Image> mImage;
};
/**
* 1. Abstract renderer for video data
@ -75,16 +91,21 @@ class VideoRenderer
* @param time_stamp: Decoder timestamp, typically 90KHz as per RTP
* @render_time: Wall-clock time at the decoder for synchronization
* purposes in milliseconds
* NOTE: It is the responsibility of the concrete implementations of this
* class to own copy of the frame if needed for time longer than scope of
* this callback.
* @handle: opaque handle for image object of decoded video frame.
* NOTE: If decoded video frame is passed through buffer , it is the
* responsibility of the concrete implementations of this class to own copy
* of the frame if needed for time longer than scope of this callback.
* Such implementations should be quick in processing the frames and return
* immediately.
* On the other hand, if decoded video frame is passed through handle, the
* implementations should keep a reference to the (ref-counted) image object
* inside until it's no longer needed.
*/
virtual void RenderVideoFrame(const unsigned char* buffer,
unsigned int buffer_size,
uint32_t time_stamp,
int64_t render_time) = 0;
int64_t render_time,
const ImageHandle& handle) = 0;
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoRenderer)
};
@ -162,6 +183,18 @@ public:
};
// Abstract base classes for external encoder/decoder.
class VideoEncoder
{
public:
virtual ~VideoEncoder() {};
};
class VideoDecoder
{
public:
virtual ~VideoDecoder() {};
};
/**
* MediaSessionConduit for video
@ -241,6 +274,21 @@ public:
virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
const std::vector<VideoCodecConfig* >& recvCodecConfigList) = 0;
/**
* Set an external encoder
* @param encoder
* @result: on success, we will use the specified encoder
*/
virtual MediaConduitErrorCode SetExternalSendCodec(int pltype,
VideoEncoder* encoder) = 0;
/**
* Set an external decoder
* @param decoder
* @result: on success, we will use the specified decoder
*/
virtual MediaConduitErrorCode SetExternalRecvCodec(int pltype,
VideoDecoder* decoder) = 0;
/**
* These methods allow unit tests to double-check that the
@ -361,7 +409,3 @@ public:
};
}
#endif

View File

@ -0,0 +1,30 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "OMXVideoCodec.h"
#ifdef WEBRTC_GONK
#include "WebrtcOMXH264VideoCodec.h"
#endif
namespace mozilla {
VideoEncoder*
OMXVideoCodec::CreateEncoder(CodecType aCodecType)
{
if (aCodecType == CODEC_H264) {
return new WebrtcOMXH264VideoEncoder();
}
return nullptr;
}
VideoDecoder*
OMXVideoCodec::CreateDecoder(CodecType aCodecType) {
if (aCodecType == CODEC_H264) {
return new WebrtcOMXH264VideoDecoder();
}
return nullptr;
}
}

View File

@ -0,0 +1,32 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef OMX_VIDEO_CODEC_H_
#define OMX_VIDEO_CODEC_H_
#include "MediaConduitInterface.h"
namespace mozilla {
class OMXVideoCodec {
public:
enum CodecType {
CODEC_H264,
};
/**
* Create encoder object for codec type |aCodecType|. Return |nullptr| when
* failed.
*/
static VideoEncoder* CreateEncoder(CodecType aCodecType);
/**
* Create decoder object for codec type |aCodecType|. Return |nullptr| when
* failed.
*/
static VideoDecoder* CreateDecoder(CodecType aCodecType);
};
}
#endif // OMX_VIDEO_CODEC_H_

View File

@ -14,6 +14,7 @@
#include "LoadManager.h"
#include "webrtc/common_video/interface/native_handle.h"
#include "webrtc/video_engine/include/vie_errors.h"
#ifdef MOZ_WIDGET_ANDROID
@ -125,6 +126,7 @@ WebrtcVideoConduit::~WebrtcVideoConduit()
mPtrViENetwork = nullptr;
mPtrViERender = nullptr;
mPtrRTP = nullptr;
mPtrExtCodec = nullptr;
// only one opener can call Delete. Have it be the last to close.
if(mVideoEngine)
@ -284,6 +286,13 @@ MediaConduitErrorCode WebrtcVideoConduit::Init(WebrtcVideoConduit *other)
return kMediaConduitSessionNotInited;
}
if ( !(mPtrExtCodec = webrtc::ViEExternalCodec::GetInterface(mVideoEngine)))
{
CSFLogError(logTag, "%s Unable to get external codec interface %d ",
__FUNCTION__, mPtrViEBase->LastError());
return kMediaConduitSessionNotInited;
}
if (other) {
mChannel = other->mChannel;
mPtrExtCapture = other->mPtrExtCapture;
@ -844,6 +853,25 @@ WebrtcVideoConduit::SelectSendResolution(unsigned short width,
return true;
}
MediaConduitErrorCode
WebrtcVideoConduit::SetExternalSendCodec(int pltype,
VideoEncoder* encoder) {
int ret = mPtrExtCodec->RegisterExternalSendCodec(mChannel,
pltype,
static_cast<WebrtcVideoEncoder*>(encoder),
false);
return ret ? kMediaConduitInvalidSendCodec : kMediaConduitNoError;
}
MediaConduitErrorCode
WebrtcVideoConduit::SetExternalRecvCodec(int pltype,
VideoDecoder* decoder) {
int ret = mPtrExtCodec->RegisterExternalReceiveCodec(mChannel,
pltype,
static_cast<WebrtcVideoDecoder*>(decoder));
return ret ? kMediaConduitInvalidReceiveCodec : kMediaConduitNoError;
}
MediaConduitErrorCode
WebrtcVideoConduit::SendVideoFrame(unsigned char* video_frame,
unsigned int video_frame_length,
@ -1043,7 +1071,17 @@ WebrtcVideoConduit::DeliverFrame(unsigned char* buffer,
if(mRenderer)
{
mRenderer->RenderVideoFrame(buffer, buffer_size, time_stamp, render_time);
layers::Image* img = nullptr;
// |handle| should be a webrtc::NativeHandle if available.
if (handle) {
webrtc::NativeHandle* native_h = static_cast<webrtc::NativeHandle*>(handle);
// In the handle, there should be a layers::Image.
img = static_cast<layers::Image*>(native_h->GetHandle());
}
const ImageHandle img_h(img);
mRenderer->RenderVideoFrame(buffer, buffer_size, time_stamp, render_time,
img_h);
return 0;
}

View File

@ -12,9 +12,14 @@
// Video Engine Includes
#include "webrtc/common_types.h"
#ifdef FF
#undef FF // Avoid name collision between scoped_ptr.h and nsCRTGlue.h.
#endif
#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
#include "webrtc/video_engine/include/vie_base.h"
#include "webrtc/video_engine/include/vie_capture.h"
#include "webrtc/video_engine/include/vie_codec.h"
#include "webrtc/video_engine/include/vie_external_codec.h"
#include "webrtc/video_engine/include/vie_render.h"
#include "webrtc/video_engine/include/vie_network.h"
#include "webrtc/video_engine/include/vie_rtp_rtcp.h"
@ -29,12 +34,22 @@
using webrtc::ViECapture;
using webrtc::ViERender;
using webrtc::ViEExternalCapture;
using webrtc::ViEExternalCodec;
namespace mozilla {
class WebrtcAudioConduit;
// Interface of external video encoder for WebRTC.
class WebrtcVideoEncoder:public VideoEncoder
,public webrtc::VideoEncoder
{};
// Interface of external video decoder for WebRTC.
class WebrtcVideoDecoder:public VideoDecoder
,public webrtc::VideoDecoder
{};
/**
* Concrete class for Video session. Hooks up
* - media-source and target to external transport
@ -127,6 +142,19 @@ public:
VideoType video_type,
uint64_t capture_time);
/**
* Set an external encoder object |encoder| to the payload type |pltype|
* for sender side codec.
*/
virtual MediaConduitErrorCode SetExternalSendCodec(int pltype,
VideoEncoder* encoder);
/**
* Set an external decoder object |decoder| to the payload type |pltype|
* for receiver side codec.
*/
virtual MediaConduitErrorCode SetExternalRecvCodec(int pltype,
VideoDecoder* decoder);
/**
@ -154,9 +182,16 @@ public:
/**
* Does DeliverFrame() support a null buffer and non-null handle
* (video texture)?
* XXX Investigate! Especially for Android/B2G
* B2G support it (when using HW video decoder with graphic buffer output).
* XXX Investigate! Especially for Android
*/
virtual bool IsTextureSupported() { return false; }
virtual bool IsTextureSupported() {
#ifdef WEBRTC_GONK
return true;
#else
return false;
#endif
}
unsigned short SendingWidth() {
return mSendingWidth;
@ -265,6 +300,7 @@ private:
ScopedCustomReleasePtr<webrtc::ViENetwork> mPtrViENetwork;
ScopedCustomReleasePtr<webrtc::ViERender> mPtrViERender;
ScopedCustomReleasePtr<webrtc::ViERTP_RTCP> mPtrRTP;
ScopedCustomReleasePtr<webrtc::ViEExternalCodec> mPtrExtCodec;
webrtc::ViEExternalCapture* mPtrExtCapture; // shared

View File

@ -0,0 +1,875 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "CSFLog.h"
#include "WebrtcOMXH264VideoCodec.h"
// Android/Stagefright
#include <avc_utils.h>
#include <binder/ProcessState.h>
#include <foundation/ABuffer.h>
#include <foundation/AMessage.h>
#include <gui/Surface.h>
#include <media/ICrypto.h>
#include <MediaCodec.h>
#include <MediaDefs.h>
#include <MediaErrors.h>
#include <MetaData.h>
#include <OMX_Component.h>
using namespace android;
// WebRTC
#include "common_video/interface/texture_video_frame.h"
#include "video_engine/include/vie_external_codec.h"
// Gecko
#include "GonkNativeWindow.h"
#include "GonkNativeWindowClient.h"
#include "mozilla/Atomics.h"
#include "mozilla/Mutex.h"
#include "nsThreadUtils.h"
#include "OMXCodecWrapper.h"
#include "TextureClient.h"
#define DEQUEUE_BUFFER_TIMEOUT_US (100 * 1000ll) // 100ms.
#define START_DEQUEUE_BUFFER_TIMEOUT_US (10 * DEQUEUE_BUFFER_TIMEOUT_US) // 1s.
#define DRAIN_THREAD_TIMEOUT_US (1000 * 1000ll) // 1s.
#define LOG_TAG "WebrtcOMXH264VideoCodec"
#define CODEC_LOGV(...) CSFLogInfo(LOG_TAG, __VA_ARGS__)
#define CODEC_LOGD(...) CSFLogDebug(LOG_TAG, __VA_ARGS__)
#define CODEC_LOGI(...) CSFLogInfo(LOG_TAG, __VA_ARGS__)
#define CODEC_LOGW(...) CSFLogWarn(LOG_TAG, __VA_ARGS__)
#define CODEC_LOGE(...) CSFLogError(LOG_TAG, __VA_ARGS__)
namespace mozilla {
// NS_INLINE_DECL_THREADSAFE_REFCOUNTING() cannot be used directly in
// ImageNativeHandle below because the return type of webrtc::NativeHandle
// AddRef()/Release() conflicts with those defined in macro. To avoid another
// copy/paste of ref-counting implementation here, this dummy base class
// is created to proivde another level of indirection.
class DummyRefCountBase {
public:
// Use the name of real class for logging.
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageNativeHandle)
// To make sure subclass will be deleted/destructed properly.
virtual ~DummyRefCountBase() {}
};
// This function implements 2 interafces:
// 1. webrtc::NativeHandle: to wrap layers::Image object so decoded frames can
// be passed through WebRTC rendering pipeline using TextureVideoFrame.
// 2. ImageHandle: for renderer to get the image object inside without knowledge
// about webrtc::NativeHandle.
class ImageNativeHandle MOZ_FINAL
: public webrtc::NativeHandle
, public DummyRefCountBase
{
public:
ImageNativeHandle(layers::Image* aImage)
: mImage(aImage)
{}
// Implement webrtc::NativeHandle.
virtual void* GetHandle() MOZ_OVERRIDE { return mImage.get(); }
virtual int AddRef() MOZ_OVERRIDE
{
return DummyRefCountBase::AddRef();
}
virtual int Release() MOZ_OVERRIDE
{
return DummyRefCountBase::Release();
}
private:
RefPtr<layers::Image> mImage;
};
// Graphic buffer lifecycle management.
// Return buffer to OMX codec when renderer is done with it.
class RecycleCallback
{
public:
  RecycleCallback(const sp<MediaCodec>& aOmx, uint32_t aBufferIndex)
    : mOmx(aOmx)
    , mBufferIndex(aBufferIndex)
  {}
  typedef void* CallbackPtr;
  // TextureClient recycle callback. |aClosure| is a heap-allocated
  // RecycleCallback (see GenerateVideoFrame()); this function returns the
  // OMX output buffer to the codec and then deletes the closure, so it must
  // run at most once per closure instance.
  static void ReturnOMXBuffer(layers::TextureClient* aClient, CallbackPtr aClosure)
  {
    aClient->ClearRecycleCallback();
    RecycleCallback* self = static_cast<RecycleCallback*>(aClosure);
    self->mOmx->releaseOutputBuffer(self->mBufferIndex);
    delete self;
  }

private:
  sp<MediaCodec> mOmx;
  uint32_t mBufferIndex;
};
// Per-input-frame metadata queued to OMXOutputDrain so encoded/decoded
// callbacks can be constructed when the matching output buffer appears.
struct EncodedFrame
{
  uint32_t mWidth;        // Frame width in pixels.
  uint32_t mHeight;       // Frame height in pixels.
  uint32_t mTimestamp;    // RTP timestamp (90kHz clock; see the 90kHz -> us conversions below).
  int64_t mRenderTimeMs;  // Render time in milliseconds.
};
// Base runnable class that repeatedly pulls OMX output buffers on a separate
// thread.
// How to use:
// - implement DrainOutput() to get output. Remember to return false to tell
//   drain not to pop input queue.
// - call QueueInput() to schedule a run to drain output. The input, aFrame,
//   should contain corresponding info such as image size and timestamps for
//   DrainOutput() implementation to construct data needed by encoded/decoded
//   callbacks.
// TODO: Bug 997110 - Revisit queue/drain logic. Current design assumes that
//       encoder only generates one output buffer per input frame and won't work
//       if encoder drops frames or generates multiple outputs per input.
class OMXOutputDrain : public nsRunnable
{
public:
  // Create the drain thread (if not already running) and dispatch this
  // runnable to it.
  void Start() {
    MonitorAutoLock lock(mMonitor);
    if (mThread == nullptr) {
      NS_NewNamedThread("OMXOutputDrain", getter_AddRefs(mThread));
    }
    CODEC_LOGD("OMXOutputDrain started");
    mEnding = false;
    mThread->Dispatch(this, NS_DISPATCH_NORMAL);
  }

  // Ask Run() to exit its loop and shut the drain thread down.
  void Stop() {
    MonitorAutoLock lock(mMonitor);
    mEnding = true;
    lock.NotifyAll(); // In case Run() is waiting.
    if (mThread != nullptr) {
      mThread->Shutdown();
      mThread = nullptr;
    }
    CODEC_LOGD("OMXOutputDrain stopped");
  }

  // Queue metadata of one submitted input frame and wake Run() so it can
  // drain the corresponding output.
  void QueueInput(const EncodedFrame& aFrame)
  {
    MonitorAutoLock lock(mMonitor);

    MOZ_ASSERT(mThread);

    mInputFrames.push(aFrame);
    // Notify Run() about queued input and it can start working.
    lock.NotifyAll();
  }

  // Drain loop. Waits for queued input, calls DrainOutput() with the monitor
  // released (it blocks on the codec), and pops the input queue when
  // DrainOutput() says the frame was consumed.
  NS_IMETHODIMP Run() MOZ_OVERRIDE
  {
    MOZ_ASSERT(mThread);

    MonitorAutoLock lock(mMonitor);
    while (true) {
      // Wait in a loop: a spurious wakeup (or a notify that raced with an
      // empty queue) must not fall through to front() on an empty queue.
      while (mInputFrames.empty() && !mEnding) {
        ALOGE("Waiting OMXOutputDrain");
        // Wait for new input.
        lock.Wait();
      }

      if (mEnding) {
        ALOGE("Ending OMXOutputDrain");
        // Stop draining.
        break;
      }

      MOZ_ASSERT(!mInputFrames.empty());
      EncodedFrame frame = mInputFrames.front();
      bool shouldPop = false;
      {
        // Release monitor while draining because it's blocking.
        MonitorAutoUnlock unlock(mMonitor);
        // |frame| provides size and time of corresponding input.
        shouldPop = DrainOutput(frame);
      }
      if (shouldPop) {
        mInputFrames.pop();
      }
    }

    CODEC_LOGD("OMXOutputDrain Ended");
    return NS_OK;
  }

protected:
  OMXOutputDrain()
    : mMonitor("OMXOutputDrain monitor")
    , mEnding(false)
  {}

  // Drain output buffer for input frame aFrame.
  // aFrame contains info such as size and time of the input frame and can be
  // used to construct data for encoded/decoded callbacks if needed.
  // Return true to indicate we should pop input queue, and return false to
  // indicate aFrame should not be removed from input queue (either output is
  // not ready yet and should try again later, or the drained output is SPS/PPS
  // NALUs that have no corresponding input in queue).
  virtual bool DrainOutput(const EncodedFrame& aFrame) = 0;

private:
  // This monitor protects all things below it, and is also used to
  // wait/notify queued input.
  Monitor mMonitor;
  nsCOMPtr<nsIThread> mThread;
  std::queue<EncodedFrame> mInputFrames;
  bool mEnding;
};
// H.264 decoder using stagefright.
class WebrtcOMXDecoder MOZ_FINAL
{
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcOMXDecoder)
public:
  WebrtcOMXDecoder(const char* aMimeType)
    : mWidth(0)
    , mHeight(0)
    , mStarted(false)
  {
    // Create binder thread pool required by stagefright.
    android::ProcessState::self()->startThreadPool();

    mLooper = new ALooper;
    mLooper->start();
    // Actual configuration/start happens in ConfigureWithParamSets() once
    // SPS/PPS have been seen.
    mCodec = MediaCodec::CreateByType(mLooper, aMimeType, false /* encoder */);
  }

  virtual ~WebrtcOMXDecoder()
  {
    if (mStarted) {
      Stop();
    }
    if (mCodec != nullptr) {
      mCodec->release();
      mCodec.clear();
    }
    mLooper.clear();
  }

  // Parse SPS/PPS NALUs.
  static sp<MetaData> ParseParamSets(sp<ABuffer>& aParamSets)
  {
    return MakeAVCCodecSpecificData(aParamSets);
  }

  // Configure decoder using data returned by ParseParamSets(): extracts
  // width/height, creates the native window surface used to receive graphic
  // buffers, configures the codec and starts it.
  status_t ConfigureWithParamSets(const sp<MetaData>& aParamSets)
  {
    MOZ_ASSERT(mCodec != nullptr);
    if (mCodec == nullptr) {
      return INVALID_OPERATION;
    }

    int32_t width = 0;
    bool ok = aParamSets->findInt32(kKeyWidth, &width);
    MOZ_ASSERT(ok && width > 0);
    int32_t height = 0;
    ok = aParamSets->findInt32(kKeyHeight, &height);
    MOZ_ASSERT(ok && height > 0);
    CODEC_LOGD("OMX:%p decoder config width:%d height:%d", this, width, height);

    sp<AMessage> config = new AMessage();
    config->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
    config->setInt32("width", width);
    config->setInt32("height", height);
    mWidth = width;
    mHeight = height;

    sp<Surface> surface = nullptr;
    mNativeWindow = new GonkNativeWindow();
    if (mNativeWindow.get()) {
      mNativeWindowClient = new GonkNativeWindowClient(mNativeWindow->getBufferQueue());
      if (mNativeWindowClient.get()) {
        surface = new Surface(mNativeWindowClient->getIGraphicBufferProducer());
      }
    }
    status_t result = mCodec->configure(config, surface, nullptr, 0);
    if (result == OK) {
      result = Start();
    }
    return result;
  }

  // Copy one encoded frame into an OMX input buffer and queue it. On success
  // (for non-config data) also queues the frame's metadata to the output
  // drain, creating the drain on first use.
  status_t
  FillInput(const webrtc::EncodedImage& aEncoded, bool aIsFirstFrame,
            int64_t& aRenderTimeMs, webrtc::DecodedImageCallback* aCallback)
  {
    MOZ_ASSERT(mCodec != nullptr);
    if (mCodec == nullptr) {
      return INVALID_OPERATION;
    }

    size_t index;
    // Give the very first frame a longer timeout: the codec may still be
    // warming up.
    status_t err = mCodec->dequeueInputBuffer(&index,
      aIsFirstFrame ? START_DEQUEUE_BUFFER_TIMEOUT_US : DEQUEUE_BUFFER_TIMEOUT_US);
    if (err != OK) {
      CODEC_LOGE("decode dequeue input buffer error:%d", err);
      return err;
    }

    uint32_t flags = 0;
    if (aEncoded._frameType == webrtc::kKeyFrame) {
      // First frame carries SPS/PPS and is submitted as codec config data.
      flags = aIsFirstFrame ? MediaCodec::BUFFER_FLAG_CODECCONFIG : MediaCodec::BUFFER_FLAG_SYNCFRAME;
    }
    size_t size = aEncoded._length;
    MOZ_ASSERT(size);
    const sp<ABuffer>& omxIn = mInputBuffers.itemAt(index);
    MOZ_ASSERT(omxIn->capacity() >= size);
    omxIn->setRange(0, size);
    // Copying is needed because MediaCodec API doesn't support externally
    // allocated buffer as input.
    memcpy(omxIn->data(), aEncoded._buffer, size);
    // 90kHz -> us. Widen to 64 bits before multiplying: _timeStamp is 32-bit
    // and |_timeStamp * 1000| would overflow for timestamps >= ~4.3M
    // (roughly 48 seconds of 90kHz clock).
    int64_t inputTimeUs = static_cast<int64_t>(aEncoded._timeStamp) * 1000 / 90;
    err = mCodec->queueInputBuffer(index, 0, size, inputTimeUs, flags);
    if (err == OK && !(flags & MediaCodec::BUFFER_FLAG_CODECCONFIG)) {
      if (mOutputDrain == nullptr) {
        mOutputDrain = new OutputDrain(this, aCallback);
        mOutputDrain->Start();
      }
      EncodedFrame frame;
      frame.mWidth = mWidth;
      frame.mHeight = mHeight;
      frame.mTimestamp = aEncoded._timeStamp;
      frame.mRenderTimeMs = aRenderTimeMs;
      mOutputDrain->QueueInput(frame);
    }

    return err;
  }

  // Dequeue one decoded output buffer and deliver it to |aCallback|.
  // Returns the MediaCodec status; OK is also returned on unrecoverable
  // dequeue errors so OutputDrain drops the queued input (see default case).
  status_t
  DrainOutput(const EncodedFrame& aFrame, webrtc::DecodedImageCallback* aCallback)
  {
    MOZ_ASSERT(mCodec != nullptr);
    if (mCodec == nullptr) {
      return INVALID_OPERATION;
    }

    size_t index = 0;
    size_t outOffset = 0;
    size_t outSize = 0;
    int64_t outTime = -1ll;
    uint32_t outFlags = 0;
    status_t err = mCodec->dequeueOutputBuffer(&index, &outOffset, &outSize,
                                               &outTime, &outFlags,
                                               DRAIN_THREAD_TIMEOUT_US);
    switch (err) {
      case OK:
        break;
      case -EAGAIN:
        // Not an error: output not available yet. Try later.
        CODEC_LOGI("decode dequeue OMX output buffer timed out. Try later.");
        return err;
      case INFO_FORMAT_CHANGED:
        // Not an error: will get this value when OMX output buffer is enabled,
        // or when input size changed.
        CODEC_LOGD("decode dequeue OMX output buffer format change");
        return err;
      case INFO_OUTPUT_BUFFERS_CHANGED:
        // Not an error: will get this value when OMX output buffer changed
        // (probably because of input size change).
        CODEC_LOGD("decode dequeue OMX output buffer change");
        err = mCodec->getOutputBuffers(&mOutputBuffers);
        MOZ_ASSERT(err == OK);
        return INFO_OUTPUT_BUFFERS_CHANGED;
      default:
        CODEC_LOGE("decode dequeue OMX output buffer error:%d", err);
        // Return OK to instruct OutputDrain to drop input from queue.
        return OK;
    }

    sp<ABuffer> omxOut = mOutputBuffers.itemAt(index);
    nsAutoPtr<webrtc::I420VideoFrame> videoFrame(GenerateVideoFrame(aFrame,
                                                                    index,
                                                                    omxOut));
    if (videoFrame == nullptr) {
      mCodec->releaseOutputBuffer(index);
    } else if (aCallback) {
      aCallback->Decoded(*videoFrame);
      // OMX buffer will be released by RecycleCallback after rendered.
    }

    return err;
  }

private:
  // Drain thread adapter: forwards DrainOutput() calls to the owning decoder.
  class OutputDrain : public OMXOutputDrain
  {
  public:
    OutputDrain(WebrtcOMXDecoder* aOMX, webrtc::DecodedImageCallback* aCallback)
      : OMXOutputDrain()
      , mOMX(aOMX)
      , mCallback(aCallback)
    {}

  protected:
    virtual bool DrainOutput(const EncodedFrame& aFrame) MOZ_OVERRIDE
    {
      return (mOMX->DrainOutput(aFrame, mCallback) == OK);
    }

  private:
    WebrtcOMXDecoder* mOMX;  // Non-owning; decoder owns the drain.
    webrtc::DecodedImageCallback* mCallback;
  };

  // Start the codec and cache its input/output buffer arrays.
  status_t Start()
  {
    MOZ_ASSERT(!mStarted);
    if (mStarted) {
      return OK;
    }

    status_t err = mCodec->start();
    if (err == OK) {
      mStarted = true;
      mCodec->getInputBuffers(&mInputBuffers);
      mCodec->getOutputBuffers(&mOutputBuffers);
    }
    return err;
  }

  // Stop the drain thread first (it calls back into the codec), then the codec.
  status_t Stop()
  {
    MOZ_ASSERT(mStarted);
    if (!mStarted) {
      return OK;
    }
    if (mOutputDrain != nullptr) {
      mOutputDrain->Stop();
      mOutputDrain = nullptr;
    }

    status_t err = mCodec->stop();
    if (err == OK) {
      mInputBuffers.clear();
      mOutputBuffers.clear();
      mStarted = false;
    } else {
      MOZ_ASSERT(false);
    }
    return err;
  }

  // Wrap the decoded graphic buffer in a TextureVideoFrame for the WebRTC
  // rendering pipeline. Returns nullptr (nothing to render) when no graphic
  // buffer or texture client can be obtained; the caller releases the OMX
  // buffer in that case, otherwise RecycleCallback returns it after render.
  webrtc::I420VideoFrame*
  GenerateVideoFrame(const EncodedFrame& aEncoded, uint32_t aBufferIndex,
                     const sp<ABuffer>& aOMXBuffer)
  {
    // TODO: Get decoded frame buffer through native window to obsolete
    //       changes to stagefright code.
    sp<RefBase> obj;
    bool hasGraphicBuffer = aOMXBuffer->meta()->findObject("graphic-buffer", &obj);
    if (!hasGraphicBuffer) {
      MOZ_ASSERT(false, "Decoder doesn't produce graphic buffer");
      // Nothing to render.
      return nullptr;
    }

    sp<GraphicBuffer> gb = static_cast<GraphicBuffer*>(obj.get());
    if (!gb.get()) {
      MOZ_ASSERT(false, "Null graphic buffer");
      return nullptr;
    }

    RefPtr<mozilla::layers::TextureClient> textureClient =
      mNativeWindow->getTextureClientFromBuffer(gb.get());
    if (!textureClient) {
      // Guard against lookup failure; dereferencing a null client would crash.
      MOZ_ASSERT(false, "Null texture client");
      return nullptr;
    }
    textureClient->SetRecycleCallback(RecycleCallback::ReturnOMXBuffer,
                                      new RecycleCallback(mCodec, aBufferIndex));

    int width = gb->getWidth();
    int height = gb->getHeight();
    layers::GrallocImage::GrallocData grallocData;
    grallocData.mPicSize = gfx::IntSize(width, height);
    grallocData.mGraphicBuffer = textureClient;

    layers::GrallocImage* grallocImage = new layers::GrallocImage();
    grallocImage->SetData(grallocData);

    nsAutoPtr<webrtc::I420VideoFrame> videoFrame(
      new webrtc::TextureVideoFrame(new ImageNativeHandle(grallocImage),
                                    width, height,
                                    aEncoded.mTimestamp,
                                    aEncoded.mRenderTimeMs));

    return videoFrame.forget();
  }

  sp<ALooper> mLooper;
  sp<MediaCodec> mCodec; // OMXCodec
  int mWidth;
  int mHeight;
  android::Vector<sp<ABuffer> > mInputBuffers;
  android::Vector<sp<ABuffer> > mOutputBuffers;
  bool mStarted;

  sp<GonkNativeWindow> mNativeWindow;
  sp<GonkNativeWindowClient> mNativeWindowClient;

  RefPtr<OutputDrain> mOutputDrain;
};
// Drain-thread implementation for the encoder: pulls encoded buffers out of
// OMXVideoEncoder, splits them into NALUs and feeds them to the WebRTC
// encoded-image callback. Tracks whether the previous output was SPS/PPS so
// parameter sets are prepended to I-frames exactly when needed.
class EncOutputDrain : public OMXOutputDrain
{
public:
  EncOutputDrain(OMXVideoEncoder* aOMX, webrtc::EncodedImageCallback* aCallback)
    : OMXOutputDrain()
    , mOMX(aOMX)
    , mCallback(aCallback)
    , mIsPrevOutputParamSets(false)
  {}

protected:
  // See OMXOutputDrain::DrainOutput() contract: return true to pop the input
  // queue, false to retry the same input later (or when output has no
  // corresponding input, i.e. SPS/PPS).
  virtual bool DrainOutput(const EncodedFrame& aInputFrame) MOZ_OVERRIDE
  {
    nsTArray<uint8_t> output;
    int64_t timeUs = -1ll;
    int flags = 0;
    nsresult rv = mOMX->GetNextEncodedFrame(&output, &timeUs, &flags,
                                            DRAIN_THREAD_TIMEOUT_US);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // Fail to get encoded frame. The corresponding input frame should be
      // removed.
      return true;
    }

    if (output.Length() == 0) {
      // No encoded data yet. Try later.
      CODEC_LOGD("OMX:%p (encode no output available this time)", mOMX);
      return false;
    }

    bool isParamSets = (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG);
    bool isIFrame = (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME);
    // Should not be parameter sets and I-frame at the same time.
    MOZ_ASSERT(!(isParamSets && isIFrame));

    if (mCallback) {
      // Implementation here assumes encoder output to be a buffer containing
      // parameter sets(SPS + PPS) followed by a series of buffers, each for
      // one input frame.
      // TODO: handle output violating this assumption in bug 997110.
      webrtc::EncodedImage encoded(output.Elements(), output.Length(),
                                   output.Capacity());
      encoded._frameType = (isParamSets || isIFrame) ?
                           webrtc::kKeyFrame : webrtc::kDeltaFrame;
      // Metadata (size/time) comes from the queued input frame, not from OMX.
      encoded._encodedWidth = aInputFrame.mWidth;
      encoded._encodedHeight = aInputFrame.mHeight;
      encoded._timeStamp = aInputFrame.mTimestamp;
      encoded.capture_time_ms_ = aInputFrame.mRenderTimeMs;
      encoded._completeFrame = true;

      ALOGE("OMX:%p encode frame type:%d size:%u", mOMX, encoded._frameType, encoded._length);

      // Prepend SPS/PPS to I-frames unless they were sent last time.
      SendEncodedDataToCallback(encoded, isIFrame && !mIsPrevOutputParamSets);
      mIsPrevOutputParamSets = isParamSets;
    }

    // Tell base class not to pop input for parameter sets blob because they
    // don't have corresponding input.
    return !isParamSets;
  }

private:
  // Send encoded data to callback. The data will be broken into individual NALUs
  // if necessary and sent to callback one by one. This function can also insert
  // SPS/PPS NALUs in front of input data if requested (by recursing once on a
  // parameter-sets image before sending the actual frame data).
  void SendEncodedDataToCallback(webrtc::EncodedImage& aEncodedImage,
                                 bool aPrependParamSets)
  {
    // Individual NALU inherits metadata from input encoded data.
    webrtc::EncodedImage nalu(aEncodedImage);

    if (aPrependParamSets) {
      // Insert current parameter sets in front of the input encoded data.
      // |paramSets| is local, so the recursive call below must finish sending
      // before it goes out of scope.
      nsTArray<uint8_t> paramSets;
      mOMX->GetCodecConfig(&paramSets);
      MOZ_ASSERT(paramSets.Length() > 4); // Start code + ...
      // Set buffer range.
      nalu._buffer = paramSets.Elements();
      nalu._length = paramSets.Length();
      // Break into NALUs and send.
      SendEncodedDataToCallback(nalu, false);
    }

    // Break input encoded data into NALUs and send each one to callback.
    const uint8_t* data = aEncodedImage._buffer;
    size_t size = aEncodedImage._length;
    const uint8_t* nalStart = nullptr;
    size_t nalSize = 0;
    while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
      nalu._buffer = const_cast<uint8_t*>(nalStart);
      nalu._length = nalSize;
      mCallback->Encoded(nalu, nullptr, nullptr);
    }
  }

  OMXVideoEncoder* mOMX;  // Non-owning; see WebrtcOMXH264VideoEncoder::mOMX.
  webrtc::EncodedImageCallback* mCallback;
  bool mIsPrevOutputParamSets;
};
// Encoder.
// Members start zeroed/null; the actual OMX encoder is created lazily in
// InitEncode() and configured on the first Encode() call.
WebrtcOMXH264VideoEncoder::WebrtcOMXH264VideoEncoder()
  : mOMX(nullptr)
  , mCallback(nullptr)
  , mWidth(0)
  , mHeight(0)
  , mFrameRate(0)
  , mOMXConfigured(false)
{
  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p constructed", this);
}
// Create the OMX encoder (once) and record the requested dimensions and
// frame rate. aNumOfCores/aMaxPayloadSize are unused: actual OMX
// configuration is deferred to Encode() (see comment below).
int32_t
WebrtcOMXH264VideoEncoder::InitEncode(const webrtc::VideoCodec* aCodecSettings,
                                      int32_t aNumOfCores,
                                      uint32_t aMaxPayloadSize)
{
  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p init", this);

  if (mOMX == nullptr) {
    nsAutoPtr<OMXVideoEncoder> omx(OMXCodecWrapper::CreateAVCEncoder());
    if (NS_WARN_IF(omx == nullptr)) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    mOMX = omx.forget();
  }

  // Defer configuration until 1st frame is received because this function will
  // be called more than once, and unfortunately with incorrect setting values
  // at first.
  mWidth = aCodecSettings->width;
  mHeight = aCodecSettings->height;
  mFrameRate = aCodecSettings->maxFramerate;

  return WEBRTC_VIDEO_CODEC_OK;
}
// Encode one I420 frame. Configures OMX with the deferred settings on first
// call, wraps the WebRTC frame planes in a PlanarYCbCrImage (no copy), submits
// it to the OMX encoder, and queues frame metadata for the output drain.
// aCodecSpecificInfo and aFrameTypes are unused.
int32_t
WebrtcOMXH264VideoEncoder::Encode(const webrtc::I420VideoFrame& aInputImage,
                                  const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
                                  const std::vector<webrtc::VideoFrameType>* aFrameTypes)
{
  MOZ_ASSERT(mOMX != nullptr);
  if (mOMX == nullptr) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  if (!mOMXConfigured) {
    mOMX->Configure(mWidth, mHeight, mFrameRate,
                    OMXVideoEncoder::BlobFormat::AVC_NAL);
    mOMXConfigured = true;
    CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p start OMX with image size:%ux%u",
               this, mWidth, mHeight);
  }

  // Wrap I420VideoFrame input with PlanarYCbCrImage for OMXVideoEncoder.
  layers::PlanarYCbCrData yuvData;
  yuvData.mYChannel = const_cast<uint8_t*>(aInputImage.buffer(webrtc::kYPlane));
  yuvData.mYSize = gfx::IntSize(aInputImage.width(), aInputImage.height());
  yuvData.mYStride = aInputImage.stride(webrtc::kYPlane);
  MOZ_ASSERT(aInputImage.stride(webrtc::kUPlane) == aInputImage.stride(webrtc::kVPlane));
  yuvData.mCbCrStride = aInputImage.stride(webrtc::kUPlane);
  yuvData.mCbChannel = const_cast<uint8_t*>(aInputImage.buffer(webrtc::kUPlane));
  yuvData.mCrChannel = const_cast<uint8_t*>(aInputImage.buffer(webrtc::kVPlane));
  // Chroma planes are half-size in each dimension (4:2:0), rounded up.
  yuvData.mCbCrSize = gfx::IntSize((yuvData.mYSize.width + 1) / 2,
                                   (yuvData.mYSize.height + 1) / 2);
  yuvData.mPicSize = yuvData.mYSize;
  yuvData.mStereoMode = StereoMode::MONO;
  layers::PlanarYCbCrImage img(nullptr);
  img.SetDataNoCopy(yuvData);

  // 90kHz -> us. Widen the 32-bit RTP timestamp to 64 bits before multiplying;
  // |timestamp() * 1000| would overflow uint32 for timestamps >= ~4.3M
  // (roughly 48 seconds of 90kHz clock).
  nsresult rv = mOMX->Encode(&img,
                             yuvData.mYSize.width,
                             yuvData.mYSize.height,
                             static_cast<int64_t>(aInputImage.timestamp()) * 1000 / 90,
                             0);
  if (rv == NS_OK) {
    if (mOutputDrain == nullptr) {
      mOutputDrain = new EncOutputDrain(mOMX, mCallback);
      mOutputDrain->Start();
    }
    EncodedFrame frame;
    frame.mWidth = mWidth;
    frame.mHeight = mHeight;
    frame.mTimestamp = aInputImage.timestamp();
    frame.mRenderTimeMs = aInputImage.render_time_ms();
    mOutputDrain->QueueInput(frame);
  }

  return (rv == NS_OK) ? WEBRTC_VIDEO_CODEC_OK : WEBRTC_VIDEO_CODEC_ERROR;
}
// Store the callback that will receive encoded NALUs (used by EncOutputDrain).
int32_t
WebrtcOMXH264VideoEncoder::RegisterEncodeCompleteCallback(
    webrtc::EncodedImageCallback* aCallback)
{
  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p set callback:%p", this, aCallback);
  MOZ_ASSERT(aCallback);
  mCallback = aCallback;

  return WEBRTC_VIDEO_CODEC_OK;
}
// Stop the output drain thread and drop the OMX encoder. Safe to call more
// than once (also invoked from the destructor).
int32_t
WebrtcOMXH264VideoEncoder::Release()
{
  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p will be released", this);

  if (mOutputDrain != nullptr) {
    mOutputDrain->Stop();
    mOutputDrain = nullptr;
  }
  mOMX = nullptr;
  return WEBRTC_VIDEO_CODEC_OK;
}
// Destructor delegates teardown to Release().
WebrtcOMXH264VideoEncoder::~WebrtcOMXH264VideoEncoder()
{
  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p will be destructed", this);

  Release();
}
// Inform the encoder of the new packet loss rate and the round-trip time of
// the network. aPacketLossRate is fraction lost and can be 0~255
// (255 means 100% lost).
// Note: stagefright doesn't handle these parameters, so this is log-only.
int32_t
WebrtcOMXH264VideoEncoder::SetChannelParameters(uint32_t aPacketLossRate,
                                                int aRoundTripTimeMs)
{
  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p set channel packet loss:%u, rtt:%d",
             this, aPacketLossRate, aRoundTripTimeMs);

  return WEBRTC_VIDEO_CODEC_OK;
}
// TODO: Bug 997567. Find the way to support frame rate change.
// Only the bitrate is forwarded to OMX; aFrameRate is logged and ignored.
int32_t
WebrtcOMXH264VideoEncoder::SetRates(uint32_t aBitRate, uint32_t aFrameRate)
{
  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p set bitrate:%u, frame rate:%u)",
             this, aBitRate, aFrameRate);
  MOZ_ASSERT(mOMX != nullptr);
  if (mOMX == nullptr) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }

  mOMX->SetBitrate(aBitRate);

  return WEBRTC_VIDEO_CODEC_OK;
}
// Decoder.
// mOMX stays null until the first Decode() call sees SPS/PPS (see Decode()).
WebrtcOMXH264VideoDecoder::WebrtcOMXH264VideoDecoder()
  : mCallback(nullptr)
  , mOMX(nullptr)
{
  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p will be constructed", this);
}
// Intentionally a no-op: configuration needs width/height extracted from
// SPS/PPS, which only arrive with the bitstream in Decode().
int32_t
WebrtcOMXH264VideoDecoder::InitDecode(const webrtc::VideoCodec* aCodecSettings,
                                      int32_t aNumOfCores)
{
  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p init OMX:%p", this, mOMX.get());

  // Defer configuration until SPS/PPS NALUs (where actual decoder config
  // values can be extracted) are received.
  return WEBRTC_VIDEO_CODEC_OK;
}
// Decode one encoded frame. On the first frame, SPS/PPS are parsed out of the
// bitstream to create and configure the OMX decoder; subsequent frames are
// fed straight into it. aMissingFrames/aFragmentation/aCodecSpecificInfo are
// unused.
int32_t
WebrtcOMXH264VideoDecoder::Decode(const webrtc::EncodedImage& aInputImage,
                                  bool aMissingFrames,
                                  const webrtc::RTPFragmentationHeader* aFragmentation,
                                  const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
                                  int64_t aRenderTimeMs)
{
  if (aInputImage._length == 0 || !aInputImage._buffer) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  ALOGE("WebrtcOMXH264VideoDecoder:%p will decode", this);

  bool configured = !!mOMX;
  if (!configured) {
    // Search for SPS/PPS NALUs in input to get decoder config.
    sp<ABuffer> input = new ABuffer(aInputImage._buffer, aInputImage._length);
    sp<MetaData> paramSets = WebrtcOMXDecoder::ParseParamSets(input);
    if (NS_WARN_IF(paramSets == nullptr)) {
      // Cannot config decoder because SPS/PPS NALUs haven't been seen.
      return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }

    RefPtr<WebrtcOMXDecoder> omx = new WebrtcOMXDecoder(MEDIA_MIMETYPE_VIDEO_AVC);
    status_t result = omx->ConfigureWithParamSets(paramSets);
    if (NS_WARN_IF(result != OK)) {
      return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
    CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p start OMX", this);
    mOMX = omx;
  }

  // Retry as long as the codec reports no input buffer available (-EAGAIN);
  // any other result (success or error) ends the loop.
  bool feedFrame = true;
  while (feedFrame) {
    status_t err = mOMX->FillInput(aInputImage, !configured, aRenderTimeMs, mCallback);
    feedFrame = (err == -EAGAIN); // No input buffer available. Try again.
  }

  return WEBRTC_VIDEO_CODEC_OK;
}
// Store the callback that will receive decoded frames (forwarded to the
// decoder's output drain via FillInput()).
int32_t
WebrtcOMXH264VideoDecoder::RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* aCallback)
{
  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p set callback:%p", this, aCallback);
  MOZ_ASSERT(aCallback);
  mCallback = aCallback;

  return WEBRTC_VIDEO_CODEC_OK;
}
// Drop the OMX decoder; its destructor stops the codec and its drain thread.
int32_t
WebrtcOMXH264VideoDecoder::Release()
{
  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p will be released", this);

  mOMX = nullptr;
  return WEBRTC_VIDEO_CODEC_OK;
}
// Destructor delegates teardown to Release().
WebrtcOMXH264VideoDecoder::~WebrtcOMXH264VideoDecoder()
{
  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p will be destructed", this);
  Release();
}
// Reset is intentionally a no-op (warning logged); the OMX decoder keeps its
// state across calls.
int32_t
WebrtcOMXH264VideoDecoder::Reset()
{
  CODEC_LOGW("WebrtcOMXH264VideoDecoder::Reset() will NOT reset decoder");
  return WEBRTC_VIDEO_CODEC_OK;
}
}

View File

@ -0,0 +1,88 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef WEBRTC_GONK
#pragma error WebrtcOMXH264VideoCodec works only on B2G.
#endif
#ifndef WEBRTC_OMX_H264_CODEC_H_
#define WEBRTC_OMX_H264_CODEC_H_
#include "AudioConduit.h"
#include "VideoConduit.h"
namespace android {
class OMXVideoEncoder;
}
namespace mozilla {
class WebrtcOMXDecoder;
class OMXOutputDrain;
// H.264 encoder backed by B2G's OMX/stagefright hardware codec
// (android::OMXVideoEncoder). See WebrtcOMXH264VideoCodec.cpp for details.
class WebrtcOMXH264VideoEncoder : public WebrtcVideoEncoder
{
public:
  WebrtcOMXH264VideoEncoder();

  virtual ~WebrtcOMXH264VideoEncoder();

  // Implement VideoEncoder interface.
  virtual int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings,
                             int32_t aNumOfCores,
                             uint32_t aMaxPayloadSize) MOZ_OVERRIDE;

  virtual int32_t Encode(const webrtc::I420VideoFrame& aInputImage,
                         const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
                         const std::vector<webrtc::VideoFrameType>* aFrameTypes) MOZ_OVERRIDE;

  virtual int32_t RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* aCallback) MOZ_OVERRIDE;

  virtual int32_t Release() MOZ_OVERRIDE;

  virtual int32_t SetChannelParameters(uint32_t aPacketLossRate,
                                       int aRoundTripTimeMs) MOZ_OVERRIDE;

  virtual int32_t SetRates(uint32_t aBitRate, uint32_t aFrameRate) MOZ_OVERRIDE;

private:
  RefPtr<android::OMXVideoEncoder> mOMX;  // Created lazily in InitEncode().
  webrtc::EncodedImageCallback* mCallback;  // Non-owning; set by RegisterEncodeCompleteCallback().
  RefPtr<OMXOutputDrain> mOutputDrain;  // Drain thread pulling encoded output.
  uint32_t mWidth;
  uint32_t mHeight;
  uint32_t mFrameRate;
  bool mOMXConfigured;  // Deferred OMX configuration done (first Encode()).
  // NOTE(review): not referenced in the visible implementation — confirm
  // whether this member is still needed.
  webrtc::EncodedImage mEncodedImage;
};
// H.264 decoder backed by B2G's OMX/stagefright hardware codec
// (WebrtcOMXDecoder, defined in WebrtcOMXH264VideoCodec.cpp).
class WebrtcOMXH264VideoDecoder : public WebrtcVideoDecoder
{
public:
  WebrtcOMXH264VideoDecoder();

  virtual ~WebrtcOMXH264VideoDecoder();

  // Implement VideoDecoder interface.
  virtual int32_t InitDecode(const webrtc::VideoCodec* aCodecSettings,
                             int32_t aNumOfCores) MOZ_OVERRIDE;
  virtual int32_t Decode(const webrtc::EncodedImage& aInputImage,
                         bool aMissingFrames,
                         const webrtc::RTPFragmentationHeader* aFragmentation,
                         const webrtc::CodecSpecificInfo* aCodecSpecificInfo = nullptr,
                         int64_t aRenderTimeMs = -1) MOZ_OVERRIDE;
  virtual int32_t RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* callback) MOZ_OVERRIDE;

  virtual int32_t Release() MOZ_OVERRIDE;

  virtual int32_t Reset() MOZ_OVERRIDE;

private:
  webrtc::DecodedImageCallback* mCallback;  // Non-owning; set by RegisterDecodeCompleteCallback().
  RefPtr<WebrtcOMXDecoder> mOMX;  // Created on first Decode() with SPS/PPS.
};
}
#endif // WEBRTC_OMX_H264_CODEC_H_

View File

@ -37,6 +37,10 @@
#include <sslproto.h>
#include <algorithm>
#ifdef MOZ_OMX_ENCODER
#include "OMXVideoCodec.h"
#endif
extern "C" {
#include "ccsdp.h"
#include "vcm.h"
@ -54,6 +58,11 @@ extern void lsm_update_active_tone(vcm_tones_t tone, cc_call_handle_t call_handl
extern void lsm_stop_multipart_tone_timer(void);
extern void lsm_stop_continuous_tone_timer(void);
static int vcmEnsureExternalCodec(
const mozilla::RefPtr<mozilla::VideoSessionConduit>& conduit,
mozilla::VideoCodecConfig* config,
bool send);
}//end extern "C"
static const char* logTag = "VcmSipccBinding";
@ -1693,6 +1702,9 @@ static int vcmRxStartICE_m(cc_mcapid_t mcap_id,
ccsdpCodecName(payloads[i].codec_type),
payloads[i].video.rtcp_fb_types,
pc.impl()->load_manager());
if (vcmEnsureExternalCodec(conduit, config_raw, false)) {
continue;
}
configs.push_back(config_raw);
}
@ -2100,6 +2112,45 @@ short vcmTxOpen(cc_mcapid_t mcap_id,
return 0;
}
/*
* Add external H.264 video codec.
*/
// Register the external OMX H.264 codec (encoder when |send|, decoder
// otherwise) on |conduit| for codec configs named "I420". Returns 0 on
// success or when MOZ_OMX_ENCODER is not built (no-op); otherwise a
// kMediaConduit* error code.
static int vcmEnsureExternalCodec(
    const mozilla::RefPtr<mozilla::VideoSessionConduit>& conduit,
    mozilla::VideoCodecConfig* config,
    bool send)
{
#ifdef MOZ_OMX_ENCODER
  // Here we use "I420" to register H.264 because WebRTC.org code has a
  // whitelist of supported video codec in |webrtc::ViECodecImpl::CodecValid()|
  // and will reject registration of those not in it.
  // TODO: bug 995884 to support H.264 in WebRTC.org code.
  if (config->mName != "I420") {
    // Do nothing for non-I420 config.
    return send ? kMediaConduitInvalidSendCodec : kMediaConduitInvalidReceiveCodec;
  }
  // Register H.264 codec.
  if (send) {
    VideoEncoder* encoder = OMXVideoCodec::CreateEncoder(OMXVideoCodec::CodecType::CODEC_H264);
    if (encoder) {
      return conduit->SetExternalSendCodec(config->mType, encoder);
    } else {
      return kMediaConduitInvalidSendCodec;
    }
  } else {
    VideoDecoder* decoder = OMXVideoCodec::CreateDecoder(OMXVideoCodec::CodecType::CODEC_H264);
    if (decoder) {
      return conduit->SetExternalRecvCodec(config->mType, decoder);
    } else {
      return kMediaConduitInvalidReceiveCodec;
    }
  }
  // NOTE(review): unreachable — both branches above return.
  NS_NOTREACHED("Shouldn't get here!");
#endif
  return 0;
}
/**
* start tx stream
* Note: For video calls, for a given call_handle there will be
@ -2363,7 +2414,13 @@ static int vcmTxStartICE_m(cc_mcapid_t mcap_id,
mozilla::VideoSessionConduit::Create(static_cast<VideoSessionConduit *>(rx_conduit.get()));
// Find the appropriate media conduit config
if (!conduit || conduit->ConfigureSendMediaCodec(config))
if (!conduit)
return VCM_ERROR;
if (vcmEnsureExternalCodec(conduit, config_raw, true))
return VCM_ERROR;
if (conduit->ConfigureSendMediaCodec(config))
return VCM_ERROR;
pc.impl()->media()->AddConduit(level, false, conduit);
@ -3218,4 +3275,3 @@ short vcmGetVideoMaxFr(uint16_t codec,
&ret));
return ret;
}

View File

@ -23,7 +23,7 @@
#include "ImageTypes.h"
#include "ImageContainer.h"
#include "VideoUtils.h"
#ifdef MOZ_WIDGET_GONK
#ifdef WEBRTC_GONK
#include "GrallocImages.h"
#include "mozilla/layers/GrallocTextureClient.h"
#endif
@ -1110,7 +1110,7 @@ void MediaPipelineTransmit::PipelineListener::ProcessVideoChunk(
last_img_ = serial;
ImageFormat format = img->GetFormat();
#ifdef MOZ_WIDGET_GONK
#ifdef WEBRTC_GONK
if (format == ImageFormat::GRALLOC_PLANAR_YCBCR) {
layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(img);
android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
@ -1404,40 +1404,49 @@ void MediaPipelineReceiveVideo::PipelineListener::RenderVideoFrame(
const unsigned char* buffer,
unsigned int buffer_size,
uint32_t time_stamp,
int64_t render_time) {
int64_t render_time,
const RefPtr<layers::Image>& video_image) {
#ifdef MOZILLA_INTERNAL_API
ReentrantMonitorAutoEnter enter(monitor_);
// Create a video frame and append it to the track.
if (buffer) {
// Create a video frame using |buffer|.
#ifdef MOZ_WIDGET_GONK
ImageFormat format = ImageFormat::GRALLOC_PLANAR_YCBCR;
ImageFormat format = ImageFormat::GRALLOC_PLANAR_YCBCR;
#else
ImageFormat format = ImageFormat::PLANAR_YCBCR;
ImageFormat format = ImageFormat::PLANAR_YCBCR;
#endif
nsRefPtr<layers::Image> image = image_container_->CreateImage(format);
nsRefPtr<layers::Image> image = image_container_->CreateImage(format);
layers::PlanarYCbCrImage* yuvImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
uint8_t* frame = const_cast<uint8_t*>(static_cast<const uint8_t*> (buffer));
const uint8_t lumaBpp = 8;
const uint8_t chromaBpp = 4;
layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
uint8_t* frame = const_cast<uint8_t*>(static_cast<const uint8_t*> (buffer));
const uint8_t lumaBpp = 8;
const uint8_t chromaBpp = 4;
layers::PlanarYCbCrData yuvData;
yuvData.mYChannel = frame;
yuvData.mYSize = IntSize(width_, height_);
yuvData.mYStride = width_ * lumaBpp/ 8;
yuvData.mCbCrStride = width_ * chromaBpp / 8;
yuvData.mCbChannel = frame + height_ * yuvData.mYStride;
yuvData.mCrChannel = yuvData.mCbChannel + height_ * yuvData.mCbCrStride / 2;
yuvData.mCbCrSize = IntSize(width_/ 2, height_/ 2);
yuvData.mPicX = 0;
yuvData.mPicY = 0;
yuvData.mPicSize = IntSize(width_, height_);
yuvData.mStereoMode = StereoMode::MONO;
layers::PlanarYCbCrData data;
data.mYChannel = frame;
data.mYSize = IntSize(width_, height_);
data.mYStride = width_ * lumaBpp/ 8;
data.mCbCrStride = width_ * chromaBpp / 8;
data.mCbChannel = frame + height_ * data.mYStride;
data.mCrChannel = data.mCbChannel + height_ * data.mCbCrStride / 2;
data.mCbCrSize = IntSize(width_/ 2, height_/ 2);
data.mPicX = 0;
data.mPicY = 0;
data.mPicSize = IntSize(width_, height_);
data.mStereoMode = StereoMode::MONO;
yuvImage->SetData(yuvData);
videoImage->SetData(data);
image_ = image.forget();
#endif
image_ = image.forget();
}
#ifdef WEBRTC_GONK
else {
// Decoder produced video frame that can be appended to the track directly.
MOZ_ASSERT(video_image);
image_ = video_image;
}
#endif // WEBRTC_GONK
#endif // MOZILLA_INTERNAL_API
}
void MediaPipelineReceiveVideo::PipelineListener::

View File

@ -643,9 +643,11 @@ class MediaPipelineReceiveVideo : public MediaPipelineReceive {
virtual void RenderVideoFrame(const unsigned char* buffer,
unsigned int buffer_size,
uint32_t time_stamp,
int64_t render_time) {
int64_t render_time,
const ImageHandle& handle) {
pipeline_->listener_->RenderVideoFrame(buffer, buffer_size, time_stamp,
render_time);
render_time,
handle.GetImage());
}
private:
@ -678,8 +680,8 @@ class MediaPipelineReceiveVideo : public MediaPipelineReceive {
void RenderVideoFrame(const unsigned char* buffer,
unsigned int buffer_size,
uint32_t time_stamp,
int64_t render_time);
int64_t render_time,
const RefPtr<layers::Image>& video_image);
private:
int width_;

View File

@ -273,7 +273,12 @@ nsresult PeerConnectionCtx::Initialize() {
// Only adding codecs supported
//codecMask |= VCM_CODEC_RESOURCE_H263;
//codecMask |= VCM_CODEC_RESOURCE_H264;
#ifdef MOZILLA_INTERNAL_API
if (Preferences::GetBool("media.peerconnection.video.h264_enabled")) {
codecMask |= VCM_CODEC_RESOURCE_H264;
}
#endif
codecMask |= VCM_CODEC_RESOURCE_VP8;
//codecMask |= VCM_CODEC_RESOURCE_I420;
mCCM->setVideoCodecs(codecMask);

View File

@ -614,10 +614,6 @@ sip_config_video_supported_codecs_get (rtp_ptype aSupportedCodecs[],
//codec_mask = vcmGetVideoCodecList(DSP_ENCODEONLY);
codec_mask = vcmGetVideoCodecList(VCM_DSP_IGNORE);
}
if ( codec_mask & VCM_CODEC_RESOURCE_VP8) {
aSupportedCodecs[count] = RTP_VP8;
count++;
}
if ( codec_mask & VCM_CODEC_RESOURCE_H264) {
/*
* include payload type for packetization mode 1 only if ucm sis version
@ -635,6 +631,10 @@ sip_config_video_supported_codecs_get (rtp_ptype aSupportedCodecs[],
aSupportedCodecs[count] = RTP_H264_P0;
count++;
}
if ( codec_mask & VCM_CODEC_RESOURCE_VP8) {
aSupportedCodecs[count] = RTP_VP8;
count++;
}
if ( codec_mask & VCM_CODEC_RESOURCE_H263) {
aSupportedCodecs[count] = RTP_H263;
count++;

View File

@ -4666,6 +4666,7 @@ gsmsdp_negotiate_rtcp_fb (cc_sdp_t *cc_sdp_p,
*/
switch (codec) {
case RTP_VP8:
case RTP_I420:
fb_types &=
sdp_rtcp_fb_nack_to_bitmap(SDP_RTCP_FB_NACK_BASIC) |
sdp_rtcp_fb_nack_to_bitmap(SDP_RTCP_FB_NACK_PLI) |

View File

@ -356,7 +356,8 @@ public:
void RenderVideoFrame(const unsigned char* buffer,
unsigned int buffer_size,
uint32_t time_stamp,
int64_t render_time)
int64_t render_time,
const mozilla::ImageHandle& handle)
{
//write the frame to the file
if(VerifyFrame(buffer, buffer_size) == 0)

View File

@ -83,7 +83,7 @@ public class HomeProvider extends SQLiteBridgeContentProvider {
private Cursor queryFakeItems(Uri uri, String[] projection, String selection, String[] selectionArgs, String sortOrder) {
JSONArray items = null;
try {
final String jsonString = RawResource.get(getContext(), R.raw.fake_home_items);
final String jsonString = RawResource.getAsString(getContext(), R.raw.fake_home_items);
items = new JSONArray(jsonString);
} catch (IOException e) {
Log.e(LOGTAG, "Error getting fake home items", e);

View File

@ -86,6 +86,13 @@ public class ToolbarComponent extends BaseComponent {
return (ImageButton) getToolbarView().findViewById(R.id.forward);
}
/**
* Returns the View for the edit cancel button in the browser toolbar.
*/
private ImageButton getEditCancelButton() {
return (ImageButton) getToolbarView().findViewById(R.id.edit_cancel);
}
private CharSequence getTitle() {
return getTitleHelper(true);
}
@ -145,14 +152,19 @@ public class ToolbarComponent extends BaseComponent {
public ToolbarComponent dismissEditingMode() {
assertIsEditing();
if (getUrlEditText().isInputMethodTarget()) {
// Drop the soft keyboard.
// TODO: Solo.hideSoftKeyboard() does not clear focus, causing unexpected
// behavior, but we may want to use it over goBack().
mSolo.goBack();
}
// Cancel Button not implemeneted in tablet.
if (DeviceHelper.isTablet()) {
if (getUrlEditText().isInputMethodTarget()) {
// Drop the soft keyboard.
// TODO: Solo.hideSoftKeyboard() does not clear focus, causing unexpected
// behavior, but we may want to use it over goBack().
mSolo.goBack();
}
mSolo.goBack();
mSolo.goBack();
} else {
mSolo.clickOnView(getEditCancelButton());
}
waitForNotEditing();

View File

@ -12,8 +12,17 @@ import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringWriter;
/**
* {@code RawResource} provides API to load raw resources in different
* forms. For now, we only load them as strings. We're using raw resources
* as localizable 'assets' as opposed to a string that can be directly
* translatable e.g. JSON file vs string.
*
* This is just a utility class to avoid code duplication for the different
* cases where need to read such assets.
*/
public final class RawResource {
public static String get(Context context, int id) throws IOException {
public static String getAsString(Context context, int id) throws IOException {
InputStreamReader reader = null;
try {

View File

@ -2882,6 +2882,7 @@ function Tab(aURL, aParams) {
this.lastTouchedAt = Date.now();
this._zoom = 1.0;
this._drawZoom = 1.0;
this._restoreZoom = false;
this._fixedMarginLeft = 0;
this._fixedMarginTop = 0;
this._fixedMarginRight = 0;
@ -3486,6 +3487,7 @@ Tab.prototype = {
let win = this.browser.contentWindow;
win.scrollTo(x, y);
this.saveSessionZoom(aViewport.zoom);
this.userScrollPos.x = win.scrollX;
this.userScrollPos.y = win.scrollY;
@ -3535,12 +3537,13 @@ Tab.prototype = {
getViewport: function() {
let screenW = gScreenWidth - gViewportMargins.left - gViewportMargins.right;
let screenH = gScreenHeight - gViewportMargins.top - gViewportMargins.bottom;
let zoom = this.restoredSessionZoom() || this._zoom;
let viewport = {
width: screenW,
height: screenH,
cssWidth: screenW / this._zoom,
cssHeight: screenH / this._zoom,
cssWidth: screenW / zoom,
cssHeight: screenH / zoom,
pageLeft: 0,
pageTop: 0,
pageRight: screenW,
@ -3548,13 +3551,13 @@ Tab.prototype = {
// We make up matching css page dimensions
cssPageLeft: 0,
cssPageTop: 0,
cssPageRight: screenW / this._zoom,
cssPageBottom: screenH / this._zoom,
cssPageRight: screenW / zoom,
cssPageBottom: screenH / zoom,
fixedMarginLeft: this._fixedMarginLeft,
fixedMarginTop: this._fixedMarginTop,
fixedMarginRight: this._fixedMarginRight,
fixedMarginBottom: this._fixedMarginBottom,
zoom: this._zoom,
zoom: zoom,
};
// Set the viewport offset to current scroll offset
@ -4186,6 +4189,9 @@ Tab.prototype = {
tabID: this.id,
};
// Restore zoom only when moving in session history, not for new page loads.
this._restoreZoom = aMessage != "New";
if (aParams) {
if ("url" in aParams)
message.url = aParams.url;
@ -4198,6 +4204,22 @@ Tab.prototype = {
sendMessageToJava(message);
},
saveSessionZoom: function(aZoom) {
let cwu = this.browser.contentWindow.QueryInterface(Ci.nsIInterfaceRequestor).getInterface(Ci.nsIDOMWindowUtils);
cwu.setResolution(aZoom / window.devicePixelRatio, aZoom / window.devicePixelRatio);
},
restoredSessionZoom: function() {
if (!this._restoreZoom) {
return null;
}
let cwu = this.browser.contentWindow.QueryInterface(Ci.nsIInterfaceRequestor).getInterface(Ci.nsIDOMWindowUtils);
let res = {x: {}, y: {}};
cwu.getResolution(res.x, res.y);
return res.x.value * window.devicePixelRatio;
},
OnHistoryNewEntry: function(aUri) {
this._sendHistoryEvent("New", { url: aUri.spec });
},
@ -4322,8 +4344,11 @@ Tab.prototype = {
// In all of these cases, we maintain how much actual content is visible
// within the screen width. Note that "actual content" may be different
// with respect to CSS pixels because of the CSS viewport size changing.
let zoomScale = (screenW * oldBrowserWidth) / (aOldScreenWidth * viewportW);
let zoom = (aInitialLoad && metadata.defaultZoom) ? metadata.defaultZoom : this.clampZoom(this._zoom * zoomScale);
let zoom = this.restoredSessionZoom() || metadata.defaultZoom;
if (!zoom || !aInitialLoad) {
let zoomScale = (screenW * oldBrowserWidth) / (aOldScreenWidth * viewportW);
zoom = this.clampZoom(this._zoom * zoomScale);
}
this.setResolution(zoom, false);
this.setScrollClampingSize(zoom);
@ -4459,7 +4484,8 @@ Tab.prototype = {
// and zoom when calculating the new ones, so we need to reset these
// things here before calling updateMetadata.
this.setBrowserSize(kDefaultCSSViewportWidth, kDefaultCSSViewportHeight);
this.setResolution(gScreenWidth / this.browserWidth, false);
let zoom = this.restoredSessionZoom() || gScreenWidth / this.browserWidth;
this.setResolution(zoom, true);
ViewportHandler.updateMetadata(this, true);
// Note that if we draw without a display-port, things can go wrong. By the

View File

@ -13,6 +13,7 @@ jar.sources += [
'src/tests/BrowserTestCase.java',
'src/tests/TestGeckoSharedPrefs.java',
'src/tests/TestJarReader.java',
'src/tests/TestRawResource.java',
'src/tests/TestTopSitesCursorWrapper.java',
]
jar.generated_sources = [] # None yet -- try to keep it this way.

View File

@ -0,0 +1,67 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
package org.mozilla.gecko.browser.tests;
import android.content.Context;
import android.content.res.Resources;
import android.test.mock.MockContext;
import android.test.mock.MockResources;
import android.util.TypedValue;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.IOException;
import org.mozilla.gecko.util.RawResource;
/**
* Tests whether RawResource.getAsString() produces the right String
* result after reading the returned raw resource's InputStream.
*/
public class TestRawResource extends BrowserTestCase {
private static final int RAW_RESOURCE_ID = 1;
private static final String RAW_CONTENTS = "RAW";
private static class TestContext extends MockContext {
private final Resources resources;
public TestContext() {
resources = new TestResources();
}
@Override
public Resources getResources() {
return resources;
}
}
/**
* Browser instrumentation tests can't have access to test-only
* resources (bug 994135) yet so we mock the access to resources
* for now.
*/
private static class TestResources extends MockResources {
@Override
public InputStream openRawResource(int id) {
if (id == RAW_RESOURCE_ID) {
return new ByteArrayInputStream(RAW_CONTENTS.getBytes());
}
return null;
}
}
public void testGet() {
Context context = new TestContext();
String result;
try {
result = RawResource.getAsString(context, RAW_RESOURCE_ID);
} catch (IOException e) {
result = null;
}
assertEquals(RAW_CONTENTS, result);
}
}

View File

@ -245,6 +245,7 @@ pref("media.peerconnection.enabled", true);
pref("media.peerconnection.video.enabled", true);
pref("media.navigator.video.max_fs", 1200); // 640x480 == 1200mb
pref("media.navigator.video.max_fr", 30);
pref("media.peerconnection.video.h264_enabled", false);
#else
pref("media.navigator.video.default_width",640);
pref("media.navigator.video.default_height",480);

View File

@ -22,6 +22,7 @@
#include "nsDiskCacheDevice.h"
#include "nsDiskCacheDeviceSQL.h"
#include "nsCacheUtils.h"
#include "../cache2/CacheObserver.h"
#include "nsIObserverService.h"
#include "nsIPrefService.h"
@ -3108,6 +3109,10 @@ nsCacheService::SetDiskSmartSize_Locked()
{
nsresult rv;
if (mozilla::net::CacheObserver::UseNewCache()) {
return NS_ERROR_NOT_AVAILABLE;
}
if (!mObserver->DiskCacheParentDirectory())
return NS_ERROR_NOT_AVAILABLE;

View File

@ -319,7 +319,10 @@ CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk *aChunk)
LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08x, chunk=%p, idx=%d]",
this, aResult, aChunk, index));
// TODO handle ERROR state
if (NS_FAILED(aResult)) {
SetError(aResult);
CacheFileIOManager::DoomFile(mHandle, nullptr);
}
if (HaveChunkListeners(index)) {
rv = NotifyChunkListeners(index, aResult, aChunk);
@ -341,14 +344,11 @@ CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk)
MOZ_ASSERT(!mMemoryOnly);
MOZ_ASSERT(!mOpeningFile);
// TODO handle ERROR state
MOZ_ASSERT(mHandle);
if (NS_FAILED(aResult)) {
// TODO ??? doom entry
// TODO mark this chunk as memory only, since it wasn't written to disk and
// therefore cannot be released from memory
// LOG
SetError(aResult);
CacheFileIOManager::DoomFile(mHandle, nullptr);
}
if (NS_SUCCEEDED(aResult) && !aChunk->IsDirty()) {
@ -359,7 +359,7 @@ CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk)
// notify listeners if there is any
if (HaveChunkListeners(aChunk->Index())) {
// don't release the chunk since there are some listeners queued
rv = NotifyChunkListeners(aChunk->Index(), NS_OK, aChunk);
rv = NotifyChunkListeners(aChunk->Index(), aResult, aChunk);
if (NS_SUCCEEDED(rv)) {
MOZ_ASSERT(aChunk->mRefCnt != 2);
return NS_OK;
@ -374,22 +374,25 @@ CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk)
}
#ifdef CACHE_CHUNKS
LOG(("CacheFile::OnChunkWritten() - Caching unused chunk [this=%p, chunk=%p]",
this, aChunk));
if (NS_SUCCEEDED(aResult)) {
LOG(("CacheFile::OnChunkWritten() - Caching unused chunk [this=%p, "
"chunk=%p]", this, aChunk));
} else {
LOG(("CacheFile::OnChunkWritten() - Removing failed chunk [this=%p, "
"chunk=%p]", this, aChunk));
}
#else
LOG(("CacheFile::OnChunkWritten() - Releasing unused chunk [this=%p, "
"chunk=%p]", this, aChunk));
LOG(("CacheFile::OnChunkWritten() - Releasing %s chunk [this=%p, chunk=%p]",
NS_SUCCEEDED(aResult) ? "unused" : "failed", this, aChunk));
#endif
aChunk->mRemovingChunk = true;
ReleaseOutsideLock(static_cast<CacheFileChunkListener *>(
aChunk->mFile.forget().take()));
RemoveChunkInternal(aChunk,
#ifdef CACHE_CHUNKS
mCachedChunks.Put(aChunk->Index(), aChunk);
NS_SUCCEEDED(aResult));
#else
false);
#endif
mChunks.Remove(aChunk->Index());
WriteMetadataIfNeededLocked();
return NS_OK;
@ -1000,6 +1003,16 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]",
chunk.get(), this));
// We might get failed chunk between releasing the lock in
// CacheFileChunk::OnDataWritten/Read and CacheFile::OnChunkWritten/Read
rv = chunk->GetStatus();
if (NS_FAILED(rv)) {
SetError(rv);
LOG(("CacheFile::GetChunkLocked() - Found failed chunk in mChunks "
"[this=%p]", this));
return rv;
}
if (chunk->IsReady() || aWriter) {
chunk.swap(*_retval);
}
@ -1056,12 +1069,9 @@ CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
rv = chunk->Read(mHandle, std::min(static_cast<uint32_t>(mDataSize - off),
static_cast<uint32_t>(kChunkSize)),
mMetadata->GetHash(aIndex), this);
if (NS_FAILED(rv)) {
chunk->mRemovingChunk = true;
ReleaseOutsideLock(static_cast<CacheFileChunkListener *>(
chunk->mFile.forget().take()));
mChunks.Remove(aIndex);
NS_ENSURE_SUCCESS(rv, rv);
if (NS_WARN_IF(NS_FAILED(rv))) {
RemoveChunkInternal(chunk, false);
return rv;
}
if (aWriter) {
@ -1204,6 +1214,15 @@ CacheFile::RemoveChunk(CacheFileChunk *aChunk)
}
#endif
if (NS_FAILED(mStatus)) {
// Don't write any chunk to disk since this entry will be doomed
LOG(("CacheFile::RemoveChunk() - Removing chunk because of status "
"[this=%p, chunk=%p, mStatus=0x%08x]", this, chunk.get(), mStatus));
RemoveChunkInternal(chunk, false);
return mStatus;
}
if (chunk->IsDirty() && !mMemoryOnly && !mOpeningFile) {
LOG(("CacheFile::RemoveChunk() - Writing dirty chunk to the disk "
"[this=%p]", this));
@ -1212,10 +1231,15 @@ CacheFile::RemoveChunk(CacheFileChunk *aChunk)
rv = chunk->Write(mHandle, this);
if (NS_FAILED(rv)) {
// TODO ??? doom entry
// TODO mark this chunk as memory only, since it wasn't written to disk
// and therefore cannot be released from memory
// LOG
LOG(("CacheFile::RemoveChunk() - CacheFileChunk::Write() failed "
"synchronously. Removing it. [this=%p, chunk=%p, rv=0x%08x]",
this, chunk.get(), rv));
RemoveChunkInternal(chunk, false);
SetError(rv);
CacheFileIOManager::DoomFile(mHandle, nullptr);
return rv;
}
else {
// Chunk will be removed in OnChunkWritten if it is still unused
@ -1241,18 +1265,14 @@ CacheFile::RemoveChunk(CacheFileChunk *aChunk)
}
#endif
chunk->mRemovingChunk = true;
ReleaseOutsideLock(static_cast<CacheFileChunkListener *>(
chunk->mFile.forget().take()));
#ifndef CACHE_CHUNKS
// Cache the chunk only when we have a reason to do so
if (mMemoryOnly || mOpeningFile)
RemoveChunkInternal(chunk,
#ifdef CACHE_CHUNKS
true);
#else
// Cache the chunk only when we have a reason to do so
mMemoryOnly || mOpeningFile);
#endif
{
mCachedChunks.Put(chunk->Index(), chunk);
}
mChunks.Remove(chunk->Index());
if (!mMemoryOnly)
WriteMetadataIfNeededLocked();
}
@ -1260,6 +1280,20 @@ CacheFile::RemoveChunk(CacheFileChunk *aChunk)
return NS_OK;
}
void
CacheFile::RemoveChunkInternal(CacheFileChunk *aChunk, bool aCacheChunk)
{
aChunk->mRemovingChunk = true;
ReleaseOutsideLock(static_cast<CacheFileChunkListener *>(
aChunk->mFile.forget().take()));
if (aCacheChunk) {
mCachedChunks.Put(aChunk->Index(), aChunk);
}
mChunks.Remove(aChunk->Index());
}
nsresult
CacheFile::RemoveInput(CacheFileInputStream *aInput)
{
@ -1501,11 +1535,10 @@ CacheFile::WriteMetadataIfNeededLocked(bool aFireAndForget)
mWritingMetadata = true;
mDataIsDirty = false;
} else {
LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing synchronously failed "
"[this=%p]", this));
LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing synchronously "
"failed [this=%p]", this));
// TODO: close streams with error
if (NS_SUCCEEDED(mStatus))
mStatus = rv;
SetError(rv);
}
}
@ -1615,6 +1648,14 @@ CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx)
return NS_OK;
}
void
CacheFile::SetError(nsresult aStatus)
{
if (NS_SUCCEEDED(mStatus)) {
mStatus = aStatus;
}
}
nsresult
CacheFile::InitIndexEntry()
{

View File

@ -126,6 +126,7 @@ private:
CacheFileChunkListener *aCallback,
CacheFileChunk **_retval);
nsresult RemoveChunk(CacheFileChunk *aChunk);
void RemoveChunkInternal(CacheFileChunk *aChunk, bool aCacheChunk);
nsresult RemoveInput(CacheFileInputStream *aInput);
nsresult RemoveOutput(CacheFileOutputStream *aOutput);
@ -161,6 +162,8 @@ private:
nsresult PadChunkWithZeroes(uint32_t aChunkIdx);
void SetError(nsresult aStatus);
nsresult InitIndexEntry();
mozilla::Mutex mLock;

View File

@ -127,6 +127,7 @@ CacheFileChunk::CacheFileChunk(CacheFile *aFile, uint32_t aIndex)
: CacheMemoryConsumer(aFile->mOpenAsMemoryOnly ? MEMORY_ONLY : DONT_REPORT)
, mIndex(aIndex)
, mState(INITIAL)
, mStatus(NS_OK)
, mIsDirty(false)
, mRemovingChunk(false)
, mDataSize(0)
@ -203,17 +204,17 @@ CacheFileChunk::Read(CacheFileHandle *aHandle, uint32_t aLen,
rv = CacheFileIOManager::Read(aHandle, mIndex * kChunkSize, mRWBuf, aLen,
this);
if (NS_FAILED(rv)) {
mState = READING; // TODO: properly handle error states
// mState = ERROR;
NS_ENSURE_SUCCESS(rv, rv);
if (NS_WARN_IF(NS_FAILED(rv))) {
rv = mIndex ? NS_ERROR_FILE_CORRUPTED : NS_ERROR_FILE_NOT_FOUND;
SetError(rv);
} else {
mState = READING;
mListener = aCallback;
mDataSize = aLen;
mReadHash = aHash;
}
mState = READING;
mListener = aCallback;
mDataSize = aLen;
mReadHash = aHash;
return NS_OK;
return rv;
}
nsresult
@ -239,16 +240,15 @@ CacheFileChunk::Write(CacheFileHandle *aHandle,
rv = CacheFileIOManager::Write(aHandle, mIndex * kChunkSize, mRWBuf,
mDataSize, false, this);
if (NS_FAILED(rv)) {
mState = WRITING; // TODO: properly handle error states
// mState = ERROR;
NS_ENSURE_SUCCESS(rv, rv);
if (NS_WARN_IF(NS_FAILED(rv))) {
SetError(rv);
} else {
mState = WRITING;
mListener = aCallback;
mIsDirty = false;
}
mState = WRITING;
mListener = aCallback;
mIsDirty = false;
return NS_OK;
return rv;
}
void
@ -368,6 +368,10 @@ CacheFileChunk::UpdateDataSize(uint32_t aOffset, uint32_t aLen, bool aEOF)
MOZ_ASSERT(!aEOF, "Implement me! What to do with opened streams?");
MOZ_ASSERT(aOffset <= mDataSize);
// UpdateDataSize() is called only when we've written some data to the chunk
// and we never write data anymore once some error occurs.
MOZ_ASSERT(mState != ERROR);
LOG(("CacheFileChunk::UpdateDataSize() [this=%p, offset=%d, len=%d, EOF=%d]",
this, aOffset, aLen, aEOF));
@ -468,29 +472,23 @@ CacheFileChunk::OnDataWritten(CacheFileHandle *aHandle, const char *aBuf,
MOZ_ASSERT(mState == WRITING);
MOZ_ASSERT(mListener);
#if 0
// TODO: properly handle error states
if (NS_FAILED(aResult)) {
mState = ERROR;
}
else {
#endif
if (NS_WARN_IF(NS_FAILED(aResult))) {
SetError(aResult);
} else {
mState = READY;
if (!mBuf) {
mBuf = mRWBuf;
mBufSize = mRWBufSize;
}
else {
free(mRWBuf);
}
mRWBuf = nullptr;
mRWBufSize = 0;
DoMemoryReport(MemorySize());
#if 0
}
#endif
if (!mBuf) {
mBuf = mRWBuf;
mBufSize = mRWBufSize;
} else {
free(mRWBuf);
}
mRWBuf = nullptr;
mRWBufSize = 0;
DoMemoryReport(MemorySize());
mListener.swap(listener);
}
@ -555,14 +553,10 @@ CacheFileChunk::OnDataRead(CacheFileHandle *aHandle, char *aBuf,
}
if (NS_FAILED(aResult)) {
#if 0
// TODO: properly handle error states
mState = ERROR;
#endif
mState = READY;
aResult = mIndex ? NS_ERROR_FILE_CORRUPTED : NS_ERROR_FILE_NOT_FOUND;
SetError(aResult);
mDataSize = 0;
}
else {
} else {
mState = READY;
}
@ -600,7 +594,7 @@ CacheFileChunk::IsReady() const
{
mFile->AssertOwnsLock();
return (mState == READY || mState == WRITING);
return (NS_SUCCEEDED(mStatus) && (mState == READY || mState == WRITING));
}
bool
@ -611,6 +605,26 @@ CacheFileChunk::IsDirty() const
return mIsDirty;
}
nsresult
CacheFileChunk::GetStatus()
{
mFile->AssertOwnsLock();
return mStatus;
}
void
CacheFileChunk::SetError(nsresult aStatus)
{
if (NS_SUCCEEDED(mStatus)) {
MOZ_ASSERT(mState != ERROR);
mStatus = aStatus;
mState = ERROR;
} else {
MOZ_ASSERT(mState == ERROR);
}
}
char *
CacheFileChunk::BufForWriting() const
{
@ -641,6 +655,10 @@ CacheFileChunk::EnsureBufSize(uint32_t aBufSize)
{
mFile->AssertOwnsLock();
// EnsureBufSize() is called only when we want to write some data to the chunk
// and we never write data anymore once some error occurs.
MOZ_ASSERT(mState != ERROR);
if (mBufSize >= aBufSize)
return;

View File

@ -97,6 +97,9 @@ public:
bool IsReady() const;
bool IsDirty() const;
nsresult GetStatus();
void SetError(nsresult aStatus);
char * BufForWriting() const;
const char * BufForReading() const;
void EnsureBufSize(uint32_t aBufSize);
@ -123,6 +126,7 @@ private:
uint32_t mIndex;
EState mState;
nsresult mStatus;
bool mIsDirty;
bool mRemovingChunk;
uint32_t mDataSize;

View File

@ -27,6 +27,7 @@
#include "nsAppDirectoryServiceDefs.h"
#include "private/pprio.h"
#include "mozilla/VisualEventTracer.h"
#include "mozilla/Preferences.h"
// include files for ftruncate (or equivalent)
#if defined(XP_UNIX)
@ -43,9 +44,16 @@
namespace mozilla {
namespace net {
#define kOpenHandlesLimit 64
#define kMetadataWriteDelay 5000
#define kRemoveTrashStartDelay 60000 // in milliseconds
#define kOpenHandlesLimit 64
#define kMetadataWriteDelay 5000
#define kRemoveTrashStartDelay 60000 // in milliseconds
#define kSmartSizeUpdateInterval 60000 // in milliseconds
#ifdef ANDROID
const uint32_t kMaxCacheSizeKB = 200*1024; // 200 MB
#else
const uint32_t kMaxCacheSizeKB = 350*1024; // 350 MB
#endif
bool
CacheFileHandle::DispatchRelease()
@ -2378,6 +2386,8 @@ CacheFileIOManager::EvictIfOverLimitInternal()
return NS_OK;
}
UpdateSmartCacheSize();
uint32_t cacheUsage;
rv = CacheIndex::GetCacheSize(&cacheUsage);
NS_ENSURE_SUCCESS(rv, rv);
@ -2424,6 +2434,8 @@ CacheFileIOManager::OverLimitEvictionInternal()
return NS_ERROR_NOT_INITIALIZED;
}
UpdateSmartCacheSize();
while (true) {
uint32_t cacheUsage;
rv = CacheIndex::GetCacheSize(&cacheUsage);
@ -3532,6 +3544,116 @@ CacheFileIOManager::SyncRemoveAllCacheFiles()
}
}
// Returns default ("smart") size (in KB) of cache, given available disk space
// (also in KB)
static uint32_t
SmartCacheSize(const uint32_t availKB)
{
uint32_t maxSize = kMaxCacheSizeKB;
if (availKB > 100 * 1024 * 1024) {
return maxSize; // skip computing if we're over 100 GB
}
// Grow/shrink in 10 MB units, deliberately, so that in the common case we
// don't shrink cache and evict items every time we startup (it's important
// that we don't slow down startup benchmarks).
uint32_t sz10MBs = 0;
uint32_t avail10MBs = availKB / (1024*10);
// .5% of space above 25 GB
if (avail10MBs > 2500) {
sz10MBs += static_cast<uint32_t>((avail10MBs - 2500)*.005);
avail10MBs = 2500;
}
// 1% of space between 7GB -> 25 GB
if (avail10MBs > 700) {
sz10MBs += static_cast<uint32_t>((avail10MBs - 700)*.01);
avail10MBs = 700;
}
// 5% of space between 500 MB -> 7 GB
if (avail10MBs > 50) {
sz10MBs += static_cast<uint32_t>((avail10MBs - 50)*.05);
avail10MBs = 50;
}
#ifdef ANDROID
// On Android, smaller/older devices may have very little storage and
// device owners may be sensitive to storage footprint: Use a smaller
// percentage of available space and a smaller minimum.
// 20% of space up to 500 MB (10 MB min)
sz10MBs += std::max<uint32_t>(1, static_cast<uint32_t>(avail10MBs * .2));
#else
// 40% of space up to 500 MB (50 MB min)
sz10MBs += std::max<uint32_t>(5, static_cast<uint32_t>(avail10MBs * .4));
#endif
return std::min<uint32_t>(maxSize, sz10MBs * 10 * 1024);
}
nsresult
CacheFileIOManager::UpdateSmartCacheSize()
{
MOZ_ASSERT(mIOThread->IsCurrentThread());
nsresult rv;
if (!CacheObserver::UseNewCache()) {
return NS_ERROR_NOT_AVAILABLE;
}
if (!CacheObserver::SmartCacheSizeEnabled()) {
return NS_ERROR_NOT_AVAILABLE;
}
// Wait at least kSmartSizeUpdateInterval before recomputing smart size.
static const TimeDuration kUpdateLimit =
TimeDuration::FromMilliseconds(kSmartSizeUpdateInterval);
if (!mLastSmartSizeTime.IsNull() &&
(TimeStamp::NowLoRes() - mLastSmartSizeTime) < kUpdateLimit) {
return NS_OK;
}
// Do not compute smart size when cache size is not reliable.
bool isUpToDate = false;
CacheIndex::IsUpToDate(&isUpToDate);
if (!isUpToDate) {
return NS_ERROR_NOT_AVAILABLE;
}
uint32_t cacheUsage;
rv = CacheIndex::GetCacheSize(&cacheUsage);
if (NS_WARN_IF(NS_FAILED(rv))) {
LOG(("CacheFileIOManager::UpdateSmartCacheSize() - Cannot get cacheUsage! "
"[rv=0x%08x]", rv));
return rv;
}
int64_t avail;
rv = mCacheDirectory->GetDiskSpaceAvailable(&avail);
if (NS_WARN_IF(NS_FAILED(rv))) {
// Do not change smart size.
LOG(("CacheFileIOManager::UpdateSmartCacheSize() - GetDiskSpaceAvailable() "
"failed! [rv=0x%08x]", rv));
return rv;
}
mLastSmartSizeTime = TimeStamp::NowLoRes();
uint32_t smartSize = SmartCacheSize(static_cast<uint32_t>(avail / 1024) +
cacheUsage);
if (smartSize == (CacheObserver::DiskCacheCapacity() >> 10)) {
// Smart size has not changed.
return NS_OK;
}
CacheObserver::SetDiskCacheCapacity(smartSize << 10);
return NS_OK;
}
// Memory reporting
namespace { // anon

View File

@ -359,6 +359,12 @@ private:
static nsresult CacheIndexStateChanged();
nsresult CacheIndexStateChangedInternal();
// Smart size calculation. UpdateSmartCacheSize() must be called on IO thread.
// It is called in EvictIfOverLimitInternal() just before we decide whether to
// start overlimit eviction or not and also in OverLimitEvictionInternal()
// before we start an eviction loop.
nsresult UpdateSmartCacheSize();
// Memory reporting (private part)
size_t SizeOfExcludingThisInternal(mozilla::MallocSizeOf mallocSizeOf) const;
@ -380,6 +386,7 @@ private:
nsCOMPtr<nsIDirectoryEnumerator> mTrashDirEnumerator;
nsTArray<nsCString> mFailedTrashDirs;
nsRefPtr<CacheFileContextEvictor> mContextEvictor;
TimeStamp mLastSmartSizeTime;
};
} // net

View File

@ -83,6 +83,9 @@ CacheFileInputStream::Available(uint64_t *_retval)
}
EnsureCorrectChunk(false);
if (NS_FAILED(mStatus))
return mStatus;
*_retval = 0;
if (mChunk) {
@ -124,6 +127,9 @@ CacheFileInputStream::Read(char *aBuf, uint32_t aCount, uint32_t *_retval)
}
EnsureCorrectChunk(false);
if (NS_FAILED(mStatus))
return mStatus;
if (!mChunk) {
if (mListeningForChunk == -1) {
LOG((" no chunk, returning 0 read and NS_OK"));
@ -194,6 +200,9 @@ CacheFileInputStream::ReadSegments(nsWriteSegmentFun aWriter, void *aClosure,
}
EnsureCorrectChunk(false);
if (NS_FAILED(mStatus))
return mStatus;
if (!mChunk) {
if (mListeningForChunk == -1) {
*_retval = 0;
@ -435,7 +444,17 @@ CacheFileInputStream::OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
return NS_OK;
}
mChunk = aChunk;
if (NS_SUCCEEDED(aResult)) {
mChunk = aChunk;
} else if (aResult != NS_ERROR_NOT_AVAILABLE) {
// We store the error in mStatus, so we can propagate it later to consumer
// in Read(), Available() etc. We need to handle NS_ERROR_NOT_AVAILABLE
// differently since it is returned when the requested chunk is not
// available and there is no writer that could create it, i.e. it means that
// we've reached the end of the file.
mStatus = aResult;
}
MaybeNotifyListener();
return NS_OK;
@ -528,7 +547,14 @@ CacheFileInputStream::EnsureCorrectChunk(bool aReleaseOnly)
if (NS_FAILED(rv)) {
LOG(("CacheFileInputStream::EnsureCorrectChunk() - GetChunkLocked failed. "
"[this=%p, idx=%d, rv=0x%08x]", this, chunkIdx, rv));
if (rv != NS_ERROR_NOT_AVAILABLE) {
// We store the error in mStatus, so we can propagate it later to consumer
// in Read(), Available() etc. We need to handle NS_ERROR_NOT_AVAILABLE
// differently since it is returned when the requested chunk is not
// available and there is no writer that could create it, i.e. it means
// that we've reached the end of the file.
mStatus = rv;
}
}
else if (!mChunk) {
mListeningForChunk = static_cast<int64_t>(chunkIdx);
@ -587,7 +613,7 @@ CacheFileInputStream::MaybeNotifyListener()
if (!mCallback)
return;
if (mClosed) {
if (mClosed || NS_FAILED(mStatus)) {
NotifyListener();
return;
}

View File

@ -98,6 +98,8 @@ CacheFileOutputStream::Write(const char * aBuf, uint32_t aCount,
while (aCount) {
EnsureCorrectChunk(false);
if (NS_FAILED(mStatus))
return mStatus;
FillHole();
@ -342,10 +344,13 @@ CacheFileOutputStream::EnsureCorrectChunk(bool aReleaseOnly)
if (aReleaseOnly)
return;
DebugOnly<nsresult> rv;
nsresult rv;
rv = mFile->GetChunkLocked(chunkIdx, true, nullptr, getter_AddRefs(mChunk));
MOZ_ASSERT(NS_SUCCEEDED(rv),
"CacheFile::GetChunkLocked() should always succeed for writer");
if (NS_FAILED(rv)) {
LOG(("CacheFileOutputStream::EnsureCorrectChunk() - GetChunkLocked failed. "
"[this=%p, idx=%d, rv=0x%08x]", this, chunkIdx, rv));
mStatus = rv;
}
}
void

View File

@ -53,6 +53,9 @@ int32_t CacheObserver::sAutoMemoryCacheCapacity = -1;
static uint32_t const kDefaultDiskCacheCapacity = 250 * 1024; // 250 MB
uint32_t CacheObserver::sDiskCacheCapacity = kDefaultDiskCacheCapacity;
static bool const kDefaultSmartCacheSizeEnabled = false;
bool CacheObserver::sSmartCacheSizeEnabled = kDefaultSmartCacheSizeEnabled;
static uint32_t const kDefaultMaxMemoryEntrySize = 4 * 1024; // 4 MB
uint32_t CacheObserver::sMaxMemoryEntrySize = kDefaultMaxMemoryEntrySize;
@ -133,6 +136,8 @@ CacheObserver::AttachToPreferences()
mozilla::Preferences::AddUintVarCache(
&sDiskCacheCapacity, "browser.cache.disk.capacity", kDefaultDiskCacheCapacity);
mozilla::Preferences::AddBoolVarCache(
&sSmartCacheSizeEnabled, "browser.cache.disk.smart_size.enabled", kDefaultSmartCacheSizeEnabled);
mozilla::Preferences::AddIntVarCache(
&sMemoryCacheCapacity, "browser.cache.memory.capacity", kDefaultMemoryCacheCapacity);
@ -273,6 +278,32 @@ bool const CacheObserver::UseNewCache()
return true;
}
// static
void
CacheObserver::SetDiskCacheCapacity(uint32_t aCapacity)
{
sDiskCacheCapacity = aCapacity >> 10;
if (!sSelf) {
return;
}
if (NS_IsMainThread()) {
sSelf->StoreDiskCacheCapacity();
} else {
nsCOMPtr<nsIRunnable> event =
NS_NewRunnableMethod(sSelf, &CacheObserver::StoreDiskCacheCapacity);
NS_DispatchToMainThread(event);
}
}
void
CacheObserver::StoreDiskCacheCapacity()
{
mozilla::Preferences::SetInt("browser.cache.disk.capacity",
sDiskCacheCapacity);
}
// static
void CacheObserver::ParentDirOverride(nsIFile** aDir)
{

View File

@ -17,7 +17,7 @@ namespace net {
class CacheObserver : public nsIObserver
, public nsSupportsWeakReference
{
NS_DECL_ISUPPORTS
NS_DECL_THREADSAFE_ISUPPORTS
NS_DECL_NSIOBSERVER
virtual ~CacheObserver() {}
@ -37,6 +37,9 @@ class CacheObserver : public nsIObserver
static uint32_t const MemoryCacheCapacity(); // result in bytes.
static uint32_t const DiskCacheCapacity() // result in bytes.
{ return sDiskCacheCapacity << 10; }
static void SetDiskCacheCapacity(uint32_t); // parameter in bytes.
static bool const SmartCacheSizeEnabled()
{ return sSmartCacheSizeEnabled; }
static uint32_t const MaxMemoryEntrySize() // result in bytes.
{ return sMaxMemoryEntrySize << 10; }
static uint32_t const MaxDiskEntrySize() // result in bytes.
@ -56,6 +59,7 @@ class CacheObserver : public nsIObserver
private:
static CacheObserver* sSelf;
void StoreDiskCacheCapacity();
void AttachToPreferences();
void SchduleAutoDelete();
@ -66,6 +70,7 @@ private:
static int32_t sMemoryCacheCapacity;
static int32_t sAutoMemoryCacheCapacity;
static uint32_t sDiskCacheCapacity;
static bool sSmartCacheSizeEnabled;
static uint32_t sMaxMemoryEntrySize;
static uint32_t sMaxDiskEntrySize;
static uint32_t sCompressionLevel;

View File

@ -63,6 +63,8 @@ def setup_logging(suite, args, defaults):
prefix = "log_"
found = False
found_stdout_logger = False
if not hasattr(args, 'iteritems'):
args = vars(args)
for name, values in args.iteritems():
if name.startswith(prefix) and values is not None:
for value in values:

View File

@ -1,10 +1,15 @@
import argparse
import os
import time
import unittest
import StringIO
import json
from mozlog.structured import structuredlog, reader
from mozlog.structured import (
commandline,
reader,
structuredlog,
)
class TestHandler(object):
@ -182,6 +187,14 @@ class TestStructuredLog(BaseStructuredTest):
"level": "INFO",
"message": "line 4"})
class TestCommandline(unittest.TestCase):
def test_setup_logging(self):
parser = argparse.ArgumentParser()
commandline.add_logging_group(parser)
args = parser.parse_args(["--log-raw=/tmp/foo"])
logger = commandline.setup_logging("test", args, {})
self.assertEqual(len(logger.handlers), 1)
class TestReader(unittest.TestCase):
def to_file_like(self, obj):
data_str = "\n".join(json.dumps(item) for item in obj)

View File

@ -188,3 +188,6 @@ user_pref('browser.contentHandlers.types.2.uri', 'http://test1.example.org/rss?u
user_pref('browser.contentHandlers.types.3.uri', 'http://test1.example.org/rss?url=%%s')
user_pref('browser.contentHandlers.types.4.uri', 'http://test1.example.org/rss?url=%%s')
user_pref('browser.contentHandlers.types.5.uri', 'http://test1.example.org/rss?url=%%s')
// We want to collect telemetry, but we don't want to send in the results.
user_pref('toolkit.telemetry.server', 'https://%(server)s/telemetry-dummy/');

View File

@ -351,16 +351,20 @@ let PlacesTransactions = {
* are not protected from consumers who use the raw places APIs directly.
*/
transact: function (aToTransact) {
let generatorMode = typeof(aToTransact) == "function";
if (generatorMode) {
if (!aToTransact.isGenerator())
let isGeneratorObj =
o => Object.prototype.toString.call(o) == "[object Generator]";
let generator = null;
if (typeof(aToTransact) == "function") {
generator = aToTransact();
if (!isGeneratorObj(generator))
throw new Error("aToTransact is not a generator function");
}
else {
if (!TransactionsHistory.isProxifiedTransactionObject(aToTransact))
throw new Error("aToTransact is not a valid transaction object");
if (executedTransactions.has(aToTransact))
throw new Error("Transactions objects may not be recycled.");
else if (!TransactionsHistory.isProxifiedTransactionObject(aToTransact)) {
throw new Error("aToTransact is not a valid transaction object");
}
else if (executedTransactions.has(aToTransact)) {
throw new Error("Transactions objects may not be recycled.");
}
return Serialize(function* () {
@ -387,7 +391,7 @@ let PlacesTransactions = {
let next = error ?
aGenerator.throw(sendValue) : aGenerator.next(sendValue);
sendValue = next.value;
if (Object.prototype.toString.call(sendValue) == "[object Generator]") {
if (isGeneratorObj(sendValue)) {
sendValue = yield transactBatch(sendValue);
}
else if (typeof(sendValue) == "object" && sendValue) {
@ -410,8 +414,8 @@ let PlacesTransactions = {
return sendValue;
}
if (generatorMode)
return yield transactBatch(aToTransact());
if (generator)
return yield transactBatch(generator);
else
return yield transactOneTransaction(aToTransact);
}.bind(this));
@ -887,9 +891,10 @@ PT.NewLivemark.prototype = Object.seal({
/**
* Transaction for moving an item.
*
* Required Input Properties: GUID, newParentGUID, newIndex.
* Required Input Properties: GUID, newParentGUID.
* Optional Input Properties newIndex.
*/
PT.MoveItem = DefineTransaction(["GUID", "newParentGUID", "newIndex"]);
PT.MoveItem = DefineTransaction(["GUID", "newParentGUID"], ["newIndex"]);
PT.MoveItem.prototype = Object.seal({
execute: function* (aGUID, aNewParentGUID, aNewIndex) {
let itemId = yield PlacesUtils.promiseItemId(aGUID),

View File

@ -454,8 +454,7 @@ add_task(function* test_move_items_to_folder() {
ensureUndoState([[bkm_b_txn, bkm_a_txn, folder_a_txn]], 0);
let moveTxn = PT.MoveItem({ GUID: bkm_a_info.GUID
, newParentGUID: folder_a_info.GUID
, newIndex: bmsvc.DEFAULT_INDEX });
, newParentGUID: folder_a_info.GUID });
yield PT.transact(moveTxn);
let ensureDo = () => {

View File

@ -13,7 +13,12 @@ Cu.import("resource://gre/modules/Services.jsm");
// Skip all the ones containining "test", because we never need to ask for
// updates for them.
function getLists(prefName) {
return Services.prefs.getCharPref(prefName).split(",")
let pref = Services.prefs.getCharPref(prefName);
// Splitting an empty string returns [''], we really want an empty array.
if (!pref) {
return [];
}
return pref.split(",")
.filter(function(value) { return value.indexOf("test-") == -1; })
.map(function(value) { return value.trim(); });
}

View File

@ -188,13 +188,27 @@ PROT_ListManager.prototype.maybeStartManagingUpdates = function() {
this.maybeToggleUpdateChecking();
}
PROT_ListManager.prototype.kickoffUpdate_ = function (tableData)
/**
* Acts as a nsIUrlClassifierCallback for getTables.
*/
PROT_ListManager.prototype.kickoffUpdate_ = function (onDiskTableData)
{
this.startingUpdate_ = false;
var initialUpdateDelay = 3000;
// Check if any table registered for updates has ever been downloaded.
var diskTablesAreUpdating = false;
for (var tableName in this.tablesData) {
if (this.tablesData[tableName].needsUpdate) {
if (onDiskTableData.indexOf(tableName) != -1) {
diskTablesAreUpdating = true;
}
}
}
// If the user has never downloaded tables, do the check now.
// If the user has tables, add a fuzz of a few minutes.
var initialUpdateDelay = 3000;
if (tableData != "") {
if (diskTablesAreUpdating) {
// Add a fuzz of 0-5 minutes.
initialUpdateDelay += Math.floor(Math.random() * (5 * 60 * 1000));
}

View File

@ -13,7 +13,7 @@ toolkit.jar:
skin/classic/mozapps/downloads/unknownContentType.css (downloads/unknownContentType.css)
skin/classic/mozapps/extensions/about.css (extensions/about.css)
skin/classic/mozapps/extensions/blocklist.css (extensions/blocklist.css)
skin/classic/mozapps/extensions/extensions.css (extensions/extensions.css)
* skin/classic/mozapps/extensions/extensions.css (extensions/extensions.css)
* skin/classic/mozapps/extensions/selectAddons.css (extensions/selectAddons.css)
skin/classic/mozapps/extensions/update.css (extensions/update.css)
skin/classic/mozapps/extensions/extensions.svg (extensions/extensions.svg)

View File

@ -41,7 +41,7 @@ elif CONFIG['ANDROID_VERSION'] == '15':
'GonkNativeWindowICS.h',
]
if CONFIG['MOZ_B2G_CAMERA'] or CONFIG['MOZ_OMX_DECODER']:
if CONFIG['MOZ_B2G_CAMERA'] or CONFIG['MOZ_OMX_DECODER'] or CONFIG['MOZ_WEBRTC']:
if CONFIG['ANDROID_VERSION'] == '19':
SOURCES += [
'GonkBufferQueueKK.cpp',

View File

@ -433,11 +433,11 @@ nsIdleService::AddIdleObserver(nsIObserver* aObserver, uint32_t aIdleTimeInS)
}
PR_LOG(sLog, PR_LOG_DEBUG,
("idleService: Register idle observer %x for %d seconds",
("idleService: Register idle observer %p for %d seconds",
aObserver, aIdleTimeInS));
#ifdef MOZ_WIDGET_ANDROID
__android_log_print(ANDROID_LOG_INFO, "IdleService",
"Register idle observer %x for %d seconds",
"Register idle observer %p for %d seconds",
aObserver, aIdleTimeInS);
#endif
@ -503,11 +503,11 @@ nsIdleService::RemoveIdleObserver(nsIObserver* aObserver, uint32_t aTimeInS)
mIdleObserverCount--;
mArrayListeners.RemoveElementAt(listenerIndex);
PR_LOG(sLog, PR_LOG_DEBUG,
("idleService: Remove observer %x (%d seconds), %d remain idle",
("idleService: Remove observer %p (%d seconds), %d remain idle",
aObserver, aTimeInS, mIdleObserverCount));
#ifdef MOZ_WIDGET_ANDROID
__android_log_print(ANDROID_LOG_INFO, "IdleService",
"Remove observer %x (%d seconds), %d remain idle",
"Remove observer %p (%d seconds), %d remain idle",
aObserver, aTimeInS, mIdleObserverCount);
#endif
return NS_OK;
@ -515,11 +515,11 @@ nsIdleService::RemoveIdleObserver(nsIObserver* aObserver, uint32_t aTimeInS)
// If we get here, we haven't removed anything:
PR_LOG(sLog, PR_LOG_WARNING,
("idleService: Failed to remove idle observer %x (%d seconds)",
("idleService: Failed to remove idle observer %p (%d seconds)",
aObserver, aTimeInS));
#ifdef MOZ_WIDGET_ANDROID
__android_log_print(ANDROID_LOG_INFO, "IdleService",
"Failed to remove idle observer %x (%d seconds)",
"Failed to remove idle observer %p (%d seconds)",
aObserver, aTimeInS);
#endif
return NS_ERROR_FAILURE;
@ -589,11 +589,11 @@ nsIdleService::ResetIdleTimeOut(uint32_t idleDeltaInMS)
// Send the "non-idle" events.
while (numberOfPendingNotifications--) {
PR_LOG(sLog, PR_LOG_DEBUG,
("idleService: Reset idle timeout: tell observer %x user is back",
("idleService: Reset idle timeout: tell observer %p user is back",
notifyList[numberOfPendingNotifications]));
#ifdef MOZ_WIDGET_ANDROID
__android_log_print(ANDROID_LOG_INFO, "IdleService",
"Reset idle timeout: tell observer %x user is back",
"Reset idle timeout: tell observer %p user is back",
notifyList[numberOfPendingNotifications]);
#endif
notifyList[numberOfPendingNotifications]->Observe(this,
@ -773,11 +773,11 @@ nsIdleService::IdleTimerCallback(void)
// Notify all listeners that just timed out.
while (numberOfPendingNotifications--) {
PR_LOG(sLog, PR_LOG_DEBUG,
("idleService: **** Idle timer callback: tell observer %x user is idle",
("idleService: **** Idle timer callback: tell observer %p user is idle",
notifyList[numberOfPendingNotifications]));
#ifdef MOZ_WIDGET_ANDROID
__android_log_print(ANDROID_LOG_INFO, "IdleService",
"Idle timer callback: tell observer %x user is idle",
"Idle timer callback: tell observer %p user is idle",
notifyList[numberOfPendingNotifications]);
#endif
notifyList[numberOfPendingNotifications]->Observe(this,
@ -892,11 +892,11 @@ nsIdleService::ReconfigureTimer(void)
if (nextTimeoutAt > pollTimeout) {
PR_LOG(sLog, PR_LOG_DEBUG,
("idleService: idle observers, reducing timeout to %u msec from now",
("idleService: idle observers, reducing timeout to %lu msec from now",
MIN_IDLE_POLL_INTERVAL_MSEC));
#ifdef MOZ_WIDGET_ANDROID
__android_log_print(ANDROID_LOG_INFO, "IdleService",
"idle observers, reducing timeout to %u msec from now",
"idle observers, reducing timeout to %lu msec from now",
MIN_IDLE_POLL_INTERVAL_MSEC);
#endif
nextTimeoutAt = pollTimeout;

View File

@ -134,7 +134,7 @@ extern nsresult nsStringInputStreamConstructor(nsISupports *, REFNSIID, void **)
#endif
#include "ogg/ogg.h"
#ifdef MOZ_VPX
#if defined(MOZ_VPX) && !defined(MOZ_VPX_NO_MEM_REPORTING)
#include "vpx_mem/vpx_mem.h"
#endif
#ifdef MOZ_WEBM
@ -602,7 +602,7 @@ NS_InitXPCOM2(nsIServiceManager* *result,
OggReporter::CountingRealloc,
OggReporter::CountingFree);
#ifdef MOZ_VPX
#if defined(MOZ_VPX) && !defined(MOZ_VPX_NO_MEM_REPORTING)
// And for VPX.
vpx_mem_set_functions(VPXReporter::CountingMalloc,
VPXReporter::CountingCalloc,