Merge the last PGO-green inbound changeset to m-c.

This commit is contained in:
Ryan VanderMeulen 2012-07-12 20:46:27 -04:00
commit cfea5c2634
166 changed files with 3185 additions and 2574 deletions

View File

@ -140,6 +140,12 @@ var gPluginHandler = {
self.pluginUnavailable(plugin, event.type);
break;
case "PluginVulnerableUpdatable":
let updateLink = doc.getAnonymousElementByAttribute(plugin, "class", "checkForUpdatesLink");
self.addLinkClickCallback(updateLink, "openPluginUpdatePage");
/* FALLTHRU */
case "PluginVulnerableNoUpdate":
case "PluginClickToPlay":
self._handleClickToPlayEvent(plugin);
break;
@ -151,9 +157,10 @@ var gPluginHandler = {
}
// Hide the in-content UI if it's too big. The crashed plugin handler already did this.
if (event.type != "PluginCrashed" && event.type != "PluginClickToPlay") {
if (event.type != "PluginCrashed") {
let overlay = doc.getAnonymousElementByAttribute(plugin, "class", "mainBox");
if (self.isTooSmall(plugin, overlay))
/* overlay might be null, so only operate on it if it exists */
if (overlay != null && self.isTooSmall(plugin, overlay))
overlay.style.visibility = "hidden";
}
},
@ -224,6 +231,12 @@ var gPluginHandler = {
managePlugins: function (aEvent) {
BrowserOpenAddonsMgr("addons://list/plugin");
},
// Callback for user clicking on the link in a click-to-play plugin
// (where the plugin has an update)
openPluginUpdatePage: function (aEvent) {
openURL(Services.urlFormatter.formatURLPref("plugins.update.url"));
},
#ifdef MOZ_CRASHREPORTER
// Callback for user clicking "submit a report" link
@ -258,15 +271,18 @@ var gPluginHandler = {
objLoadingContent.playPlugin();
return;
} else if (pluginsPermission == Ci.nsIPermissionManager.DENY_ACTION) {
overlay.style.visibility = "hidden";
if (overlay)
overlay.style.visibility = "hidden";
return;
}
let overlay = doc.getAnonymousElementByAttribute(aPlugin, "class", "mainBox");
// The overlay is null if the XBL binding is not attached (element is display:none).
if (overlay) {
overlay.addEventListener("click", function(aEvent) {
if (aEvent.button == 0 && aEvent.isTrusted)
// Have to check that the target is a XULElement and not the link
// to update the plugin
if (aEvent.target instanceof XULElement &&
aEvent.button == 0 && aEvent.isTrusted)
gPluginHandler.activateSinglePlugin(aEvent.target.ownerDocument.defaultView.top, aPlugin);
}, true);
}

View File

@ -1001,6 +1001,8 @@ var gBrowserInit = {
gBrowser.addEventListener("PluginOutdated", gPluginHandler, true);
gBrowser.addEventListener("PluginDisabled", gPluginHandler, true);
gBrowser.addEventListener("PluginClickToPlay", gPluginHandler, true);
gBrowser.addEventListener("PluginVulnerableUpdatable", gPluginHandler, true);
gBrowser.addEventListener("PluginVulnerableNoUpdate", gPluginHandler, true);
gBrowser.addEventListener("NewPluginInstalled", gPluginHandler.newPluginInstalled, true);
#ifdef XP_MACOSX
gBrowser.addEventListener("npapi-carbon-event-model-failure", gPluginHandler, true);

View File

@ -517,5 +517,103 @@ function test17() {
var missingNotification = PopupNotifications.getNotification("missing-plugins", gTestBrowser);
ok(!missingNotification, "Test 17, Should not have a missing plugin notification");
registerFakeBlocklistService(Ci.nsIBlocklistService.STATE_VULNERABLE_UPDATE_AVAILABLE);
prepareTest(test18a, gTestRoot + "plugin_test.html");
}
// Shorthands for the XPCOM globals used by the fake-blocklist helpers below.
const Cr = Components.results;
const Cm = Components.manager;
const Cc = Components.classes;
// Component registrar used to swap the blocklist service in and out.
const gReg = Cm.QueryInterface(Ci.nsIComponentRegistrar);
// NOTE(review): despite the name, this is the class object from
// Components.classes (not a raw CID string); it is passed to
// registerFactory below to restore the original service — confirm the
// registrar accepts it in that position.
const gRealBlocklistServiceCID = Cc["@mozilla.org/extensions/blocklist;1"];
// Arbitrary CID under which the fake blocklist service is registered.
const gFakeBlocklistServiceCID = Components.ID("{614b68a0-3c53-4ec0-8146-28cc1e25f8a1}");
// Factory for the fake service; non-null only while the fake is registered.
var gFactory = null;
// Replace the real blocklist service with a stub whose
// getPluginBlocklistState always reports |blockState| for every plugin.
function registerFakeBlocklistService(blockState) {
  var fakeService = {
    classID: gFakeBlocklistServiceCID,
    QueryInterface: XPCOMUtils.generateQI([Ci.nsIBlocklistService]),
    getPluginBlocklistState: function(plugin, appVersion, toolkitVersion) {
      return blockState;
    }
  };

  gFactory = {
    createInstance: function(outer, iid) {
      if (outer != null)
        throw Cr.NS_ERROR_NO_AGGREGATION;
      return fakeService.QueryInterface(iid);
    }
  };

  // Registering under the blocklist contract ID shadows the real service.
  gReg.registerFactory(gFakeBlocklistServiceCID,
                       "Fake Blocklist Service",
                       "@mozilla.org/extensions/blocklist;1",
                       gFactory);
}
// Remove the stub blocklist service (if registered) and put the real
// service back. Safe to call when no stub is active.
function unregisterFakeBlocklistService() {
  if (gFactory == null)
    return;

  gReg.unregisterFactory(gFakeBlocklistServiceCID, gFactory);
  gFactory = null;

  // Re-registering with a null factory should restore the original
  // blocklist service for the contract ID.
  gReg.registerFactory(gRealBlocklistServiceCID,
                       "Blocklist Service",
                       "@mozilla.org/extensions/blocklist;1",
                       null);
}
// Tests a vulnerable, updatable plugin
function test18a() {
// The fake blocklist service (registered at the end of test17 with
// STATE_VULNERABLE_UPDATE_AVAILABLE) should make the plugin click-to-play.
var clickToPlayNotification = PopupNotifications.getNotification("click-to-play-plugins", gTestBrowser);
ok(clickToPlayNotification, "Test 18a, Should have a click-to-play notification");
var doc = gTestBrowser.contentDocument;
var plugin = doc.getElementById("test");
var objLoadingContent = plugin.QueryInterface(Ci.nsIObjectLoadingContent);
ok(!objLoadingContent.activated, "Test 18a, Plugin should not be activated");
// Overlay and update link live in the plugin's XBL anonymous content.
var overlay = doc.getAnonymousElementByAttribute(plugin, "class", "mainBox");
ok(overlay.style.visibility != "hidden", "Test 18a, Plugin overlay should exist, not be hidden");
var updateLink = doc.getAnonymousElementByAttribute(plugin, "class", "checkForUpdatesLink");
ok(updateLink.style.visibility != "hidden", "Test 18a, Plugin should have an update link");
// Clicking the update link is expected to open plugins.update.url in a
// new tab; the TabOpen handler verifies the URL, cleans up, and chains
// to test18b.
var tabOpenListener = new TabOpenListener(Services.urlFormatter.formatURLPref("plugins.update.url"), false, false);
tabOpenListener.handleEvent = function(event) {
if (event.type == "TabOpen") {
gBrowser.tabContainer.removeEventListener("TabOpen", this, false);
this.tab = event.originalTarget;
ok(event.target.label == this.url, "Test 18a, Update link should open up the plugin check page");
gBrowser.removeTab(this.tab);
test18b();
}
};
// Simulate a user click on the update link inside the plugin overlay.
EventUtils.synthesizeMouse(updateLink, 5, 5, {}, gTestBrowser.contentWindow);
}
// Swap the blocklist stub to report a vulnerable plugin with no update
// available, then reload the test page and continue in test18c.
function test18b() {
unregisterFakeBlocklistService();
registerFakeBlocklistService(Ci.nsIBlocklistService.STATE_VULNERABLE_NO_UPDATE);
prepareTest(test18c, gTestRoot + "plugin_test.html");
}
// Tests a vulnerable plugin with no update
function test18c() {
// Blocklist stub now reports STATE_VULNERABLE_NO_UPDATE: the plugin is
// still click-to-play, but its overlay should not offer an update link.
var clickToPlayNotification = PopupNotifications.getNotification("click-to-play-plugins", gTestBrowser);
ok(clickToPlayNotification, "Test 18c, Should have a click-to-play notification");
var doc = gTestBrowser.contentDocument;
var plugin = doc.getElementById("test");
var objLoadingContent = plugin.QueryInterface(Ci.nsIObjectLoadingContent);
ok(!objLoadingContent.activated, "Test 18c, Plugin should not be activated");
var overlay = doc.getAnonymousElementByAttribute(plugin, "class", "mainBox");
ok(overlay.style.visibility != "hidden", "Test 18c, Plugin overlay should exist, not be hidden");
var updateLink = doc.getAnonymousElementByAttribute(plugin, "class", "checkForUpdatesLink");
// NOTE(review): 18a checks the link's visibility while this checks
// display != "block" — confirm the binding hides the link via display.
ok(updateLink.style.display != "block", "Test 18c, Plugin should not have an update link");
// Restore the real blocklist service and the plugin's default state
// before finishing the test run.
unregisterFakeBlocklistService();
var plugin = get_test_plugin();
plugin.clicktoplay = false;
finishTest();
}

View File

@ -509,7 +509,28 @@ class DeviceManager:
success: True
failure: False
"""
@staticmethod
def _escapedCommandLine(cmd):
""" Utility function to return escaped and quoted version of command line """
quotedCmd = []
for arg in cmd:
arg.replace('&', '\&')
needsQuoting = False
for char in [ ' ', '(', ')', '"', '&' ]:
if arg.find(char) >= 0:
needsQuoting = True
break
if needsQuoting:
arg = '\'%s\'' % arg
quotedCmd.append(arg)
return " ".join(quotedCmd)
class NetworkTools:
def __init__(self):
pass

View File

@ -95,24 +95,13 @@ class DeviceManagerADB(DeviceManager):
# success: <return code>
# failure: None
def shell(self, cmd, outputfile, env=None, cwd=None):
# need to quote and escape special characters here
for (index, arg) in enumerate(cmd):
arg.replace('&', '\&')
needsQuoting = False
for char in [ ' ', '(', ')', '"', '&' ]:
if arg.find(char):
needsQuoting = True
break
if needsQuoting:
cmd[index] = '\'%s\'' % arg
# This is more complex than you'd think because adb doesn't actually
# return the return code from a process, so we have to capture the output
# to get it
# FIXME: this function buffers all output of the command into memory,
# always. :(
cmdline = " ".join(cmd) + "; echo $?"
# Getting the return code is more complex than you'd think because adb
# doesn't actually return the return code from a process, so we have to
# capture the output to get it
cmdline = "%s; echo $?" % self._escapedCommandLine(cmd)
# prepend cwd and env to command if necessary
if cwd:

View File

@ -260,7 +260,7 @@ class DeviceManagerSUT(DeviceManager):
# success: <return code>
# failure: None
def shell(self, cmd, outputfile, env=None, cwd=None):
cmdline = subprocess.list2cmdline(cmd)
cmdline = self._escapedCommandLine(cmd)
if env:
cmdline = '%s %s' % (self.formatEnvString(env), cmdline)

View File

@ -80,20 +80,6 @@ static PRLogModuleInfo* gObjectLog = PR_NewLogModule("objlc");
#define LOG(args) PR_LOG(gObjectLog, PR_LOG_DEBUG, args)
#define LOG_ENABLED() PR_LOG_TEST(gObjectLog, PR_LOG_DEBUG)
#include "mozilla/Preferences.h"
static bool gClickToPlayPlugins = false;
// One-shot initializer: hooks gClickToPlayPlugins up to the
// "plugins.click_to_play" pref so the cached bool tracks pref changes.
static void
InitPrefCache()
{
static bool initializedPrefCache = false;
if (!initializedPrefCache) {
// AddBoolVarCache must only be registered once per process.
mozilla::Preferences::AddBoolVarCache(&gClickToPlayPlugins, "plugins.click_to_play");
}
initializedPrefCache = true;
}
class nsAsyncInstantiateEvent : public nsRunnable {
public:
nsObjectLoadingContent *mContent;
@ -181,6 +167,12 @@ nsPluginErrorEvent::Run()
case ePluginClickToPlay:
type = NS_LITERAL_STRING("PluginClickToPlay");
break;
case ePluginVulnerableUpdatable:
type = NS_LITERAL_STRING("PluginVulnerableUpdatable");
break;
case ePluginVulnerableNoUpdate:
type = NS_LITERAL_STRING("PluginVulnerableNoUpdate");
break;
case ePluginUnsupported:
type = NS_LITERAL_STRING("PluginNotFound");
break;
@ -484,7 +476,11 @@ nsresult nsObjectLoadingContent::IsPluginEnabledForType(const nsCString& aMIMETy
return rv;
}
if (!mShouldPlay) {
if (!pluginHost->IsPluginClickToPlayForType(aMIMEType.get())) {
mCTPPlayable = true;
}
if (!mCTPPlayable) {
nsCOMPtr<nsIContent> thisContent = do_QueryInterface(static_cast<nsIObjectLoadingContent*>(this));
MOZ_ASSERT(thisContent);
nsIDocument* ownerDoc = thisContent->OwnerDoc();
@ -505,12 +501,17 @@ nsresult nsObjectLoadingContent::IsPluginEnabledForType(const nsCString& aMIMETy
nsCOMPtr<nsIPermissionManager> permissionManager = do_GetService(NS_PERMISSIONMANAGER_CONTRACTID, &rv);
NS_ENSURE_SUCCESS(rv, rv);
PRUint32 permission;
rv = permissionManager->TestPermission(topUri,
"plugins",
&permission);
rv = permissionManager->TestPermission(topUri, "plugins", &permission);
NS_ENSURE_SUCCESS(rv, rv);
if (permission == nsIPermissionManager::ALLOW_ACTION) {
mShouldPlay = true;
PRUint32 state;
rv = pluginHost->GetBlocklistStateForType(aMIMEType.get(), &state);
NS_ENSURE_SUCCESS(rv, rv);
if (permission == nsIPermissionManager::ALLOW_ACTION &&
state != nsIBlocklistService::STATE_VULNERABLE_UPDATE_AVAILABLE &&
state != nsIBlocklistService::STATE_VULNERABLE_NO_UPDATE) {
mCTPPlayable = true;
} else {
return NS_ERROR_PLUGIN_CLICKTOPLAY;
}
@ -542,12 +543,9 @@ GetExtensionFromURI(nsIURI* uri, nsCString& ext)
*/
bool nsObjectLoadingContent::IsPluginEnabledByExtension(nsIURI* uri, nsCString& mimeType)
{
if (!mShouldPlay) {
return false;
}
nsCAutoString ext;
GetExtensionFromURI(uri, ext);
bool enabled = false;
if (ext.IsEmpty()) {
return false;
@ -562,9 +560,18 @@ bool nsObjectLoadingContent::IsPluginEnabledByExtension(nsIURI* uri, nsCString&
const char* typeFromExt;
if (NS_SUCCEEDED(pluginHost->IsPluginEnabledForExtension(ext.get(), typeFromExt))) {
mimeType = typeFromExt;
return true;
enabled = true;
if (!pluginHost->IsPluginClickToPlayForType(mimeType.get())) {
mCTPPlayable = true;
}
}
if (!mCTPPlayable) {
return false;
} else {
return enabled;
}
return false;
}
nsresult
@ -598,13 +605,8 @@ nsObjectLoadingContent::nsObjectLoadingContent()
, mIsStopping(false)
, mSrcStreamLoading(false)
, mFallbackReason(ePluginOtherState)
{
InitPrefCache();
// If plugins.click_to_play is false, plugins should always play
mShouldPlay = !gClickToPlayPlugins;
// If plugins.click_to_play is true, track the activated state of plugins.
mActivated = !gClickToPlayPlugins;
}
, mCTPPlayable(false)
, mActivated(false) {}
nsObjectLoadingContent::~nsObjectLoadingContent()
{
@ -617,10 +619,6 @@ nsObjectLoadingContent::~nsObjectLoadingContent()
nsresult
nsObjectLoadingContent::InstantiatePluginInstance(const char* aMimeType, nsIURI* aURI)
{
if (!mShouldPlay) {
return NS_ERROR_PLUGIN_CLICKTOPLAY;
}
// Don't do anything if we already have an active instance.
if (mInstanceOwner) {
return NS_OK;
@ -665,6 +663,14 @@ nsObjectLoadingContent::InstantiatePluginInstance(const char* aMimeType, nsIURI*
return rv;
}
if (!pluginHost->IsPluginClickToPlayForType(aMimeType)) {
mCTPPlayable = true;
}
if (!mCTPPlayable) {
return NS_ERROR_PLUGIN_CLICKTOPLAY;
}
// If you add early return(s), be sure to balance this call to
// appShell->SuspendNative() with additional call(s) to
// appShell->ReturnNative().
@ -1191,6 +1197,10 @@ nsObjectLoadingContent::ObjectState() const
switch (mFallbackReason) {
case ePluginClickToPlay:
return NS_EVENT_STATE_TYPE_CLICK_TO_PLAY;
case ePluginVulnerableUpdatable:
return NS_EVENT_STATE_VULNERABLE_UPDATABLE;
case ePluginVulnerableNoUpdate:
return NS_EVENT_STATE_VULNERABLE_NO_UPDATE;
case ePluginDisabled:
state |= NS_EVENT_STATE_HANDLER_DISABLED;
break;
@ -1933,8 +1943,10 @@ nsObjectLoadingContent::GetPluginSupportState(nsIContent* aContent,
}
PluginSupportState pluginDisabledState = GetPluginDisabledState(aContentType);
if (pluginDisabledState == ePluginClickToPlay) {
return ePluginClickToPlay;
if (pluginDisabledState == ePluginClickToPlay ||
pluginDisabledState == ePluginVulnerableUpdatable ||
pluginDisabledState == ePluginVulnerableNoUpdate) {
return pluginDisabledState;
} else if (hasAlternateContent) {
return ePluginOtherState;
} else {
@ -1946,12 +1958,28 @@ PluginSupportState
nsObjectLoadingContent::GetPluginDisabledState(const nsCString& aContentType)
{
nsresult rv = IsPluginEnabledForType(aContentType);
if (rv == NS_ERROR_PLUGIN_DISABLED)
if (rv == NS_ERROR_PLUGIN_DISABLED) {
return ePluginDisabled;
if (rv == NS_ERROR_PLUGIN_CLICKTOPLAY)
}
if (rv == NS_ERROR_PLUGIN_CLICKTOPLAY) {
PRUint32 state;
nsCOMPtr<nsIPluginHost> pluginHostCOM(do_GetService(MOZ_PLUGIN_HOST_CONTRACTID));
nsPluginHost *pluginHost = static_cast<nsPluginHost*>(pluginHostCOM.get());
if (pluginHost) {
rv = pluginHost->GetBlocklistStateForType(aContentType.get(), &state);
if (NS_SUCCEEDED(rv)) {
if (state == nsIBlocklistService::STATE_VULNERABLE_UPDATE_AVAILABLE) {
return ePluginVulnerableUpdatable;
} else if (state == nsIBlocklistService::STATE_VULNERABLE_NO_UPDATE) {
return ePluginVulnerableNoUpdate;
}
}
}
return ePluginClickToPlay;
if (rv == NS_ERROR_PLUGIN_BLOCKLISTED)
}
if (rv == NS_ERROR_PLUGIN_BLOCKLISTED) {
return ePluginBlocklisted;
}
return ePluginUnsupported;
}
@ -2213,7 +2241,7 @@ nsObjectLoadingContent::PlayPlugin()
if (!nsContentUtils::IsCallerChrome())
return NS_OK;
mShouldPlay = true;
mCTPPlayable = true;
return LoadObject(mURI, true, mContentType, true);
}

View File

@ -38,7 +38,9 @@ enum PluginSupportState {
ePluginOutdated, // The plugin is considered outdated, but not disabled
ePluginOtherState, // Something else (e.g. uninitialized or not a plugin)
ePluginCrashed,
ePluginClickToPlay // The plugin is disabled until the user clicks on it
ePluginClickToPlay, // The plugin is disabled until the user clicks on it
ePluginVulnerableUpdatable, // The plugin is vulnerable (update available)
ePluginVulnerableNoUpdate // The plugin is vulnerable (no update available)
};
/**
@ -372,9 +374,11 @@ class nsObjectLoadingContent : public nsImageLoadingContent
// it may lose the flag.
bool mNetworkCreated : 1;
// Used to keep track of whether or not a plugin should be played.
// This is used for click-to-play plugins.
bool mShouldPlay : 1;
// Used to keep track of if a plugin is blocked by click-to-play.
// True indicates the plugin is not click-to-play or it has been clicked by
// the user.
// False indicates the plugin is click-to-play and has not yet been clicked.
bool mCTPPlayable : 1;
// Used to keep track of whether or not a plugin has been played.
// This is used for click-to-play plugins.

View File

@ -242,6 +242,10 @@ private:
#define NS_EVENT_STATE_SUB_OPTIMUM NS_DEFINE_EVENT_STATE_MACRO(38)
// Content is in the sub-suboptimal region.
#define NS_EVENT_STATE_SUB_SUB_OPTIMUM NS_DEFINE_EVENT_STATE_MACRO(39)
// Handler for click to play plugin (vulnerable w/update)
#define NS_EVENT_STATE_VULNERABLE_UPDATABLE NS_DEFINE_EVENT_STATE_MACRO(40)
// Handler for click to play plugin (vulnerable w/no update)
#define NS_EVENT_STATE_VULNERABLE_NO_UPDATE NS_DEFINE_EVENT_STATE_MACRO(41)
/**
* NOTE: do not go over 63 without updating nsEventStates::InternalType!

View File

@ -5,7 +5,7 @@
#include "nsISupports.idl"
[scriptable, uuid(88e03453-a773-47ba-9d84-14f672ac99e2)]
[scriptable, uuid(a361a7e7-7f8d-4b68-91e9-30ae096460d4)]
interface nsIPluginTag : nsISupports
{
readonly attribute AUTF8String description;
@ -15,4 +15,5 @@ interface nsIPluginTag : nsISupports
readonly attribute AUTF8String name;
attribute boolean disabled;
attribute boolean blocklisted;
attribute boolean clicktoplay;
};

View File

@ -342,8 +342,10 @@ nsPluginHost::nsPluginHost()
Preferences::GetBool("plugin.override_internal_types", false);
mPluginsDisabled = Preferences::GetBool("plugin.disable", false);
mPluginsClickToPlay = Preferences::GetBool("plugins.click_to_play", false);
Preferences::AddStrongObserver(this, "plugin.disable");
Preferences::AddStrongObserver(this, "plugins.click_to_play");
nsCOMPtr<nsIObserverService> obsService =
mozilla::services::GetObserverService();
@ -1294,6 +1296,36 @@ nsPluginHost::IsPluginEnabledForType(const char* aMimeType)
return NS_OK;
}
// Returns true when the plugin registered for |aMimeType| is
// click-to-play: either its tag carries NS_PLUGIN_FLAG_CLICKTOPLAY or
// the global "plugins.click_to_play" pref (mPluginsClickToPlay) is set.
// Returns false when no plugin handles the type.
bool
nsPluginHost::IsPluginClickToPlayForType(const char* aMimeType)
{
  nsPluginTag* plugin = FindPluginForType(aMimeType, true);
  if (!plugin) {
    return false;
  }
  return mPluginsClickToPlay || plugin->HasFlag(NS_PLUGIN_FLAG_CLICKTOPLAY);
}
// Looks up the blocklist state (an nsIBlocklistService::STATE_* value)
// for the plugin handling |aMimeType| and stores it in *aState.
// Fails when no plugin matches the type or the blocklist service is
// unavailable.
nsresult
nsPluginHost::GetBlocklistStateForType(const char *aMimeType, PRUint32 *aState)
{
  nsPluginTag *plugin = FindPluginForType(aMimeType, true);
  if (!plugin) {
    return NS_ERROR_FAILURE;
  }

  nsCOMPtr<nsIBlocklistService> blocklist =
    do_GetService("@mozilla.org/extensions/blocklist;1");
  if (!blocklist) {
    return NS_ERROR_FAILURE;
  }

  // The EmptyString()s select the currently running application and
  // toolkit versions.
  return blocklist->GetPluginBlocklistState(plugin, EmptyString(),
                                            EmptyString(), aState);
}
// check comma delimitered extensions
static int CompareExtensions(const char *aExtensionList, const char *aExtension)
@ -2074,19 +2106,32 @@ nsresult nsPluginHost::ScanPluginsDirectory(nsIFile *pluginsDir,
EmptyString(), &state);
if (NS_SUCCEEDED(rv)) {
// If the blocklist says so then block the plugin. If the blocklist says
// it is risky and we have never seen this plugin before then disable it
if (state == nsIBlocklistService::STATE_BLOCKED)
pluginTag->Mark(NS_PLUGIN_FLAG_BLOCKLISTED);
else if (state == nsIBlocklistService::STATE_SOFTBLOCKED && !seenBefore)
enabled = false;
else if (state == nsIBlocklistService::STATE_OUTDATED && !seenBefore)
warnOutdated = true;
// If the blocklist says so, block the plugin.
// If the blocklist says it is risky and we have never seen this
// plugin before, then disable it.
// If the blocklist says this is an outdated plugin, warn about
// outdated plugins.
// If the blocklist says the plugin is one of the click-to-play
// states, set the click-to-play flag.
if (state == nsIBlocklistService::STATE_BLOCKED) {
pluginTag->Mark(NS_PLUGIN_FLAG_BLOCKLISTED);
}
if (state == nsIBlocklistService::STATE_SOFTBLOCKED && !seenBefore) {
enabled = false;
}
if (state == nsIBlocklistService::STATE_OUTDATED && !seenBefore) {
warnOutdated = true;
}
if (state == nsIBlocklistService::STATE_VULNERABLE_UPDATE_AVAILABLE ||
state == nsIBlocklistService::STATE_VULNERABLE_NO_UPDATE) {
pluginTag->Mark(NS_PLUGIN_FLAG_CLICKTOPLAY);
}
}
}
if (!enabled)
if (!enabled) {
pluginTag->UnMark(NS_PLUGIN_FLAG_ENABLED);
}
// Plugin unloading is tag-based. If we created a new tag and loaded
// the library in the process then we want to attempt to unload it here.
@ -3302,6 +3347,7 @@ NS_IMETHODIMP nsPluginHost::Observe(nsISupports *aSubject,
}
if (!nsCRT::strcmp(NS_PREFBRANCH_PREFCHANGE_TOPIC_ID, aTopic)) {
mPluginsDisabled = Preferences::GetBool("plugin.disable", false);
mPluginsClickToPlay = Preferences::GetBool("plugins.click_to_play", false);
// Unload or load plugins as needed
if (mPluginsDisabled) {
UnloadPlugins();

View File

@ -85,6 +85,8 @@ public:
nsIPluginInstanceOwner *aOwner);
nsresult IsPluginEnabledForType(const char* aMimeType);
nsresult IsPluginEnabledForExtension(const char* aExtension, const char* &aMimeType);
bool IsPluginClickToPlayForType(const char *aMimeType);
nsresult GetBlocklistStateForType(const char *aMimeType, PRUint32 *state);
nsresult GetPluginCount(PRUint32* aPluginCount);
nsresult GetPlugins(PRUint32 aPluginCount, nsIDOMPlugin** aPluginArray);
@ -281,6 +283,8 @@ private:
// set by pref plugin.disable
bool mPluginsDisabled;
// set by pref plugins.click_to_play
bool mPluginsClickToPlay;
// Any instances in this array will have valid plugin objects via GetPlugin().
// When removing an instance it might not die - be sure to null out its plugin.

View File

@ -320,6 +320,30 @@ nsPluginTag::SetBlocklisted(bool aBlocklisted)
return NS_OK;
}
// nsIPluginTag attribute getter: true when this tag carries the
// click-to-play flag.
NS_IMETHODIMP
nsPluginTag::GetClicktoplay(bool *aClicktoplay)
{
*aClicktoplay = HasFlag(NS_PLUGIN_FLAG_CLICKTOPLAY);
return NS_OK;
}
// nsIPluginTag attribute setter: sets or clears the click-to-play flag.
// When the flag already matches the requested value this is a no-op and
// the plugin host is not notified.
NS_IMETHODIMP
nsPluginTag::SetClicktoplay(bool aClicktoplay)
{
  const bool isSet = HasFlag(NS_PLUGIN_FLAG_CLICKTOPLAY);
  if (isSet != aClicktoplay) {
    if (aClicktoplay) {
      Mark(NS_PLUGIN_FLAG_CLICKTOPLAY);
    } else {
      UnMark(NS_PLUGIN_FLAG_CLICKTOPLAY);
    }
    // Tell the plugin host the tag information changed.
    mPluginHost->UpdatePluginInfo(nsnull);
  }
  return NS_OK;
}
void nsPluginTag::Mark(PRUint32 mask)
{
bool wasEnabled = IsEnabled();

View File

@ -27,6 +27,7 @@ struct nsPluginInfo;
#define NS_PLUGIN_FLAG_FROMCACHE 0x0004 // this plugintag info was loaded from cache
// no longer used 0x0008 // reuse only if regenerating pluginreg.dat
#define NS_PLUGIN_FLAG_BLOCKLISTED 0x0010 // this is a blocklisted plugin
#define NS_PLUGIN_FLAG_CLICKTOPLAY 0x0020 // this is a click-to-play plugin
// A linked-list of plugin information that is used for instantiating plugins
// and reflecting plugin information into JavaScript.

View File

@ -19,6 +19,7 @@
#include "nsAppDirectoryServiceDefs.h"
#include "prprf.h"
#include "mozilla/storage.h"
#include "mozilla/Attributes.h"
#include "nsXULAppAPI.h"
#include "nsIPrincipal.h"
@ -115,7 +116,7 @@ nsHostEntry::nsHostEntry(const nsHostEntry& toCopy)
* Note: Once the callback has been called this DeleteFromMozHostListener cannot
* be reused.
*/
class CloseDatabaseListener : public mozIStorageCompletionCallback
class CloseDatabaseListener MOZ_FINAL : public mozIStorageCompletionCallback
{
public:
NS_DECL_ISUPPORTS
@ -164,7 +165,7 @@ CloseDatabaseListener::Complete()
* Note: Once the callback has been called this DeleteFromMozHostListener cannot
* be reused.
*/
class DeleteFromMozHostListener : public mozIStorageStatementCallback
class DeleteFromMozHostListener MOZ_FINAL : public mozIStorageStatementCallback
{
public:
NS_DECL_ISUPPORTS

View File

@ -140,6 +140,14 @@ EXPORTS_mozilla/layers += ShadowLayerUtilsD3D10.h
DEFINES += -DMOZ_ENABLE_D3D10_LAYER
endif
# NB: Gralloc is available on other platforms that use the android GL
# libraries, but only Gonk is able to use it reliably because Gecko
# has full system permissions there.
ifeq ($(MOZ_WIDGET_TOOLKIT),gonk)
EXPORTS_mozilla/layers += ShadowLayerUtilsGralloc.h
CPPSRCS += ShadowLayerUtilsGralloc.cpp
endif
include $(topsrcdir)/config/rules.mk
include $(topsrcdir)/ipc/chromium/chromium-config.mk

View File

@ -171,6 +171,16 @@ protected:
return tmp.forget();
}
/**
* Set the buffer only. This is intended to be used with the
* shadow-layer Open/CloseDescriptor interface, to ensure we don't
* accidentally touch a buffer when it's not mapped.
*/
void SetBuffer(gfxASurface* aBuffer)
{
mBuffer = aBuffer;
}
/**
* Get a context at the specified resolution for updating |aBounds|,
* which must be contained within a single quadrant.

View File

@ -10,8 +10,8 @@
using namespace mozilla::gfx;
namespace mozilla {
namespace layers {
namespace layers {
static bool
IsClippingCheap(gfxContext* aTarget, const nsIntRegion& aRegion)
@ -47,10 +47,10 @@ BasicThebesLayerBuffer::DrawTo(ThebesLayer* aLayer,
// Pull out the mask surface and transform here, because the mask
// is internal to basic layers
gfxMatrix maskTransform;
if (nsRefPtr<gfxASurface> maskSurface =
GetMaskSurfaceAndTransform(aMaskLayer, &maskTransform)) {
DrawBufferWithRotation(aTarget, aOpacity, maskSurface, &maskTransform);
AutoMaskData mask;
if (GetMaskData(aMaskLayer, &mask)) {
DrawBufferWithRotation(aTarget, aOpacity,
mask.GetSurface(), &mask.GetTransform());
} else {
DrawBufferWithRotation(aTarget, aOpacity);
}

View File

@ -58,6 +58,27 @@ public:
gfxASurface* aSource, const nsIntRect& aRect, const nsIntPoint& aRotation,
const nsIntRegion& aUpdateRegion);
/**
* When BasicThebesLayerBuffer is used with layers that hold
* SurfaceDescriptor, this buffer only has a valid gfxASurface in
* the scope of an AutoOpenSurface for that SurfaceDescriptor. That
* is, it's sort of a "virtual buffer" that's only mapped an
* unmapped within the scope of AutoOpenSurface. None of the
* underlying buffer attributes (rect, rotation) are affected by
* mapping/unmapping.
*
* These helpers just exist to provide more descriptive names of the
* map/unmap process.
*/
void MapBuffer(gfxASurface* aBuffer)
{
SetBuffer(aBuffer);
}
void UnmapBuffer()
{
SetBuffer(nsnull);
}
private:
BasicThebesLayerBuffer(gfxASurface* aBuffer,
const nsIntRect& aRect, const nsIntPoint& aRotation)
@ -87,18 +108,25 @@ public:
MOZ_COUNT_DTOR(ShadowThebesLayerBuffer);
}
void Swap(gfxASurface* aNewBuffer,
const nsIntRect& aNewRect, const nsIntPoint& aNewRotation,
gfxASurface** aOldBuffer,
/**
* Swap in the old "virtual buffer" (see above) attributes in aNew*
* and return the old ones in aOld*.
*
* Swap() must only be called when the buffer is in its "unmapped"
* state, that is the underlying gfxASurface is not available. It
* is expected that the owner of this buffer holds an unmapped
* SurfaceDescriptor as the backing storage for this buffer. That's
* why no gfxASurface or SurfaceDescriptor parameters appear here.
*/
void Swap(const nsIntRect& aNewRect, const nsIntPoint& aNewRotation,
nsIntRect* aOldRect, nsIntPoint* aOldRotation)
{
*aOldRect = BufferRect();
*aOldRotation = BufferRotation();
nsRefPtr<gfxASurface> oldBuffer;
oldBuffer = SetBuffer(aNewBuffer,
aNewRect, aNewRotation);
oldBuffer.forget(aOldBuffer);
oldBuffer = SetBuffer(nsnull, aNewRect, aNewRotation);
MOZ_ASSERT(!oldBuffer);
}
protected:
@ -113,4 +141,4 @@ protected:
}
}
#endif
#endif

View File

@ -339,9 +339,8 @@ BasicShadowableCanvasLayer::Initialize(const Data& aData)
// canvas resizes
if (IsSurfaceDescriptorValid(mBackBuffer)) {
nsRefPtr<gfxASurface> backSurface =
BasicManager()->OpenDescriptor(mBackBuffer);
if (gfxIntSize(mBounds.width, mBounds.height) != backSurface->GetSize()) {
AutoOpenSurface backSurface(OPEN_READ_ONLY, mBackBuffer);
if (gfxIntSize(mBounds.width, mBounds.height) != backSurface.Size()) {
DestroyBackBuffer();
}
}
@ -368,15 +367,13 @@ BasicShadowableCanvasLayer::Paint(gfxContext* aContext, Layer* aMaskLayer)
NS_RUNTIMEABORT("creating CanvasLayer back buffer failed!");
}
nsRefPtr<gfxASurface> backSurface =
BasicManager()->OpenDescriptor(mBackBuffer);
AutoOpenSurface autoBackSurface(OPEN_READ_WRITE, mBackBuffer);
if (aMaskLayer) {
static_cast<BasicImplData*>(aMaskLayer->ImplData())
->Paint(aContext, nsnull);
}
UpdateSurface(backSurface, nsnull);
UpdateSurface(autoBackSurface.Get(), nsnull);
FireDidTransactionCallback();
BasicManager()->PaintedCanvas(BasicManager()->Hold(this),
@ -437,15 +434,14 @@ void
BasicShadowCanvasLayer::Swap(const CanvasSurface& aNewFront, bool needYFlip,
CanvasSurface* aNewBack)
{
nsRefPtr<gfxASurface> surface =
BasicManager()->OpenDescriptor(aNewFront);
AutoOpenSurface autoSurface(OPEN_READ_ONLY, aNewFront);
// Destroy mFrontBuffer if size different
gfxIntSize sz = surface->GetSize();
gfxIntSize sz = autoSurface.Size();
bool surfaceConfigChanged = sz != gfxIntSize(mBounds.width, mBounds.height);
if (IsSurfaceDescriptorValid(mFrontSurface)) {
nsRefPtr<gfxASurface> front = BasicManager()->OpenDescriptor(mFrontSurface);
AutoOpenSurface autoFront(OPEN_READ_ONLY, mFrontSurface);
surfaceConfigChanged = surfaceConfigChanged ||
surface->GetContentType() != front->GetContentType();
autoSurface.ContentType() != autoFront.ContentType();
}
if (surfaceConfigChanged) {
DestroyFrontBuffer();
@ -459,7 +455,7 @@ BasicShadowCanvasLayer::Swap(const CanvasSurface& aNewFront, bool needYFlip,
} else {
*aNewBack = null_t();
}
mFrontSurface = aNewFront.get_SurfaceDescriptor();
mFrontSurface = aNewFront;
}
void
@ -472,9 +468,8 @@ BasicShadowCanvasLayer::Paint(gfxContext* aContext, Layer* aMaskLayer)
return;
}
nsRefPtr<gfxASurface> surface =
BasicManager()->OpenDescriptor(mFrontSurface);
nsRefPtr<gfxPattern> pat = new gfxPattern(surface);
AutoOpenSurface autoSurface(OPEN_READ_ONLY, mFrontSurface);
nsRefPtr<gfxPattern> pat = new gfxPattern(autoSurface.Get());
pat->SetFilter(mFilter);
pat->SetExtend(gfxPattern::EXTEND_PAD);

View File

@ -9,8 +9,8 @@
using namespace mozilla::gfx;
namespace mozilla {
namespace layers {
namespace layers {
class BasicColorLayer : public ColorLayer, public BasicImplData {
public:
BasicColorLayer(BasicLayerManager* aLayerManager) :

View File

@ -9,8 +9,8 @@
using namespace mozilla::gfx;
namespace mozilla {
namespace layers {
namespace layers {
BasicContainerLayer::~BasicContainerLayer()

View File

@ -11,7 +11,7 @@
using namespace mozilla::gfx;
namespace mozilla {
namespace layers {
namespace layers {
class BasicImageLayer : public ImageLayer, public BasicImplData {
public:
@ -41,7 +41,8 @@ public:
gfxContext* aContext,
Layer* aMaskLayer);
virtual already_AddRefed<gfxASurface> GetAsSurface();
virtual bool GetAsSurface(gfxASurface** aSurface,
SurfaceDescriptor* aDescriptor);
protected:
BasicLayerManager* BasicManager()
@ -86,9 +87,6 @@ BasicImageLayer::GetAndPaintCurrentImage(gfxContext* aContext,
return nsnull;
}
NS_ASSERTION(surface->GetContentType() != gfxASurface::CONTENT_ALPHA,
"Image layer has alpha image");
nsRefPtr<gfxPattern> pat = new gfxPattern(surface);
if (!pat) {
return nsnull;
@ -154,15 +152,18 @@ BasicImageLayer::PaintContext(gfxPattern* aPattern,
aPattern->SetExtend(extend);
}
already_AddRefed<gfxASurface>
BasicImageLayer::GetAsSurface()
bool
BasicImageLayer::GetAsSurface(gfxASurface** aSurface,
SurfaceDescriptor* aDescriptor)
{
if (!mContainer) {
return nsnull;
return false;
}
gfxIntSize dontCare;
return mContainer->GetCurrentAsSurface(&dontCare);
nsRefPtr<gfxASurface> surface = mContainer->GetCurrentAsSurface(&dontCare);
*aSurface = surface.forget().get();
return true;
}
class BasicShadowableImageLayer : public BasicImageLayer,
@ -196,9 +197,9 @@ public:
mBackBuffer = aBuffer;
}
virtual void SetBackBufferYUVImage(gfxSharedImageSurface* aYBuffer,
gfxSharedImageSurface* aUBuffer,
gfxSharedImageSurface* aVBuffer)
virtual void SetBackBufferYUVImage(const SurfaceDescriptor& aYBuffer,
const SurfaceDescriptor& aUBuffer,
const SurfaceDescriptor& aVBuffer)
{
mBackBufferY = aYBuffer;
mBackBufferU = aUBuffer;
@ -207,7 +208,9 @@ public:
virtual void Disconnect()
{
mBackBufferY = mBackBufferU = mBackBufferV = nsnull;
mBackBufferY = SurfaceDescriptor();
mBackBufferU = SurfaceDescriptor();
mBackBufferV = SurfaceDescriptor();
mBackBuffer = SurfaceDescriptor();
BasicShadowableLayer::Disconnect();
}
@ -217,11 +220,11 @@ public:
if (IsSurfaceDescriptorValid(mBackBuffer)) {
BasicManager()->ShadowLayerForwarder::DestroySharedSurface(&mBackBuffer);
}
if (mBackBufferY) {
BasicManager()->ShadowLayerForwarder::DestroySharedSurface(mBackBufferY);
BasicManager()->ShadowLayerForwarder::DestroySharedSurface(mBackBufferU);
BasicManager()->ShadowLayerForwarder::DestroySharedSurface(mBackBufferV);
}
if (IsSurfaceDescriptorValid(mBackBufferY)) {
BasicManager()->ShadowLayerForwarder::DestroySharedSurface(&mBackBufferY);
BasicManager()->ShadowLayerForwarder::DestroySharedSurface(&mBackBufferU);
BasicManager()->ShadowLayerForwarder::DestroySharedSurface(&mBackBufferV);
}
}
private:
@ -234,9 +237,9 @@ private:
// for RGB images only mBackSurface is used.
SurfaceDescriptor mBackBuffer;
bool mBufferIsOpaque;
nsRefPtr<gfxSharedImageSurface> mBackBufferY;
nsRefPtr<gfxSharedImageSurface> mBackBufferU;
nsRefPtr<gfxSharedImageSurface> mBackBufferV;
SurfaceDescriptor mBackBufferY;
SurfaceDescriptor mBackBufferU;
SurfaceDescriptor mBackBufferV;
gfxIntSize mCbCrSize;
};
@ -271,38 +274,52 @@ BasicShadowableImageLayer::Paint(gfxContext* aContext, Layer* aMaskLayer)
const PlanarYCbCrImage::Data *data = YCbCrImage->GetData();
NS_ASSERTION(data, "Must be able to retrieve yuv data from image!");
if (mSize != data->mYSize || mCbCrSize != data->mCbCrSize || !mBackBufferY) {
if (mSize != data->mYSize || mCbCrSize != data->mCbCrSize || !IsSurfaceDescriptorValid(mBackBufferY)) {
DestroyBackBuffer();
mSize = data->mYSize;
mCbCrSize = data->mCbCrSize;
if (!BasicManager()->AllocBuffer(mSize, gfxASurface::CONTENT_ALPHA,
getter_AddRefs(mBackBufferY)) ||
!BasicManager()->AllocBuffer(mCbCrSize, gfxASurface::CONTENT_ALPHA,
getter_AddRefs(mBackBufferU)) ||
!BasicManager()->AllocBuffer(mCbCrSize, gfxASurface::CONTENT_ALPHA,
getter_AddRefs(mBackBufferV))) {
// We either allocate all three planes or none.
if (!BasicManager()->AllocBufferWithCaps(mSize,
gfxASurface::CONTENT_ALPHA,
MAP_AS_IMAGE_SURFACE,
&mBackBufferY) ||
!BasicManager()->AllocBufferWithCaps(mCbCrSize,
gfxASurface::CONTENT_ALPHA,
MAP_AS_IMAGE_SURFACE,
&mBackBufferU) ||
!BasicManager()->AllocBufferWithCaps(mCbCrSize,
gfxASurface::CONTENT_ALPHA,
MAP_AS_IMAGE_SURFACE,
&mBackBufferV)) {
NS_RUNTIMEABORT("creating ImageLayer 'front buffer' failed!");
}
}
AutoOpenSurface dyas(OPEN_READ_WRITE, mBackBufferY);
gfxImageSurface* dy = dyas.GetAsImage();
for (int i = 0; i < data->mYSize.height; i++) {
memcpy(mBackBufferY->Data() + i * mBackBufferY->Stride(),
memcpy(dy->Data() + i * dy->Stride(),
data->mYChannel + i * data->mYStride,
data->mYSize.width);
}
AutoOpenSurface duas(OPEN_READ_WRITE, mBackBufferU);
gfxImageSurface* du = duas.GetAsImage();
AutoOpenSurface dvas(OPEN_READ_WRITE, mBackBufferV);
gfxImageSurface* dv = dvas.GetAsImage();
for (int i = 0; i < data->mCbCrSize.height; i++) {
memcpy(mBackBufferU->Data() + i * mBackBufferU->Stride(),
memcpy(du->Data() + i * du->Stride(),
data->mCbChannel + i * data->mCbCrStride,
data->mCbCrSize.width);
memcpy(mBackBufferV->Data() + i * mBackBufferV->Stride(),
memcpy(dv->Data() + i * dv->Stride(),
data->mCrChannel + i * data->mCbCrStride,
data->mCbCrSize.width);
}
YUVImage yuv(mBackBufferY->GetShmem(),
mBackBufferU->GetShmem(),
mBackBufferV->GetShmem(),
YUVImage yuv(mBackBufferY, mBackBufferU, mBackBufferV,
data->GetPictureRect());
BasicManager()->PaintedImage(BasicManager()->Hold(this),
@ -336,9 +353,8 @@ BasicShadowableImageLayer::Paint(gfxContext* aContext, Layer* aMaskLayer)
NS_RUNTIMEABORT("creating ImageLayer 'front buffer' failed!");
}
nsRefPtr<gfxASurface> backSurface =
BasicManager()->OpenDescriptor(mBackBuffer);
nsRefPtr<gfxContext> tmpCtx = new gfxContext(backSurface);
AutoOpenSurface backSurface(OPEN_READ_WRITE, mBackBuffer);
nsRefPtr<gfxContext> tmpCtx = new gfxContext(backSurface.Get());
tmpCtx->SetOperator(gfxContext::OPERATOR_SOURCE);
PaintContext(pat,
nsIntRegion(nsIntRect(0, 0, mSize.width, mSize.height)),
@ -377,7 +393,8 @@ public:
}
virtual void Paint(gfxContext* aContext, Layer* aMaskLayer);
already_AddRefed<gfxASurface> GetAsSurface();
virtual bool GetAsSurface(gfxASurface** aSurface,
SurfaceDescriptor* aDescriptor);
protected:
BasicShadowLayerManager* BasicManager()
@ -393,18 +410,17 @@ void
BasicShadowImageLayer::Swap(const SharedImage& aNewFront,
SharedImage* aNewBack)
{
nsRefPtr<gfxASurface> surface =
BasicManager()->OpenDescriptor(aNewFront);
AutoOpenSurface autoSurface(OPEN_READ_ONLY, aNewFront);
// Destroy mFrontBuffer if size different or image type is different
bool surfaceConfigChanged = surface->GetSize() != mSize;
bool surfaceConfigChanged = autoSurface.Size() != mSize;
if (IsSurfaceDescriptorValid(mFrontBuffer)) {
nsRefPtr<gfxASurface> front = BasicManager()->OpenDescriptor(mFrontBuffer);
AutoOpenSurface autoFront(OPEN_READ_ONLY, mFrontBuffer);
surfaceConfigChanged = surfaceConfigChanged ||
surface->GetContentType() != front->GetContentType();
autoSurface.ContentType() != autoFront.ContentType();
}
if (surfaceConfigChanged) {
DestroyFrontBuffer();
mSize = surface->GetSize();
mSize = autoSurface.Size();
}
// If mFrontBuffer
@ -413,7 +429,7 @@ BasicShadowImageLayer::Swap(const SharedImage& aNewFront,
} else {
*aNewBack = null_t();
}
mFrontBuffer = aNewFront.get_SurfaceDescriptor();
mFrontBuffer = aNewFront;
}
void
@ -423,9 +439,8 @@ BasicShadowImageLayer::Paint(gfxContext* aContext, Layer* aMaskLayer)
return;
}
nsRefPtr<gfxASurface> surface =
BasicManager()->OpenDescriptor(mFrontBuffer);
nsRefPtr<gfxPattern> pat = new gfxPattern(surface);
AutoOpenSurface autoSurface(OPEN_READ_ONLY, mFrontBuffer);
nsRefPtr<gfxPattern> pat = new gfxPattern(autoSurface.Get());
pat->SetFilter(mFilter);
// The visible region can extend outside the image, so just draw
@ -437,14 +452,16 @@ BasicShadowImageLayer::Paint(gfxContext* aContext, Layer* aMaskLayer)
aMaskLayer);
}
already_AddRefed<gfxASurface>
BasicShadowImageLayer::GetAsSurface()
bool
BasicShadowImageLayer::GetAsSurface(gfxASurface** aSurface,
SurfaceDescriptor* aDescriptor)
{
if (!IsSurfaceDescriptorValid(mFrontBuffer)) {
return nsnull;
return false;
}
return BasicManager()->OpenDescriptor(mFrontBuffer);
*aDescriptor = mFrontBuffer;
return true;
}
already_AddRefed<ImageLayer>

View File

@ -101,11 +101,14 @@ public:
/**
* Return a surface for this layer. Will use an existing surface, if
* possible, or may create a temporary surface.
* Implement this method for any layers that might be used as a mask.
* Should only return null if a surface cannor be created.
* possible, or may create a temporary surface. Implement this
* method for any layers that might be used as a mask. Should only
* return false if a surface cannot be created. If true is
* returned, only one of |aSurface| or |aDescriptor| is valid.
*/
virtual already_AddRefed<gfxASurface> GetAsSurface() { return nsnull; }
virtual bool GetAsSurface(gfxASurface** aSurface,
SurfaceDescriptor* aDescriptor)
{ return false; }
bool GetClipToVisibleRegion() { return mClipToVisibleRegion; }
void SetClipToVisibleRegion(bool aClip) { mClipToVisibleRegion = aClip; }

View File

@ -24,7 +24,7 @@
using namespace mozilla::gfx;
namespace mozilla {
namespace layers {
namespace layers {
/**
* Clips to the smallest device-pixel-aligned rectangle containing aRect
@ -839,7 +839,6 @@ BasicLayerManager::PaintLayer(gfxContext* aTarget,
gfxUtils::ClipToRegion(aTarget, aLayer->GetEffectiveVisibleRegion());
}
AutoSetOperator setOperator(aTarget, container->GetOperator());
gfxMatrix temp = aTarget->CurrentMatrix();
PaintWithMask(aTarget, aLayer->GetEffectiveOpacity(),
HasShadowManager() ? nsnull : aLayer->GetMaskLayer());
}
@ -1048,13 +1047,12 @@ BasicShadowLayerManager::ForwardTransaction()
layer->SetBackBuffer(newBack.get_SurfaceDescriptor());
} else if (newBack.type() == SharedImage::TYUVImage) {
const YUVImage& yuv = newBack.get_YUVImage();
nsRefPtr<gfxSharedImageSurface> YSurf = gfxSharedImageSurface::Open(yuv.Ydata());
nsRefPtr<gfxSharedImageSurface> USurf = gfxSharedImageSurface::Open(yuv.Udata());
nsRefPtr<gfxSharedImageSurface> VSurf = gfxSharedImageSurface::Open(yuv.Vdata());
layer->SetBackBufferYUVImage(YSurf, USurf, VSurf);
layer->SetBackBufferYUVImage(yuv.Ydata(), yuv.Udata(), yuv.Vdata());
} else {
layer->SetBackBuffer(SurfaceDescriptor());
layer->SetBackBufferYUVImage(nsnull, nsnull, nsnull);
layer->SetBackBufferYUVImage(SurfaceDescriptor(),
SurfaceDescriptor(),
SurfaceDescriptor());
}
break;
@ -1127,7 +1125,7 @@ BasicShadowLayerManager::CreateThebesLayer()
}
}
BasicShadowableLayer::~BasicShadowableLayer()
{
if (HasShadow()) {

View File

@ -279,11 +279,11 @@ public:
NS_RUNTIMEABORT("if this default impl is called, |aBuffer| leaks");
}
virtual void SetBackBufferYUVImage(gfxSharedImageSurface* aYBuffer,
gfxSharedImageSurface* aUBuffer,
gfxSharedImageSurface* aVBuffer)
virtual void SetBackBufferYUVImage(const SurfaceDescriptor& aYBuffer,
const SurfaceDescriptor& aUBuffer,
const SurfaceDescriptor& aVBuffer)
{
NS_RUNTIMEABORT("if this default impl is called, |aBuffer| leaks");
NS_RUNTIMEABORT("if this default impl is called, the buffers leak");
}
virtual void Disconnect()

View File

@ -4,65 +4,114 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "BasicLayersImpl.h"
#include "mozilla/layers/PLayers.h"
using namespace mozilla::gfx;
namespace mozilla {
namespace layers {
already_AddRefed<gfxASurface>
GetMaskSurfaceAndTransform(Layer* aMaskLayer, gfxMatrix* aMaskTransform)
void
AutoMaskData::Construct(const gfxMatrix& aTransform,
gfxASurface* aSurface)
{
if (aMaskLayer) {
nsRefPtr<gfxASurface> maskSurface =
static_cast<BasicImplData*>(aMaskLayer->ImplData())->GetAsSurface();
if (maskSurface) {
bool maskIs2D =
aMaskLayer->GetEffectiveTransform().CanDraw2D(aMaskTransform);
NS_ASSERTION(maskIs2D, "How did we end up with a 3D transform here?!");
return maskSurface.forget();
}
}
return nsnull;
MOZ_ASSERT(!IsConstructed());
mTransform = aTransform;
mSurface = aSurface;
}
void
AutoMaskData::Construct(const gfxMatrix& aTransform,
const SurfaceDescriptor& aSurface)
{
MOZ_ASSERT(!IsConstructed());
mTransform = aTransform;
mSurfaceOpener.construct(OPEN_READ_ONLY, aSurface);
}
gfxASurface*
AutoMaskData::GetSurface()
{
MOZ_ASSERT(IsConstructed());
if (mSurface) {
return mSurface.get();
}
return mSurfaceOpener.ref().Get();
}
const gfxMatrix&
AutoMaskData::GetTransform()
{
MOZ_ASSERT(IsConstructed());
return mTransform;
}
bool
AutoMaskData::IsConstructed()
{
return !!mSurface || !mSurfaceOpener.empty();
}
bool
GetMaskData(Layer* aMaskLayer, AutoMaskData* aMaskData)
{
if (aMaskLayer) {
nsRefPtr<gfxASurface> surface;
SurfaceDescriptor descriptor;
if (static_cast<BasicImplData*>(aMaskLayer->ImplData())
->GetAsSurface(getter_AddRefs(surface), &descriptor) &&
(surface || IsSurfaceDescriptorValid(descriptor))) {
gfxMatrix transform;
DebugOnly<bool> maskIs2D =
aMaskLayer->GetEffectiveTransform().CanDraw2D(&transform);
NS_ASSERTION(maskIs2D, "How did we end up with a 3D transform here?!");
if (surface) {
aMaskData->Construct(transform, surface);
} else {
aMaskData->Construct(transform, descriptor);
}
return true;
}
}
return false;
}
void
PaintWithMask(gfxContext* aContext, float aOpacity, Layer* aMaskLayer)
{
gfxMatrix maskTransform;
if (nsRefPtr<gfxASurface> maskSurface =
GetMaskSurfaceAndTransform(aMaskLayer, &maskTransform)) {
AutoMaskData mask;
if (GetMaskData(aMaskLayer, &mask)) {
if (aOpacity < 1.0) {
aContext->PushGroup(gfxASurface::CONTENT_COLOR_ALPHA);
aContext->Paint(aOpacity);
aContext->PopGroupToSource();
}
aContext->SetMatrix(maskTransform);
aContext->Mask(maskSurface);
aContext->SetMatrix(mask.GetTransform());
aContext->Mask(mask.GetSurface());
return;
}
// if there is no mask, just paint normally
aContext->Paint(aOpacity);
}
}
void
FillWithMask(gfxContext* aContext, float aOpacity, Layer* aMaskLayer)
{
gfxMatrix maskTransform;
if (nsRefPtr<gfxASurface> maskSurface =
GetMaskSurfaceAndTransform(aMaskLayer, &maskTransform)) {
AutoMaskData mask;
if (GetMaskData(aMaskLayer, &mask)) {
if (aOpacity < 1.0) {
aContext->PushGroup(gfxASurface::CONTENT_COLOR_ALPHA);
aContext->FillWithOpacity(aOpacity);
aContext->PopGroupToSource();
aContext->SetMatrix(maskTransform);
aContext->Mask(maskSurface);
aContext->SetMatrix(mask.GetTransform());
aContext->Mask(mask.GetSurface());
} else {
aContext->Save();
aContext->Clip();
aContext->SetMatrix(maskTransform);
aContext->Mask(maskSurface);
aContext->SetMatrix(mask.GetTransform());
aContext->Mask(mask.GetSurface());
aContext->NewPath();
aContext->Restore();
}
@ -71,8 +120,8 @@ FillWithMask(gfxContext* aContext, float aOpacity, Layer* aMaskLayer)
// if there is no mask, just fill normally
aContext->FillWithOpacity(aOpacity);
}
}
BasicImplData*
ToData(Layer* aLayer)
{
@ -96,6 +145,6 @@ ShouldShadow(Layer* aLayer)
return true;
}
}
}
}
}

View File

@ -6,6 +6,7 @@
#ifndef GFX_BASICLAYERSIMPL_H
#define GFX_BASICLAYERSIMPL_H
#include "ipc/AutoOpenSurface.h"
#include "ipc/ShadowLayerChild.h"
#include "BasicLayers.h"
#include "BasicImplData.h"
@ -62,14 +63,58 @@ protected:
}
};
/**
* Drawing with a mask requires a mask surface and a transform.
* Sometimes the mask surface is a direct gfxASurface, but other times
* it's a SurfaceDescriptor. For SurfaceDescriptor, we need to use a
* scoped AutoOpenSurface to get a gfxASurface for the
* SurfaceDescriptor.
*
* This helper class manages the gfxASurface-or-SurfaceDescriptor
* logic.
*/
class NS_STACK_CLASS AutoMaskData {
public:
AutoMaskData() { }
~AutoMaskData() { }
/**
* Construct this out of either a gfxASurface or a
* SurfaceDescriptor. Construct() must only be called once.
* GetSurface() and GetTransform() must not be called until this has
* been constructed.
*/
void Construct(const gfxMatrix& aTransform,
gfxASurface* aSurface);
void Construct(const gfxMatrix& aTransform,
const SurfaceDescriptor& aSurface);
/** The returned surface can't escape the scope of |this|. */
gfxASurface* GetSurface();
const gfxMatrix& GetTransform();
private:
bool IsConstructed();
gfxMatrix mTransform;
nsRefPtr<gfxASurface> mSurface;
Maybe<AutoOpenSurface> mSurfaceOpener;
AutoMaskData(const AutoMaskData&) MOZ_DELETE;
AutoMaskData& operator=(const AutoMaskData&) MOZ_DELETE;
};
/*
* Extract a mask surface for a mask layer
* Returns a surface for the mask layer if a mask layer is present and has a
* valid surface and transform; nsnull otherwise.
* The transform for the layer will be put in aMaskTransform
* Returns true and through outparams a surface for the mask layer if
* a mask layer is present and has a valid surface and transform;
* false otherwise.
* The transform for the layer will be put in aMaskData
*/
already_AddRefed<gfxASurface>
GetMaskSurfaceAndTransform(Layer* aMaskLayer, gfxMatrix* aMaskTransform);
bool
GetMaskData(Layer* aMaskLayer, AutoMaskData* aMaskData);
// Paint the current source to a context using a mask, if present
void
@ -117,7 +162,7 @@ MaybeCreateShadowFor(BasicShadowableLayer* aLayer,
aLayer->SetShadow(shadow);
(aMgr->*aMethod)(aLayer);
aMgr->Hold(aLayer->AsLayer());
}
}
#define MAYBE_CREATE_SHADOW(_type) \
MaybeCreateShadowFor(layer, this, \

View File

@ -228,6 +228,58 @@ BasicThebesLayer::PaintThebes(gfxContext* aContext,
}
}
/**
* AutoOpenBuffer is a helper that builds on top of AutoOpenSurface,
* which we need to get a gfxASurface from a SurfaceDescriptor. For
* other layer types, simple lexical scoping of AutoOpenSurface is
* easy. For ThebesLayers, the lifetime of buffer mappings doesn't
* exactly match simple lexical scopes, so naively putting
* AutoOpenSurfaces on the stack doesn't always work. We use this
* helper to track openings instead.
*
* Any surface that's opened while painting this ThebesLayer will
* notify this helper and register itself for unmapping.
*
* We ignore buffer destruction here because the shadow layers
* protocol already ensures that destroyed buffers stay alive until
* end-of-transaction.
*/
struct NS_STACK_CLASS AutoBufferTracker {
AutoBufferTracker(BasicShadowableThebesLayer* aLayer)
: mLayer(aLayer)
{
MOZ_ASSERT(!mLayer->mBufferTracker);
mLayer->mBufferTracker = this;
if (IsSurfaceDescriptorValid(mLayer->mBackBuffer)) {
mInitialBuffer.construct(OPEN_READ_WRITE, mLayer->mBackBuffer);
mLayer->mBuffer.MapBuffer(mInitialBuffer.ref().Get());
}
}
~AutoBufferTracker() {
mLayer->mBufferTracker = nsnull;
mLayer->mBuffer.UnmapBuffer();
// mInitialBuffer and mNewBuffer will clean up after themselves if
// they were constructed.
}
gfxASurface*
CreatedBuffer(const SurfaceDescriptor& aDescriptor) {
Maybe<AutoOpenSurface>* surface = mNewBuffers.AppendElement();
surface->construct(OPEN_READ_WRITE, aDescriptor);
return surface->ref().Get();
}
Maybe<AutoOpenSurface> mInitialBuffer;
nsAutoTArray<Maybe<AutoOpenSurface>, 2> mNewBuffers;
BasicShadowableThebesLayer* mLayer;
private:
AutoBufferTracker(const AutoBufferTracker&) MOZ_DELETE;
AutoBufferTracker& operator=(const AutoBufferTracker&) MOZ_DELETE;
};
void
BasicShadowableThebesLayer::PaintThebes(gfxContext* aContext,
Layer* aMaskLayer,
@ -240,6 +292,8 @@ BasicShadowableThebesLayer::PaintThebes(gfxContext* aContext,
return;
}
AutoBufferTracker tracker(this);
BasicThebesLayer::PaintThebes(aContext, nsnull, aCallback, aCallbackData, aReadback);
if (aMaskLayer) {
static_cast<BasicImplData*>(aMaskLayer->ImplData())
@ -280,18 +334,22 @@ BasicShadowableThebesLayer::SyncFrontBufferToBackBuffer()
return;
}
nsRefPtr<gfxASurface> backBuffer;
gfxASurface* backBuffer = mBuffer.GetBuffer();
if (!IsSurfaceDescriptorValid(mBackBuffer)) {
NS_ABORT_IF_FALSE(mROFrontBuffer.type() == OptionalThebesBuffer::TThebesBuffer,
"should have a front RO buffer by now");
MOZ_ASSERT(!backBuffer);
MOZ_ASSERT(mROFrontBuffer.type() == OptionalThebesBuffer::TThebesBuffer);
const ThebesBuffer roFront = mROFrontBuffer.get_ThebesBuffer();
nsRefPtr<gfxASurface> roFrontBuffer = BasicManager()->OpenDescriptor(roFront.buffer());
backBuffer = CreateBuffer(roFrontBuffer->GetContentType(), roFrontBuffer->GetSize());
} else {
backBuffer = BasicManager()->OpenDescriptor(mBackBuffer);
AutoOpenSurface roFrontBuffer(OPEN_READ_ONLY, roFront.buffer());
AllocBackBuffer(roFrontBuffer.ContentType(), roFrontBuffer.Size());
}
mFrontAndBackBufferDiffer = false;
Maybe<AutoOpenSurface> autoBackBuffer;
if (!backBuffer) {
autoBackBuffer.construct(OPEN_READ_WRITE, mBackBuffer);
backBuffer = autoBackBuffer.ref().Get();
}
if (OptionalThebesBuffer::Tnull_t == mROFrontBuffer.type()) {
// We didn't get back a read-only ref to our old back buffer (the
// parent's new front buffer). If the parent is pushing updates
@ -310,10 +368,10 @@ BasicShadowableThebesLayer::SyncFrontBufferToBackBuffer()
mFrontUpdatedRegion.GetBounds().height));
const ThebesBuffer roFront = mROFrontBuffer.get_ThebesBuffer();
nsRefPtr<gfxASurface> roFrontBuffer = BasicManager()->OpenDescriptor(roFront.buffer());
AutoOpenSurface autoROFront(OPEN_READ_ONLY, roFront.buffer());
mBuffer.SetBackingBufferAndUpdateFrom(
backBuffer,
roFrontBuffer, roFront.rect(), roFront.rotation(),
autoROFront.Get(), roFront.rect(), roFront.rotation(),
mFrontUpdatedRegion);
mIsNewBuffer = false;
// Now the new back buffer has the same (interesting) pixels as the
@ -364,6 +422,23 @@ BasicShadowableThebesLayer::PaintBuffer(gfxContext* aContext,
mBackBuffer);
}
void
BasicShadowableThebesLayer::AllocBackBuffer(Buffer::ContentType aType,
const nsIntSize& aSize)
{
// This function may *not* open the buffer it allocates.
if (!BasicManager()->AllocBuffer(gfxIntSize(aSize.width, aSize.height),
aType,
&mBackBuffer)) {
enum { buflen = 256 };
char buf[buflen];
PR_snprintf(buf, buflen,
"creating ThebesLayer 'back buffer' failed! width=%d, height=%d, type=%x",
aSize.width, aSize.height, int(aType));
NS_RUNTIMEABORT(buf);
}
}
already_AddRefed<gfxASurface>
BasicShadowableThebesLayer::CreateBuffer(Buffer::ContentType aType,
const nsIntSize& aSize)
@ -382,24 +457,15 @@ BasicShadowableThebesLayer::CreateBuffer(Buffer::ContentType aType,
mBackBuffer = SurfaceDescriptor();
}
// XXX error handling
if (!BasicManager()->AllocBuffer(gfxIntSize(aSize.width, aSize.height),
aType,
&mBackBuffer)) {
enum { buflen = 256 };
char buf[buflen];
PR_snprintf(buf, buflen,
"creating ThebesLayer 'back buffer' failed! width=%d, height=%d, type=%x",
aSize.width, aSize.height, int(aType));
NS_RUNTIMEABORT(buf);
}
AllocBackBuffer(aType, aSize);
NS_ABORT_IF_FALSE(!mIsNewBuffer,
"Bad! Did we create a buffer twice without painting?");
mIsNewBuffer = true;
return BasicManager()->OpenDescriptor(mBackBuffer);
nsRefPtr<gfxASurface> buffer = mBufferTracker->CreatedBuffer(mBackBuffer);
return buffer.forget();
}
void
@ -483,12 +549,10 @@ BasicShadowThebesLayer::Swap(const ThebesBuffer& aNewFront,
OptionalThebesBuffer* aReadOnlyFront,
nsIntRegion* aFrontUpdatedRegion)
{
nsRefPtr<gfxASurface> newFrontBuffer =
BasicManager()->OpenDescriptor(aNewFront.buffer());
if (IsSurfaceDescriptorValid(mFrontBufferDescriptor)) {
nsRefPtr<gfxASurface> currentFront = BasicManager()->OpenDescriptor(mFrontBufferDescriptor);
if (currentFront->GetSize() != newFrontBuffer->GetSize()) {
AutoOpenSurface autoNewFrontBuffer(OPEN_READ_ONLY, aNewFront.buffer());
AutoOpenSurface autoCurrentFront(OPEN_READ_ONLY, mFrontBufferDescriptor);
if (autoCurrentFront.Size() != autoNewFrontBuffer.Size()) {
// Current front buffer is obsolete
DestroyFrontBuffer();
}
@ -504,12 +568,11 @@ BasicShadowThebesLayer::Swap(const ThebesBuffer& aNewFront,
// They might overlap with our old pixels.
aNewBackValidRegion->Sub(mOldValidRegion, aUpdatedRegion);
nsRefPtr<gfxASurface> unused;
nsIntRect backRect;
nsIntPoint backRotation;
mFrontBuffer.Swap(
newFrontBuffer, aNewFront.rect(), aNewFront.rotation(),
getter_AddRefs(unused), &backRect, &backRotation);
aNewFront.rect(), aNewFront.rotation(),
&backRect, &backRotation);
if (aNewBack->type() != OptionalThebesBuffer::Tnull_t) {
aNewBack->get_ThebesBuffer().rect() = backRect;
@ -534,11 +597,16 @@ BasicShadowThebesLayer::PaintThebes(gfxContext* aContext,
NS_ASSERTION(BasicManager()->IsRetained(),
"ShadowThebesLayer makes no sense without retained mode");
if (!mFrontBuffer.GetBuffer()) {
if (!IsSurfaceDescriptorValid(mFrontBufferDescriptor)) {
return;
}
AutoOpenSurface autoFrontBuffer(OPEN_READ_ONLY, mFrontBufferDescriptor);
mFrontBuffer.MapBuffer(autoFrontBuffer.Get());
mFrontBuffer.DrawTo(this, aContext, GetEffectiveOpacity(), aMaskLayer);
mFrontBuffer.UnmapBuffer();
}
already_AddRefed<ThebesLayer>

View File

@ -103,14 +103,19 @@ protected:
Buffer mBuffer;
};
struct AutoBufferTracker;
class BasicShadowableThebesLayer : public BasicThebesLayer,
public BasicShadowableLayer
{
friend struct AutoBufferTracker;
typedef BasicThebesLayer Base;
public:
BasicShadowableThebesLayer(BasicShadowLayerManager* aManager)
: BasicThebesLayer(aManager)
, mBufferTracker(nsnull)
, mIsNewBuffer(false)
, mFrontAndBackBufferDiffer(false)
{
@ -163,6 +168,10 @@ private:
LayerManager::DrawThebesLayerCallback aCallback,
void* aCallbackData) MOZ_OVERRIDE;
// This function may *not* open the buffer it allocates.
void
AllocBackBuffer(Buffer::ContentType aType, const nsIntSize& aSize);
virtual already_AddRefed<gfxASurface>
CreateBuffer(Buffer::ContentType aType, const nsIntSize& aSize) MOZ_OVERRIDE;
@ -180,6 +189,12 @@ private:
nsIntRect mBackBufferRect;
nsIntPoint mBackBufferRectRotation;
// This helper object lives on the stack during its lifetime and
// keeps track of buffers we might have mapped and/or allocated.
// When it goes out of scope on the stack, it unmaps whichever
// buffers have been mapped (if any).
AutoBufferTracker* mBufferTracker;
bool mIsNewBuffer;
OptionalThebesBuffer mROFrontBuffer;
nsIntRegion mFrontUpdatedRegion;

View File

@ -4,6 +4,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ipc/AutoOpenSurface.h"
#include "mozilla/layers/PLayers.h"
#include "mozilla/layers/ShadowLayers.h"
#include "ShadowBufferD3D9.h"
@ -311,12 +312,11 @@ ShadowCanvasLayerD3D9::Swap(const CanvasSurface& aNewFront,
NS_ASSERTION(aNewFront.type() == CanvasSurface::TSurfaceDescriptor,
"ShadowCanvasLayerD3D9::Swap expected CanvasSurface surface");
nsRefPtr<gfxASurface> surf =
ShadowLayerForwarder::OpenDescriptor(aNewFront);
AutoOpenSurface surf(OPEN_READ_ONLY, aNewFront);
if (!mBuffer) {
Init(needYFlip);
}
mBuffer->Upload(surf, GetVisibleRegion().GetBounds());
mBuffer->Upload(surf.Get(), GetVisibleRegion().GetBounds());
*aNewBack = aNewFront;
}

View File

@ -3,6 +3,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ipc/AutoOpenSurface.h"
#include "mozilla/layers/PLayers.h"
#include "mozilla/layers/ShadowLayers.h"
#include "ShadowBufferD3D9.h"
@ -551,19 +552,17 @@ ShadowImageLayerD3D9::Swap(const SharedImage& aNewFront,
if (!mBuffer) {
mBuffer = new ShadowBufferD3D9(this);
}
nsRefPtr<gfxASurface> surf =
ShadowLayerForwarder::OpenDescriptor(aNewFront.get_SurfaceDescriptor());
mBuffer->Upload(surf, GetVisibleRegion().GetBounds());
AutoOpenSurface surf(OPEN_READ_ONLY, aNewFront.get_SurfaceDescriptor());
mBuffer->Upload(surf.Get(), GetVisibleRegion().GetBounds());
} else {
const YUVImage& yuv = aNewFront.get_YUVImage();
nsRefPtr<gfxSharedImageSurface> surfY =
gfxSharedImageSurface::Open(yuv.Ydata());
nsRefPtr<gfxSharedImageSurface> surfU =
gfxSharedImageSurface::Open(yuv.Udata());
nsRefPtr<gfxSharedImageSurface> surfV =
gfxSharedImageSurface::Open(yuv.Vdata());
AutoOpenSurface asurfY(OPEN_READ_ONLY, yuv.Ydata());
AutoOpenSurface asurfU(OPEN_READ_ONLY, yuv.Udata());
AutoOpenSurface asurfV(OPEN_READ_ONLY, yuv.Vdata());
gfxImageSurface* surfY = asurfY.GetAsImage();
gfxImageSurface* surfU = asurfU.GetAsImage();
gfxImageSurface* surfV = asurfV.GetAsImage();
PlanarYCbCrImage::Data data;
data.mYChannel = surfY->Data();

View File

@ -8,6 +8,7 @@
/* This must occur *after* layers/PLayers.h to avoid typedefs conflicts. */
#include "mozilla/Util.h"
#include "ipc/AutoOpenSurface.h"
#include "mozilla/layers/ShadowLayers.h"
#include "ShadowBufferD3D9.h"
@ -617,8 +618,8 @@ ShadowThebesLayerD3D9::Swap(const ThebesBuffer& aNewFront,
}
if (mBuffer) {
nsRefPtr<gfxASurface> surf = ShadowLayerForwarder::OpenDescriptor(aNewFront.buffer());
mBuffer->Upload(surf, GetVisibleRegion().GetBounds());
AutoOpenSurface surf(OPEN_READ_ONLY, aNewFront.buffer());
mBuffer->Upload(surf.Get(), GetVisibleRegion().GetBounds());
}
*aNewBack = aNewFront;

View File

@ -0,0 +1,73 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: sw=2 ts=8 et :
*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_layers_AutoOpenSurface_h
#define mozilla_layers_AutoOpenSurface_h 1
#include "base/basictypes.h"
#include "gfxASurface.h"
#include "mozilla/layers/PLayers.h"
#include "ShadowLayers.h"
namespace mozilla {
namespace layers {
/**
* Some surface types can be fairly expensive to open. This helper
* tries to put off opening surfaces as long as it can, until
* ahsolutely necessary. And after being forced to open, it remembers
* the mapping so it doesn't need to be redone.
*/
class NS_STACK_CLASS AutoOpenSurface
{
public:
typedef gfxASurface::gfxContentType gfxContentType;
/** |aDescriptor| must be valid while AutoOpenSurface is
* in scope. */
AutoOpenSurface(OpenMode aMode, const SurfaceDescriptor& aDescriptor);
~AutoOpenSurface();
/**
* These helpers do not necessarily need to open the descriptor to
* return an answer.
*/
gfxContentType ContentType();
gfxIntSize Size();
/** This can't escape the scope of AutoOpenSurface. */
gfxASurface* Get();
/**
* This can't escape the scope of AutoOpenSurface.
*
* This method is currently just a convenience wrapper around
* gfxASurface::GetAsImageSurface() --- it returns a valid surface
* exactly when this->Get()->GetAsImageSurface() would. Clients
* that need guaranteed (fast) ImageSurfaces should allocate the
* underlying descriptor with capability MAP_AS_IMAGE_SURFACE, in
* which case this helper is guaranteed to succeed.
*/
gfxImageSurface* GetAsImage();
private:
SurfaceDescriptor mDescriptor;
nsRefPtr<gfxASurface> mSurface;
nsRefPtr<gfxImageSurface> mSurfaceAsImage;
OpenMode mMode;
AutoOpenSurface(const AutoOpenSurface&) MOZ_DELETE;
AutoOpenSurface& operator=(const AutoOpenSurface&) MOZ_DELETE;
};
} // namespace layers
} // namespace mozilla
#endif // ifndef mozilla_layers_AutoOpenSurface_h

View File

@ -0,0 +1,28 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: sw=2 ts=8 et :
*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
include protocol PLayers;
namespace mozilla {
namespace layers {
/**
* This is a trivial protocol that's used to track gralloc buffers
* across thread contexts. A live PGrallocBuffer actor always
* corresponds 1:1 to a pre-shared gralloc buffer (sharing is done by
* the PGrallocBuffer constructor).
*/
async protocol PGrallocBuffer {
manager PLayers;
/** Gralloc buffers can be "owned" by either parent or child. */
both:
async __delete__();
};
} // namespace layers
} // namespace mozilla

View File

@ -6,20 +6,24 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
include protocol PCompositor;
include protocol PGrallocBuffer;
include protocol PLayer;
include protocol PRenderFrame;
include "gfxipc/ShadowLayerUtils.h";
using gfx3DMatrix;
using gfxIntSize;
using gfxPoint;
using gfxRGBA;
using nsIntPoint;
using nsIntRect;
using nsIntRegion;
using nsIntSize;
using mozilla::gfxContentType;
using mozilla::GraphicsFilterType;
using mozilla::layers::FrameMetrics;
using mozilla::layers::MagicGrallocBufferHandle;
using mozilla::layers::SurfaceDescriptorX11;
using mozilla::null_t;
using mozilla::WindowsHandle;
@ -42,20 +46,30 @@ struct OpCreateImageLayer { PLayer layer; };
struct OpCreateColorLayer { PLayer layer; };
struct OpCreateCanvasLayer { PLayer layer; };
union MaybeMagicGrallocBufferHandle {
MagicGrallocBufferHandle;
null_t;
};
struct SurfaceDescriptorD3D10 {
WindowsHandle handle;
};
struct SurfaceDescriptorGralloc {
PGrallocBuffer buffer;
};
union SurfaceDescriptor {
Shmem;
SurfaceDescriptorD3D10;
SurfaceDescriptorGralloc;
SurfaceDescriptorX11;
};
struct YUVImage {
Shmem Ydata;
Shmem Udata;
Shmem Vdata;
SurfaceDescriptor Ydata;
SurfaceDescriptor Udata;
SurfaceDescriptor Vdata;
nsIntRect picture;
};
@ -206,9 +220,17 @@ union EditReply {
sync protocol PLayers {
manager PRenderFrame or PCompositor;
manages PGrallocBuffer;
manages PLayer;
parent:
/**
* Only the parent side has privileges to allocate the buffer.
* Allocation may fail (pmem is a scarce resource), and if so null_t
* is returned.
*/
sync PGrallocBuffer(gfxIntSize size, gfxContentType content)
returns (MaybeMagicGrallocBufferHandle handle);
async PLayer();
// The isFirstPaint flag can be used to indicate that this is the first update

View File

@ -25,6 +25,16 @@ struct SurfaceDescriptorX11 {
} }
#endif
#if defined(MOZ_WIDGET_GONK)
# include "mozilla/layers/ShadowLayerUtilsGralloc.h"
#else
namespace mozilla { namespace layers {
struct MagicGrallocBufferHandle {
bool operator==(const MagicGrallocBufferHandle&) const { return false; }
};
} }
#endif
namespace IPC {
template <>
@ -62,7 +72,16 @@ struct ParamTraits<mozilla::layers::SurfaceDescriptorX11> {
static void Write(Message*, const paramType&) {}
static bool Read(const Message*, void**, paramType*) { return false; }
};
#endif // !defined(MOZ_HAVE_XSURFACEDESCRIPTOR)
#endif // !defined(MOZ_HAVE_XSURFACEDESCRIPTORX11)
#if !defined(MOZ_HAVE_SURFACEDESCRIPTORGRALLOC)
template <>
struct ParamTraits<mozilla::layers::MagicGrallocBufferHandle> {
typedef mozilla::layers::MagicGrallocBufferHandle paramType;
static void Write(Message*, const paramType&) {}
static bool Read(const Message*, void**, paramType*) { return false; }
};
#endif // !defined(MOZ_HAVE_XSURFACEDESCRIPTORGRALLOC)
}

View File

@ -16,29 +16,48 @@ namespace layers {
// Platform-specific shadow-layers interfaces. See ShadowLayers.h.
// D3D10 doesn't need all these yet.
bool
ShadowLayerForwarder::PlatformAllocDoubleBuffer(const gfxIntSize&,
gfxASurface::gfxContentType,
SurfaceDescriptor*,
SurfaceDescriptor*)
{
return false;
}
bool
ShadowLayerForwarder::PlatformAllocBuffer(const gfxIntSize&,
gfxASurface::gfxContentType,
uint32_t,
SurfaceDescriptor*)
{
return false;
}
/*static*/ already_AddRefed<gfxASurface>
ShadowLayerForwarder::PlatformOpenDescriptor(const SurfaceDescriptor&)
ShadowLayerForwarder::PlatformOpenDescriptor(OpenMode,
const SurfaceDescriptor&)
{
return nsnull;
}
/*static*/ bool
ShadowLayerForwarder::PlatformCloseDescriptor(const SurfaceDescriptor&)
{
return false;
}
/*static*/ bool
ShadowLayerForwarder::PlatformGetDescriptorSurfaceContentType(
const SurfaceDescriptor&,
OpenMode,
gfxContentType*,
gfxASurface**)
{
return false;
}
/*static*/ bool
ShadowLayerForwarder::PlatformGetDescriptorSurfaceSize(
const SurfaceDescriptor&,
OpenMode,
gfxIntSize*,
gfxASurface**)
{
return false;
}
bool
ShadowLayerForwarder::PlatformDestroySharedSurface(SurfaceDescriptor*)
{

View File

@ -0,0 +1,348 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: sw=2 ts=8 et :
*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/layers/PGrallocBufferChild.h"
#include "mozilla/layers/PGrallocBufferParent.h"
#include "mozilla/layers/PLayersChild.h"
#include "mozilla/layers/ShadowLayers.h"
#include "mozilla/unused.h"
#include "nsXULAppAPI.h"
#include "ShadowLayerUtilsGralloc.h"
#include "gfxImageSurface.h"
using namespace android;
using namespace base;
using namespace mozilla::layers;
namespace IPC {
void
ParamTraits<MagicGrallocBufferHandle>::Write(Message* aMsg,
const paramType& aParam)
{
Flattenable *flattenable = aParam.mGraphicBuffer.get();
size_t nbytes = flattenable->getFlattenedSize();
size_t nfds = flattenable->getFdCount();
char data[nbytes];
int fds[nfds];
flattenable->flatten(data, nbytes, fds, nfds);
aMsg->WriteSize(nbytes);
aMsg->WriteSize(nfds);
aMsg->WriteBytes(data, nbytes);
for (size_t n = 0; n < nfds; ++n) {
// These buffers can't die in transit because they're created
// synchonously and the parent-side buffer can only be dropped if
// there's a crash.
aMsg->WriteFileDescriptor(FileDescriptor(fds[n], false));
}
}
bool
ParamTraits<MagicGrallocBufferHandle>::Read(const Message* aMsg,
void** aIter, paramType* aResult)
{
size_t nbytes;
size_t nfds;
const char* data;
if (!aMsg->ReadSize(aIter, &nbytes) ||
!aMsg->ReadSize(aIter, &nfds) ||
!aMsg->ReadBytes(aIter, &data, nbytes)) {
return false;
}
int fds[nfds];
for (size_t n = 0; n < nfds; ++n) {
FileDescriptor fd;
if (!aMsg->ReadFileDescriptor(aIter, &fd)) {
return false;
}
// If the GraphicBuffer was shared cross-process, SCM_RIGHTS does
// the right thing and dup's the fd. If it's shared cross-thread,
// SCM_RIGHTS doesn't dup the fd. That's surprising, but we just
// deal with it here. NB: only the "default" (master) process can
// alloc gralloc buffers.
bool sameProcess = (XRE_GetProcessType() == GeckoProcessType_Default);
int dupFd = sameProcess ? dup(fd.fd) : fd.fd;
fds[n] = dupFd;
}
sp<GraphicBuffer> buffer(new GraphicBuffer());
Flattenable *flattenable = buffer.get();
if (NO_ERROR == flattenable->unflatten(data, nbytes, fds, nfds)) {
aResult->mGraphicBuffer = buffer;
return true;
}
return false;
}
} // namespace IPC
namespace mozilla {
namespace layers {
MagicGrallocBufferHandle::MagicGrallocBufferHandle(const sp<GraphicBuffer>& aGraphicBuffer)
: mGraphicBuffer(aGraphicBuffer)
{
}
//-----------------------------------------------------------------------------
// Parent process
static gfxASurface::gfxImageFormat
ImageFormatForPixelFormat(android::PixelFormat aFormat)
{
switch (aFormat) {
case PIXEL_FORMAT_RGBA_8888:
return gfxASurface::ImageFormatARGB32;
case PIXEL_FORMAT_RGBX_8888:
return gfxASurface::ImageFormatRGB24;
case PIXEL_FORMAT_RGB_565:
return gfxASurface::ImageFormatRGB16_565;
case PIXEL_FORMAT_A_8:
return gfxASurface::ImageFormatA8;
default:
MOZ_NOT_REACHED("Unknown gralloc pixel format");
}
return gfxASurface::ImageFormatARGB32;
}
static android::PixelFormat
PixelFormatForImageFormat(gfxASurface::gfxImageFormat aFormat)
{
switch (aFormat) {
case gfxASurface::ImageFormatARGB32:
return android::PIXEL_FORMAT_RGBA_8888;
case gfxASurface::ImageFormatRGB24:
return android::PIXEL_FORMAT_RGBX_8888;
case gfxASurface::ImageFormatRGB16_565:
return android::PIXEL_FORMAT_RGB_565;
case gfxASurface::ImageFormatA8:
return android::PIXEL_FORMAT_A_8;
default:
MOZ_NOT_REACHED("Unknown gralloc pixel format");
}
return gfxASurface::ImageFormatARGB32;
}
static android::PixelFormat
PixelFormatForContentType(gfxASurface::gfxContentType aContentType)
{
return PixelFormatForImageFormat(
gfxPlatform::GetPlatform()->OptimalFormatForContent(aContentType));
}
static gfxASurface::gfxContentType
ContentTypeFromPixelFormat(android::PixelFormat aFormat)
{
return gfxASurface::ContentFromFormat(ImageFormatForPixelFormat(aFormat));
}
/*static*/ PGrallocBufferParent*
GrallocBufferActor::Create(const gfxIntSize& aSize,
const gfxContentType& aContent,
MaybeMagicGrallocBufferHandle* aOutHandle)
{
GrallocBufferActor* actor = new GrallocBufferActor();
*aOutHandle = null_t();
android::PixelFormat format = PixelFormatForContentType(aContent);
sp<GraphicBuffer> buffer(
new GraphicBuffer(aSize.width, aSize.height, format,
GraphicBuffer::USAGE_SW_READ_OFTEN |
GraphicBuffer::USAGE_SW_WRITE_OFTEN |
GraphicBuffer::USAGE_HW_TEXTURE));
if (buffer->initCheck() != OK)
return actor;
actor->mGraphicBuffer = buffer;
*aOutHandle = MagicGrallocBufferHandle(buffer);
return actor;
}
bool
ShadowLayerManager::PlatformDestroySharedSurface(SurfaceDescriptor* aSurface)
{
if (SurfaceDescriptor::TSurfaceDescriptorGralloc != aSurface->type()) {
return false;
}
PGrallocBufferParent* gbp =
aSurface->get_SurfaceDescriptorGralloc().bufferParent();
unused << PGrallocBufferParent::Send__delete__(gbp);
*aSurface = SurfaceDescriptor();
return true;
}
//-----------------------------------------------------------------------------
// Child process
/*static*/ PGrallocBufferChild*
GrallocBufferActor::Create()
{
return new GrallocBufferActor();
}
void
GrallocBufferActor::InitFromHandle(const MagicGrallocBufferHandle& aHandle)
{
MOZ_ASSERT(!mGraphicBuffer.get());
MOZ_ASSERT(aHandle.mGraphicBuffer.get());
mGraphicBuffer = aHandle.mGraphicBuffer;
}
bool
ShadowLayerForwarder::PlatformAllocBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
uint32_t aCaps,
SurfaceDescriptor* aBuffer)
{
// Gralloc buffers are efficiently mappable as gfxImageSurface, so
// no need to check |aCaps & MAP_AS_IMAGE_SURFACE|.
MaybeMagicGrallocBufferHandle handle;
PGrallocBufferChild* gc =
mShadowManager->SendPGrallocBufferConstructor(aSize, aContent, &handle);
if (handle.Tnull_t == handle.type()) {
PGrallocBufferChild::Send__delete__(gc);
return false;
}
GrallocBufferActor* gba = static_cast<GrallocBufferActor*>(gc);
gba->InitFromHandle(handle.get_MagicGrallocBufferHandle());
*aBuffer = SurfaceDescriptorGralloc(nsnull, gc);
return true;
}
//-----------------------------------------------------------------------------
// Both processes
/*static*/ sp<GraphicBuffer>
GrallocBufferActor::GetFrom(const SurfaceDescriptorGralloc& aDescriptor)
{
GrallocBufferActor* gba = nsnull;
if (PGrallocBufferChild* child = aDescriptor.bufferChild()) {
gba = static_cast<GrallocBufferActor*>(child);
} else if (PGrallocBufferParent* parent = aDescriptor.bufferParent()) {
gba = static_cast<GrallocBufferActor*>(parent);
}
return gba->mGraphicBuffer;
}
/*static*/ already_AddRefed<gfxASurface>
ShadowLayerForwarder::PlatformOpenDescriptor(OpenMode aMode,
const SurfaceDescriptor& aSurface)
{
if (SurfaceDescriptor::TSurfaceDescriptorGralloc != aSurface.type()) {
return nsnull;
}
sp<GraphicBuffer> buffer =
GrallocBufferActor::GetFrom(aSurface.get_SurfaceDescriptorGralloc());
uint32_t usage = GRALLOC_USAGE_SW_READ_OFTEN;
if (OPEN_READ_WRITE == aMode) {
usage |= GRALLOC_USAGE_SW_WRITE_OFTEN;
}
void *vaddr;
DebugOnly<status_t> status = buffer->lock(usage, &vaddr);
// If we fail to lock, we'll just end up aborting anyway.
MOZ_ASSERT(status == OK);
gfxIntSize size = gfxIntSize(buffer->getWidth(), buffer->getHeight());
gfxImageFormat format = ImageFormatForPixelFormat(buffer->getPixelFormat());
long pixelStride = buffer->getStride();
long byteStride = pixelStride * gfxASurface::BytePerPixelFromFormat(format);
nsRefPtr<gfxASurface> surf =
new gfxImageSurface((unsigned char*)vaddr, size, byteStride, format);
return surf->CairoStatus() ? nsnull : surf.forget();
}
/*static*/ bool
ShadowLayerForwarder::PlatformGetDescriptorSurfaceContentType(
const SurfaceDescriptor& aDescriptor, OpenMode aMode,
gfxContentType* aContent,
gfxASurface** aSurface)
{
if (SurfaceDescriptor::TSurfaceDescriptorGralloc != aDescriptor.type()) {
return false;
}
sp<GraphicBuffer> buffer =
GrallocBufferActor::GetFrom(aDescriptor.get_SurfaceDescriptorGralloc());
*aContent = ContentTypeFromPixelFormat(buffer->getPixelFormat());
return true;
}
/*static*/ bool
ShadowLayerForwarder::PlatformGetDescriptorSurfaceSize(
const SurfaceDescriptor& aDescriptor, OpenMode aMode,
gfxIntSize* aSize,
gfxASurface** aSurface)
{
if (SurfaceDescriptor::TSurfaceDescriptorGralloc != aDescriptor.type()) {
return false;
}
sp<GraphicBuffer> buffer =
GrallocBufferActor::GetFrom(aDescriptor.get_SurfaceDescriptorGralloc());
*aSize = gfxIntSize(buffer->getWidth(), buffer->getHeight());
return true;
}
/*static*/ bool
ShadowLayerForwarder::PlatformDestroySharedSurface(SurfaceDescriptor* aSurface)
{
if (SurfaceDescriptor::TSurfaceDescriptorGralloc != aSurface->type()) {
return false;
}
PGrallocBufferChild* gbp =
aSurface->get_SurfaceDescriptorGralloc().bufferChild();
PGrallocBufferChild::Send__delete__(gbp);
*aSurface = SurfaceDescriptor();
return true;
}
/*static*/ bool
ShadowLayerForwarder::PlatformCloseDescriptor(const SurfaceDescriptor& aDescriptor)
{
if (SurfaceDescriptor::TSurfaceDescriptorGralloc != aDescriptor.type()) {
return false;
}
sp<GraphicBuffer> buffer =
GrallocBufferActor::GetFrom(aDescriptor);
// If the buffer wasn't lock()d, this probably blows up. But since
// PlatformCloseDescriptor() is private and only used by
// AutoOpenSurface, we want to know if the logic is wrong there.
buffer->unlock();
return true;
}
/*static*/ void
ShadowLayerForwarder::PlatformSyncBeforeUpdate()
{
// Nothing to be done for gralloc.
}
/*static*/ void
ShadowLayerManager::PlatformSyncBeforeReplyUpdate()
{
// Nothing to be done for gralloc.
}
} // namespace layers
} // namespace mozilla

View File

@ -0,0 +1,101 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: sw=2 ts=8 et :
*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_layers_ShadowLayerUtilsGralloc_h
#define mozilla_layers_ShadowLayerUtilsGralloc_h
#include <unistd.h>
#include <ui/GraphicBuffer.h>
#include "IPC/IPCMessageUtils.h"
#include "mozilla/layers/PGrallocBufferChild.h"
#include "mozilla/layers/PGrallocBufferParent.h"
#define MOZ_HAVE_SURFACEDESCRIPTORGRALLOC
#define MOZ_HAVE_PLATFORM_SPECIFIC_LAYER_BUFFERS
class gfxASurface;
namespace mozilla {
namespace layers {
class MaybeMagicGrallocBufferHandle;
class SurfaceDescriptorGralloc;
/**
* This class exists to share the underlying GraphicBuffer resources
* from one thread context to another. This requires going through
* slow paths in the kernel so can be somewhat expensive.
*
* This is not just platform-specific, but also
* gralloc-implementation-specific.
*/
struct MagicGrallocBufferHandle {
typedef android::GraphicBuffer GraphicBuffer;
MagicGrallocBufferHandle()
{ }
MagicGrallocBufferHandle(const android::sp<GraphicBuffer>& aGraphicBuffer);
// Default copy ctor and operator= are OK
bool operator==(const MagicGrallocBufferHandle& aOther) const {
return mGraphicBuffer == aOther.mGraphicBuffer;
}
android::sp<GraphicBuffer> mGraphicBuffer;
};
/**
* GrallocBufferActor is an "IPC wrapper" for an underlying
* GraphicBuffer (pmem region). It allows us to cheaply and
* conveniently share gralloc handles between processes.
*/
class GrallocBufferActor : public PGrallocBufferChild
, public PGrallocBufferParent
{
friend class ShadowLayerForwarder;
typedef android::GraphicBuffer GraphicBuffer;
public:
virtual ~GrallocBufferActor() {}
static PGrallocBufferParent*
Create(const gfxIntSize& aSize, const gfxContentType& aContent,
MaybeMagicGrallocBufferHandle* aOutHandle);
static PGrallocBufferChild*
Create();
private:
GrallocBufferActor() {}
void InitFromHandle(const MagicGrallocBufferHandle& aHandle);
static android::sp<GraphicBuffer>
GetFrom(const SurfaceDescriptorGralloc& aDescriptor);
android::sp<GraphicBuffer> mGraphicBuffer;
};
} // namespace layers
} // namespace mozilla
namespace IPC {
template <>
struct ParamTraits<mozilla::layers::MagicGrallocBufferHandle> {
typedef mozilla::layers::MagicGrallocBufferHandle paramType;
static void Write(Message* aMsg, const paramType& aParam);
static bool Read(const Message* aMsg, void** aIter, paramType* aResult);
};
} // namespace IPC
#endif // mozilla_layers_ShadowLayerUtilsGralloc_h

View File

@ -88,19 +88,10 @@ SurfaceDescriptorX11::OpenForeign() const
return surf->CairoStatus() ? nsnull : surf.forget();
}
bool
ShadowLayerForwarder::PlatformAllocDoubleBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
SurfaceDescriptor* aFrontBuffer,
SurfaceDescriptor* aBackBuffer)
{
return (PlatformAllocBuffer(aSize, aContent, aFrontBuffer) &&
PlatformAllocBuffer(aSize, aContent, aBackBuffer));
}
bool
ShadowLayerForwarder::PlatformAllocBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
uint32_t aCaps,
SurfaceDescriptor* aBuffer)
{
if (!UsingXCompositing()) {
@ -109,6 +100,11 @@ ShadowLayerForwarder::PlatformAllocBuffer(const gfxIntSize& aSize,
// things down. Use Shmem instead.
return false;
}
if (MAP_AS_IMAGE_SURFACE & aCaps) {
// We can't efficiently map pixmaps as gfxImageSurface, in
// general. Fall back on Shmem.
return false;
}
gfxPlatform* platform = gfxPlatform::GetPlatform();
nsRefPtr<gfxASurface> buffer = platform->CreateOffscreenSurface(aSize, aContent);
@ -127,7 +123,8 @@ ShadowLayerForwarder::PlatformAllocBuffer(const gfxIntSize& aSize,
}
/*static*/ already_AddRefed<gfxASurface>
ShadowLayerForwarder::PlatformOpenDescriptor(const SurfaceDescriptor& aSurface)
ShadowLayerForwarder::PlatformOpenDescriptor(OpenMode aMode,
const SurfaceDescriptor& aSurface)
{
if (SurfaceDescriptor::TSurfaceDescriptorX11 != aSurface.type()) {
return nsnull;
@ -135,6 +132,31 @@ ShadowLayerForwarder::PlatformOpenDescriptor(const SurfaceDescriptor& aSurface)
return aSurface.get_SurfaceDescriptorX11().OpenForeign();
}
/*static*/ bool
ShadowLayerForwarder::PlatformCloseDescriptor(const SurfaceDescriptor& aDescriptor)
{
// XIDs don't need to be "closed".
return false;
}
/*static*/ bool
ShadowLayerForwarder::PlatformGetDescriptorSurfaceContentType(
const SurfaceDescriptor& aDescriptor, OpenMode aMode,
gfxContentType* aContent,
gfxASurface** aSurface)
{
return false;
}
/*static*/ bool
ShadowLayerForwarder::PlatformGetDescriptorSurfaceSize(
const SurfaceDescriptor& aDescriptor, OpenMode aMode,
gfxIntSize* aSize,
gfxASurface** aSurface)
{
return false;
}
bool
ShadowLayerForwarder::PlatformDestroySharedSurface(SurfaceDescriptor* aSurface)
{

View File

@ -11,6 +11,7 @@
#include "gfxSharedImageSurface.h"
#include "gfxPlatform.h"
#include "AutoOpenSurface.h"
#include "mozilla/ipc/SharedMemorySysV.h"
#include "mozilla/layers/PLayerChild.h"
#include "mozilla/layers/PLayersChild.h"
@ -355,7 +356,7 @@ ShadowLayerForwarder::ShadowDrawToTarget(gfxContext* aTarget) {
return false;
}
nsRefPtr<gfxASurface> surface = OpenDescriptor(descriptorOut);
nsRefPtr<gfxASurface> surface = OpenDescriptor(OPEN_READ_WRITE, descriptorOut);
aTarget->SetOperator(gfxContext::OPERATOR_SOURCE);
aTarget->DrawSurface(surface, surface->GetSize());
@ -381,22 +382,6 @@ OptimalShmemType()
#endif
}
bool
ShadowLayerForwarder::AllocDoubleBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
gfxSharedImageSurface** aFrontBuffer,
gfxSharedImageSurface** aBackBuffer)
{
return AllocBuffer(aSize, aContent, aFrontBuffer) &&
AllocBuffer(aSize, aContent, aBackBuffer);
}
void
ShadowLayerForwarder::DestroySharedSurface(gfxSharedImageSurface* aSurface)
{
mShadowManager->DeallocShmem(aSurface->GetShmem());
}
bool
ShadowLayerForwarder::AllocBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
@ -417,44 +402,26 @@ ShadowLayerForwarder::AllocBuffer(const gfxIntSize& aSize,
return true;
}
bool
ShadowLayerForwarder::AllocDoubleBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
SurfaceDescriptor* aFrontBuffer,
SurfaceDescriptor* aBackBuffer)
{
bool tryPlatformSurface = true;
#ifdef DEBUG
tryPlatformSurface = !PR_GetEnv("MOZ_LAYERS_FORCE_SHMEM_SURFACES");
#endif
if (tryPlatformSurface &&
PlatformAllocDoubleBuffer(aSize, aContent, aFrontBuffer, aBackBuffer)) {
return true;
}
nsRefPtr<gfxSharedImageSurface> front;
nsRefPtr<gfxSharedImageSurface> back;
if (!AllocDoubleBuffer(aSize, aContent,
getter_AddRefs(front), getter_AddRefs(back))) {
return false;
}
*aFrontBuffer = front->GetShmem();
*aBackBuffer = back->GetShmem();
return true;
}
bool
ShadowLayerForwarder::AllocBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
SurfaceDescriptor* aBuffer)
{
return AllocBufferWithCaps(aSize, aContent, DEFAULT_BUFFER_CAPS, aBuffer);
}
bool
ShadowLayerForwarder::AllocBufferWithCaps(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
uint32_t aCaps,
SurfaceDescriptor* aBuffer)
{
bool tryPlatformSurface = true;
#ifdef DEBUG
tryPlatformSurface = !PR_GetEnv("MOZ_LAYERS_FORCE_SHMEM_SURFACES");
#endif
if (tryPlatformSurface &&
PlatformAllocBuffer(aSize, aContent, aBuffer)) {
PlatformAllocBuffer(aSize, aContent, aCaps, aBuffer)) {
return true;
}
@ -468,9 +435,10 @@ ShadowLayerForwarder::AllocBuffer(const gfxIntSize& aSize,
}
/*static*/ already_AddRefed<gfxASurface>
ShadowLayerForwarder::OpenDescriptor(const SurfaceDescriptor& aSurface)
ShadowLayerForwarder::OpenDescriptor(OpenMode aMode,
const SurfaceDescriptor& aSurface)
{
nsRefPtr<gfxASurface> surf = PlatformOpenDescriptor(aSurface);
nsRefPtr<gfxASurface> surf = PlatformOpenDescriptor(aMode, aSurface);
if (surf) {
return surf.forget();
}
@ -486,6 +454,46 @@ ShadowLayerForwarder::OpenDescriptor(const SurfaceDescriptor& aSurface)
}
}
/*static*/ gfxContentType
ShadowLayerForwarder::GetDescriptorSurfaceContentType(
const SurfaceDescriptor& aDescriptor, OpenMode aMode,
gfxASurface** aSurface)
{
gfxContentType content;
if (PlatformGetDescriptorSurfaceContentType(aDescriptor, aMode,
&content, aSurface)) {
return content;
}
nsRefPtr<gfxASurface> surface = OpenDescriptor(aMode, aDescriptor);
content = surface->GetContentType();
*aSurface = surface.forget().get();
return content;
}
/*static*/ gfxIntSize
ShadowLayerForwarder::GetDescriptorSurfaceSize(
const SurfaceDescriptor& aDescriptor, OpenMode aMode,
gfxASurface** aSurface)
{
gfxIntSize size;
if (PlatformGetDescriptorSurfaceSize(aDescriptor, aMode, &size, aSurface)) {
return size;
}
nsRefPtr<gfxASurface> surface = OpenDescriptor(aMode, aDescriptor);
size = surface->GetSize();
*aSurface = surface.forget().get();
return size;
}
/*static*/ void
ShadowLayerForwarder::CloseDescriptor(const SurfaceDescriptor& aDescriptor)
{
PlatformCloseDescriptor(aDescriptor);
// There's no "close" needed for Shmem surfaces.
}
// Destroy the Shmem SurfaceDescriptor |aSurface|.
template<class ShmemDeallocator>
static void
@ -546,29 +554,48 @@ ShadowLayerManager::DestroySharedSurface(SurfaceDescriptor* aSurface,
#if !defined(MOZ_HAVE_PLATFORM_SPECIFIC_LAYER_BUFFERS)
bool
ShadowLayerForwarder::PlatformAllocDoubleBuffer(const gfxIntSize&,
gfxASurface::gfxContentType,
SurfaceDescriptor*,
SurfaceDescriptor*)
{
return false;
}
bool
ShadowLayerForwarder::PlatformAllocBuffer(const gfxIntSize&,
gfxASurface::gfxContentType,
uint32_t,
SurfaceDescriptor*)
{
return false;
}
/*static*/ already_AddRefed<gfxASurface>
ShadowLayerForwarder::PlatformOpenDescriptor(const SurfaceDescriptor&)
ShadowLayerForwarder::PlatformOpenDescriptor(OpenMode,
const SurfaceDescriptor&)
{
return nsnull;
}
/*static*/ bool
ShadowLayerForwarder::PlatformCloseDescriptor(const SurfaceDescriptor&)
{
return false;
}
/*static*/ bool
ShadowLayerForwarder::PlatformGetDescriptorSurfaceContentType(
const SurfaceDescriptor&,
OpenMode,
gfxContentType*,
gfxASurface**)
{
return false;
}
/*static*/ bool
ShadowLayerForwarder::PlatformGetDescriptorSurfaceSize(
const SurfaceDescriptor&,
OpenMode,
gfxIntSize*,
gfxASurface**)
{
return false;
}
bool
ShadowLayerForwarder::PlatformDestroySharedSurface(SurfaceDescriptor*)
{
@ -599,5 +626,60 @@ IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface)
return SurfaceDescriptor::T__None != aSurface.type();
}
AutoOpenSurface::AutoOpenSurface(OpenMode aMode,
const SurfaceDescriptor& aDescriptor)
: mDescriptor(aDescriptor)
, mMode(aMode)
{
MOZ_ASSERT(IsSurfaceDescriptorValid(mDescriptor));
}
AutoOpenSurface::~AutoOpenSurface()
{
if (mSurface) {
mSurface = nsnull;
ShadowLayerForwarder::CloseDescriptor(mDescriptor);
}
}
gfxContentType
AutoOpenSurface::ContentType()
{
if (mSurface) {
return mSurface->GetContentType();
}
return ShadowLayerForwarder::GetDescriptorSurfaceContentType(
mDescriptor, mMode, getter_AddRefs(mSurface));
}
gfxIntSize
AutoOpenSurface::Size()
{
if (mSurface) {
return mSurface->GetSize();
}
return ShadowLayerForwarder::GetDescriptorSurfaceSize(
mDescriptor, mMode, getter_AddRefs(mSurface));
}
gfxASurface*
AutoOpenSurface::Get()
{
if (!mSurface) {
mSurface = ShadowLayerForwarder::OpenDescriptor(mMode, mDescriptor);
}
return mSurface.get();
}
gfxImageSurface*
AutoOpenSurface::GetAsImage()
{
if (!mSurfaceAsImage) {
mSurfaceAsImage = Get()->GetAsImageSurface();
}
return mSurfaceAsImage.get();
}
} // namespace layers
} // namespace mozilla

View File

@ -38,6 +38,20 @@ class SharedImage;
class CanvasSurface;
class BasicTiledLayerBuffer;
enum BufferCapabilities {
DEFAULT_BUFFER_CAPS = 0,
/**
* The allocated buffer must be efficiently mappable as a
* gfxImageSurface.
*/
MAP_AS_IMAGE_SURFACE = 1 << 0
};
enum OpenMode {
OPEN_READ_ONLY,
OPEN_READ_WRITE
};
/**
* We want to share layer trees across thread contexts and address
* spaces for several reasons; chief among them
@ -81,7 +95,10 @@ class BasicTiledLayerBuffer;
class ShadowLayerForwarder
{
friend class AutoOpenSurface;
public:
typedef gfxASurface::gfxContentType gfxContentType;
typedef LayerManager::LayersBackend LayersBackend;
virtual ~ShadowLayerForwarder();
@ -227,8 +244,8 @@ public:
* The basic lifecycle is
*
* - a Layer needs a buffer. Its ShadowableLayer subclass calls
* AllocDoubleBuffer(), then calls one of the Created*Buffer()
* methods above to transfer the (temporary) front buffer to its
* AllocBuffer(), then calls one of the Created*Buffer() methods
* above to transfer the (temporary) front buffer to its
* ShadowLayer in the other process. The Layer needs a
* gfxASurface to paint, so the ShadowableLayer uses
* OpenDescriptor(backBuffer) to get that surface, and hands it
@ -255,34 +272,17 @@ public:
* Shmem (gfxSharedImageSurface) buffers are available on all
* platforms, but they may not be optimal.
*
* NB: this interface is being deprecated in favor of the
* SurfaceDescriptor variant below.
*/
bool AllocDoubleBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
gfxSharedImageSurface** aFrontBuffer,
gfxSharedImageSurface** aBackBuffer);
void DestroySharedSurface(gfxSharedImageSurface* aSurface);
bool AllocBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
gfxSharedImageSurface** aBuffer);
/**
* In the absence of platform-specific buffers these fall back to
* Shmem/gfxSharedImageSurface.
*/
bool AllocDoubleBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
SurfaceDescriptor* aFrontBuffer,
SurfaceDescriptor* aBackBuffer);
bool AllocBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
SurfaceDescriptor* aBuffer);
gfxASurface::gfxContentType aContent,
SurfaceDescriptor* aBuffer);
static already_AddRefed<gfxASurface>
OpenDescriptor(const SurfaceDescriptor& aSurface);
bool AllocBufferWithCaps(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
uint32_t aCaps,
SurfaceDescriptor* aBuffer);
void DestroySharedSurface(SurfaceDescriptor* aSurface);
@ -311,17 +311,57 @@ protected:
PLayersChild* mShadowManager;
private:
bool PlatformAllocDoubleBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
SurfaceDescriptor* aFrontBuffer,
SurfaceDescriptor* aBackBuffer);
bool AllocBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
gfxSharedImageSurface** aBuffer);
bool PlatformAllocBuffer(const gfxIntSize& aSize,
gfxASurface::gfxContentType aContent,
SurfaceDescriptor* aBuffer);
gfxASurface::gfxContentType aContent,
uint32_t aCaps,
SurfaceDescriptor* aBuffer);
/**
* Try to query the content type efficiently, but at worst map the
* surface and return it in *aSurface.
*/
static gfxContentType
GetDescriptorSurfaceContentType(const SurfaceDescriptor& aDescriptor,
OpenMode aMode,
gfxASurface** aSurface);
/**
* It can be expensive to open a descriptor just to query its
* content type. If the platform impl can do this cheaply, it will
* set *aContent and return true.
*/
static bool
PlatformGetDescriptorSurfaceContentType(const SurfaceDescriptor& aDescriptor,
OpenMode aMode,
gfxContentType* aContent,
gfxASurface** aSurface);
// (Same as above, but for surface size.)
static gfxIntSize
GetDescriptorSurfaceSize(const SurfaceDescriptor& aDescriptor,
OpenMode aMode,
gfxASurface** aSurface);
static bool
PlatformGetDescriptorSurfaceSize(const SurfaceDescriptor& aDescriptor,
OpenMode aMode,
gfxIntSize* aSize,
gfxASurface** aSurface);
static already_AddRefed<gfxASurface>
PlatformOpenDescriptor(const SurfaceDescriptor& aDescriptor);
OpenDescriptor(OpenMode aMode, const SurfaceDescriptor& aSurface);
static already_AddRefed<gfxASurface>
PlatformOpenDescriptor(OpenMode aMode, const SurfaceDescriptor& aDescriptor);
/** Make this descriptor unusable for gfxASurface clients. A
* private interface with AutoOpenSurface. */
static void
CloseDescriptor(const SurfaceDescriptor& aDescriptor);
static bool
PlatformCloseDescriptor(const SurfaceDescriptor& aDescriptor);
bool PlatformDestroySharedSurface(SurfaceDescriptor* aSurface);
@ -334,7 +374,6 @@ private:
bool mIsFirstPaint;
};
class ShadowLayerManager : public LayerManager
{
public:

View File

@ -7,6 +7,7 @@
#include "ShadowLayerChild.h"
#include "ShadowLayersChild.h"
#include "ShadowLayerUtils.h"
namespace mozilla {
namespace layers {
@ -20,6 +21,31 @@ ShadowLayersChild::Destroy()
// WARNING: |this| has gone to the great heap in the sky
}
PGrallocBufferChild*
ShadowLayersChild::AllocPGrallocBuffer(const gfxIntSize&,
const gfxContentType&,
MaybeMagicGrallocBufferHandle*)
{
#ifdef MOZ_HAVE_SURFACEDESCRIPTORGRALLOC
return GrallocBufferActor::Create();
#else
NS_RUNTIMEABORT("No gralloc buffers for you");
return nsnull;
#endif
}
bool
ShadowLayersChild::DeallocPGrallocBuffer(PGrallocBufferChild* actor)
{
#ifdef MOZ_HAVE_SURFACEDESCRIPTORGRALLOC
delete actor;
return true;
#else
NS_RUNTIMEABORT("Um, how did we get here?");
return false;
#endif
}
PLayerChild*
ShadowLayersChild::AllocPLayer()
{

View File

@ -29,8 +29,13 @@ public:
void Destroy();
protected:
NS_OVERRIDE virtual PLayerChild* AllocPLayer();
NS_OVERRIDE virtual bool DeallocPLayer(PLayerChild* actor);
virtual PGrallocBufferChild*
AllocPGrallocBuffer(const gfxIntSize&, const gfxContentType&,
MaybeMagicGrallocBufferHandle*) MOZ_OVERRIDE;
virtual bool
DeallocPGrallocBuffer(PGrallocBufferChild* actor) MOZ_OVERRIDE;
virtual PLayerChild* AllocPLayer() MOZ_OVERRIDE;
virtual bool DeallocPLayer(PLayerChild* actor) MOZ_OVERRIDE;
};
} // namespace layers

View File

@ -7,20 +7,18 @@
#include <vector>
#include "ShadowLayersParent.h"
#include "ShadowLayerParent.h"
#include "ShadowLayers.h"
#include "RenderTrace.h"
#include "mozilla/unused.h"
#include "mozilla/layout/RenderFrameParent.h"
#include "AutoOpenSurface.h"
#include "CompositorParent.h"
#include "gfxSharedImageSurface.h"
#include "TiledLayerBuffer.h"
#include "ImageLayers.h"
#include "mozilla/layout/RenderFrameParent.h"
#include "mozilla/unused.h"
#include "RenderTrace.h"
#include "ShadowLayerParent.h"
#include "ShadowLayersParent.h"
#include "ShadowLayers.h"
#include "ShadowLayerUtils.h"
#include "TiledLayerBuffer.h"
typedef std::vector<mozilla::layers::EditReply> EditReplyVector;
@ -420,21 +418,46 @@ ShadowLayersParent::RecvDrawToSurface(const SurfaceDescriptor& surfaceIn,
return true;
}
nsRefPtr<gfxASurface> sharedSurface = ShadowLayerForwarder::OpenDescriptor(surfaceIn);
AutoOpenSurface sharedSurface(OPEN_READ_WRITE, surfaceIn);
nsRefPtr<gfxASurface> localSurface =
gfxPlatform::GetPlatform()->CreateOffscreenSurface(sharedSurface->GetSize(),
sharedSurface->GetContentType());
gfxPlatform::GetPlatform()->CreateOffscreenSurface(sharedSurface.Size(),
sharedSurface.ContentType());
nsRefPtr<gfxContext> context = new gfxContext(localSurface);
layer_manager()->BeginTransactionWithTarget(context);
layer_manager()->EndTransaction(NULL, NULL);
nsRefPtr<gfxContext> contextForCopy = new gfxContext(sharedSurface);
nsRefPtr<gfxContext> contextForCopy = new gfxContext(sharedSurface.Get());
contextForCopy->SetOperator(gfxContext::OPERATOR_SOURCE);
contextForCopy->DrawSurface(localSurface, localSurface->GetSize());
return true;
}
PGrallocBufferParent*
ShadowLayersParent::AllocPGrallocBuffer(const gfxIntSize& aSize,
const gfxContentType& aContent,
MaybeMagicGrallocBufferHandle* aOutHandle)
{
#ifdef MOZ_HAVE_SURFACEDESCRIPTORGRALLOC
return GrallocBufferActor::Create(aSize, aContent, aOutHandle);
#else
NS_RUNTIMEABORT("No gralloc buffers for you");
return nsnull;
#endif
}
bool
ShadowLayersParent::DeallocPGrallocBuffer(PGrallocBufferParent* actor)
{
#ifdef MOZ_HAVE_SURFACEDESCRIPTORGRALLOC
delete actor;
return true;
#else
NS_RUNTIMEABORT("Um, how did we get here?");
return false;
#endif
}
PLayerParent*
ShadowLayersParent::AllocPLayer()
{

View File

@ -44,18 +44,24 @@ public:
virtual void DestroySharedSurface(SurfaceDescriptor* aSurface);
protected:
NS_OVERRIDE virtual bool RecvUpdate(const EditArray& cset,
const bool& isFirstPaint,
EditReplyArray* reply);
virtual bool RecvUpdate(const EditArray& cset,
const bool& isFirstPaint,
EditReplyArray* reply) MOZ_OVERRIDE;
NS_OVERRIDE virtual bool RecvDrawToSurface(const SurfaceDescriptor& surfaceIn,
SurfaceDescriptor* surfaceOut);
virtual bool RecvDrawToSurface(const SurfaceDescriptor& surfaceIn,
SurfaceDescriptor* surfaceOut) MOZ_OVERRIDE;
NS_OVERRIDE virtual bool RecvUpdateNoSwap(const EditArray& cset,
const bool& isFirstPaint);
virtual bool RecvUpdateNoSwap(const EditArray& cset,
const bool& isFirstPaint) MOZ_OVERRIDE;
NS_OVERRIDE virtual PLayerParent* AllocPLayer();
NS_OVERRIDE virtual bool DeallocPLayer(PLayerParent* actor);
virtual PGrallocBufferParent*
AllocPGrallocBuffer(const gfxIntSize& aSize, const gfxContentType& aContent,
MaybeMagicGrallocBufferHandle* aOutHandle) MOZ_OVERRIDE;
virtual bool
DeallocPGrallocBuffer(PGrallocBufferParent* actor) MOZ_OVERRIDE;
virtual PLayerParent* AllocPLayer() MOZ_OVERRIDE;
virtual bool DeallocPLayer(PLayerParent* actor) MOZ_OVERRIDE;
private:
nsRefPtr<ShadowLayerManager> mLayerManager;

View File

@ -4,6 +4,7 @@
IPDLSRCS = \
PCompositor.ipdl \
PGrallocBuffer.ipdl \
PLayer.ipdl \
PLayers.ipdl \
$(NULL)

View File

@ -3,6 +3,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ipc/AutoOpenSurface.h"
#include "mozilla/layers/PLayers.h"
#include "mozilla/layers/ShadowLayers.h"
@ -305,12 +306,12 @@ ShadowCanvasLayerOGL::Initialize(const Data& aData)
void
ShadowCanvasLayerOGL::Init(const CanvasSurface& aNewFront, bool needYFlip)
{
nsRefPtr<gfxASurface> surf = ShadowLayerForwarder::OpenDescriptor(aNewFront);
AutoOpenSurface autoSurf(OPEN_READ_ONLY, aNewFront);
mNeedsYFlip = needYFlip;
mTexImage = gl()->CreateTextureImage(surf->GetSize(),
surf->GetContentType(),
mTexImage = gl()->CreateTextureImage(autoSurf.Size(),
autoSurf.ContentType(),
LOCAL_GL_CLAMP_TO_EDGE,
mNeedsYFlip ? TextureImage::NeedsYFlip : TextureImage::NoFlags);
}
@ -321,14 +322,14 @@ ShadowCanvasLayerOGL::Swap(const CanvasSurface& aNewFront,
CanvasSurface* aNewBack)
{
if (!mDestroyed) {
nsRefPtr<gfxASurface> surf = ShadowLayerForwarder::OpenDescriptor(aNewFront);
gfxIntSize sz = surf->GetSize();
AutoOpenSurface autoSurf(OPEN_READ_ONLY, aNewFront);
gfxIntSize sz = autoSurf.Size();
if (!mTexImage || mTexImage->GetSize() != sz ||
mTexImage->GetContentType() != surf->GetContentType()) {
mTexImage->GetContentType() != autoSurf.ContentType()) {
Init(aNewFront, needYFlip);
}
nsIntRegion updateRegion(nsIntRect(0, 0, sz.width, sz.height));
mTexImage->DirectUpdate(surf, updateRegion);
mTexImage->DirectUpdate(autoSurf.Get(), updateRegion);
}
*aNewBack = aNewFront;
@ -365,6 +366,10 @@ void
ShadowCanvasLayerOGL::RenderLayer(int aPreviousFrameBuffer,
const nsIntPoint& aOffset)
{
if (!mTexImage) {
return;
}
mOGLManager->MakeCurrent();
ShaderProgramOGL *program =

View File

@ -5,6 +5,7 @@
#include "gfxSharedImageSurface.h"
#include "ipc/AutoOpenSurface.h"
#include "ImageLayerOGL.h"
#include "gfxImageSurface.h"
#include "gfxUtils.h"
@ -660,12 +661,10 @@ bool
ShadowImageLayerOGL::Init(const SharedImage& aFront)
{
if (aFront.type() == SharedImage::TSurfaceDescriptor) {
SurfaceDescriptor desc = aFront.get_SurfaceDescriptor();
nsRefPtr<gfxASurface> surf =
ShadowLayerForwarder::OpenDescriptor(desc);
mSize = surf->GetSize();
AutoOpenSurface autoSurf(OPEN_READ_ONLY, aFront.get_SurfaceDescriptor());
mSize = autoSurf.Size();
mTexImage = gl()->CreateTextureImage(nsIntSize(mSize.width, mSize.height),
surf->GetContentType(),
autoSurf.ContentType(),
LOCAL_GL_CLAMP_TO_EDGE,
mForceSingleTile
? TextureImage::ForceSingleTile
@ -674,15 +673,11 @@ ShadowImageLayerOGL::Init(const SharedImage& aFront)
} else {
YUVImage yuv = aFront.get_YUVImage();
nsRefPtr<gfxSharedImageSurface> surfY =
gfxSharedImageSurface::Open(yuv.Ydata());
nsRefPtr<gfxSharedImageSurface> surfU =
gfxSharedImageSurface::Open(yuv.Udata());
nsRefPtr<gfxSharedImageSurface> surfV =
gfxSharedImageSurface::Open(yuv.Vdata());
AutoOpenSurface surfY(OPEN_READ_ONLY, yuv.Ydata());
AutoOpenSurface surfU(OPEN_READ_ONLY, yuv.Udata());
mSize = surfY->GetSize();
mCbCrSize = surfU->GetSize();
mSize = surfY.Size();
mCbCrSize = surfU.Size();
if (!mYUVTexture[0].IsAllocated()) {
mYUVTexture[0].Allocate(gl());
@ -710,25 +705,24 @@ ShadowImageLayerOGL::Swap(const SharedImage& aNewFront,
{
if (!mDestroyed) {
if (aNewFront.type() == SharedImage::TSurfaceDescriptor) {
nsRefPtr<gfxASurface> surf =
ShadowLayerForwarder::OpenDescriptor(aNewFront.get_SurfaceDescriptor());
gfxIntSize size = surf->GetSize();
AutoOpenSurface surf(OPEN_READ_ONLY, aNewFront.get_SurfaceDescriptor());
gfxIntSize size = surf.Size();
if (mSize != size || !mTexImage ||
mTexImage->GetContentType() != surf->GetContentType()) {
mTexImage->GetContentType() != surf.ContentType()) {
Init(aNewFront);
}
// XXX this is always just ridiculously slow
nsIntRegion updateRegion(nsIntRect(0, 0, size.width, size.height));
mTexImage->DirectUpdate(surf, updateRegion);
mTexImage->DirectUpdate(surf.Get(), updateRegion);
} else {
const YUVImage& yuv = aNewFront.get_YUVImage();
nsRefPtr<gfxSharedImageSurface> surfY =
gfxSharedImageSurface::Open(yuv.Ydata());
nsRefPtr<gfxSharedImageSurface> surfU =
gfxSharedImageSurface::Open(yuv.Udata());
nsRefPtr<gfxSharedImageSurface> surfV =
gfxSharedImageSurface::Open(yuv.Vdata());
AutoOpenSurface asurfY(OPEN_READ_ONLY, yuv.Ydata());
AutoOpenSurface asurfU(OPEN_READ_ONLY, yuv.Udata());
AutoOpenSurface asurfV(OPEN_READ_ONLY, yuv.Vdata());
nsRefPtr<gfxImageSurface> surfY = asurfY.GetAsImage();
nsRefPtr<gfxImageSurface> surfU = asurfU.GetAsImage();
nsRefPtr<gfxImageSurface> surfV = asurfV.GetAsImage();
mPictureRect = yuv.picture();
gfxIntSize size = surfY->GetSize();

View File

@ -3,6 +3,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ipc/AutoOpenSurface.h"
#include "mozilla/layers/PLayers.h"
#include "TiledLayerBuffer.h"
@ -968,7 +969,7 @@ ShadowThebesLayerOGL::~ShadowThebesLayerOGL()
bool
ShadowThebesLayerOGL::ShouldDoubleBuffer()
{
#ifdef ANDROID
#ifdef MOZ_JAVA_COMPOSITOR
/* Enable double-buffering on Android so that we don't block for as long
* when uploading textures. This is a work-around for the lack of an
* asynchronous texture upload facility.
@ -992,7 +993,8 @@ ShadowThebesLayerOGL::EnsureTextureUpdated()
if (mRegionPendingUpload.IsEmpty() || !IsSurfaceDescriptorValid(mFrontBufferDescriptor))
return;
mBuffer->DirectUpdate(mFrontBuffer.Buffer(), mRegionPendingUpload);
AutoOpenSurface frontSurface(OPEN_READ_ONLY, mFrontBuffer.Buffer());
mBuffer->DirectUpdate(frontSurface.Get(), mRegionPendingUpload);
mRegionPendingUpload.SetEmpty();
}
@ -1045,15 +1047,15 @@ ShadowThebesLayerOGL::EnsureTextureUpdated(nsIntRegion& aRegion)
if (updateRegion.IsEmpty())
continue;
AutoOpenSurface surface(OPEN_READ_ONLY, mFrontBuffer.Buffer());
nsRefPtr<TextureImage> texImage;
if (!gl()->CanUploadSubTextures()) {
// When sub-textures are unsupported, TiledTextureImage expands the
// boundaries of DirectUpdate to tile boundaries. So that we don't
// re-upload texture data, use the tile iteration to monitor how much
// of the texture was actually uploaded.
gfxASurface* surface = mFrontBuffer.Buffer();
gfxIntSize size = surface->GetSize();
mBuffer->EnsureTexture(size, surface->GetContentType());
gfxIntSize size = surface.Size();
mBuffer->EnsureTexture(size, surface.ContentType());
texImage = mBuffer->GetTextureImage().get();
if (texImage->GetTileCount() > 1)
texImage->SetIterationCallback(EnsureTextureUpdatedCallback, (void *)&updateRegion);
@ -1062,7 +1064,7 @@ ShadowThebesLayerOGL::EnsureTextureUpdated(nsIntRegion& aRegion)
}
// Upload this quadrant of the region.
mBuffer->DirectUpdate(mFrontBuffer.Buffer(), updateRegion);
mBuffer->DirectUpdate(surface.Get(), updateRegion);
if (!gl()->CanUploadSubTextures())
texImage->SetIterationCallback(nsnull, nsnull);
@ -1102,8 +1104,9 @@ ShadowThebesLayerOGL::ProgressiveUpload()
// Set a tile iteration callback so we can cancel the upload after a tile
// has been uploaded and subtract it from mRegionPendingUpload
mBuffer->EnsureTexture(mFrontBuffer.Buffer()->GetSize(),
mFrontBuffer.Buffer()->GetContentType());
AutoOpenSurface frontSurface(OPEN_READ_ONLY, mFrontBuffer.Buffer());
mBuffer->EnsureTexture(frontSurface.Size(),
frontSurface.ContentType());
nsRefPtr<gl::TextureImage> tiledImage = mBuffer->GetTextureImage().get();
if (tiledImage->GetTileCount() > 1)
tiledImage->SetIterationCallback(ProgressiveUploadCallback, (void *)&mRegionPendingUpload);
@ -1111,7 +1114,7 @@ ShadowThebesLayerOGL::ProgressiveUpload()
mRegionPendingUpload.SetEmpty();
// Upload a tile
mBuffer->DirectUpdate(mFrontBuffer.Buffer(), mRegionPendingUpload);
mBuffer->DirectUpdate(frontSurface.Get(), mRegionPendingUpload);
// Remove the iteration callback
tiledImage->SetIterationCallback(nsnull, nsnull);
@ -1135,13 +1138,11 @@ ShadowThebesLayerOGL::Swap(const ThebesBuffer& aNewFront,
{
// The double-buffer path is copied and adapted from BasicLayers.cpp
if (ShouldDoubleBuffer()) {
nsRefPtr<gfxASurface> newFrontBuffer =
ShadowLayerForwarder::OpenDescriptor(aNewFront.buffer());
AutoOpenSurface newFrontBuffer(OPEN_READ_ONLY, aNewFront.buffer());
if (IsSurfaceDescriptorValid(mFrontBufferDescriptor)) {
nsRefPtr<gfxASurface> currentFront =
ShadowLayerForwarder::OpenDescriptor(mFrontBufferDescriptor);
if (currentFront->GetSize() != newFrontBuffer->GetSize()) {
AutoOpenSurface currentFront(OPEN_READ_ONLY, mFrontBufferDescriptor);
if (currentFront.Size() != newFrontBuffer.Size()) {
// Current front buffer is obsolete
DestroyFrontBuffer();
}
@ -1159,12 +1160,12 @@ ShadowThebesLayerOGL::Swap(const ThebesBuffer& aNewFront,
// They might overlap with our old pixels.
aNewBackValidRegion->Sub(mOldValidRegion, aUpdatedRegion);
nsRefPtr<gfxASurface> unused;
SurfaceDescriptor unused;
nsIntRect backRect;
nsIntPoint backRotation;
mFrontBuffer.Swap(
newFrontBuffer, aNewFront.rect(), aNewFront.rotation(),
getter_AddRefs(unused), &backRect, &backRotation);
aNewFront.buffer(), aNewFront.rect(), aNewFront.rotation(),
&unused, &backRect, &backRotation);
if (aNewBack->type() != OptionalThebesBuffer::Tnull_t) {
aNewBack->get_ThebesBuffer().rect() = backRect;
@ -1178,12 +1179,13 @@ ShadowThebesLayerOGL::Swap(const ThebesBuffer& aNewFront,
if (!mBuffer) {
mBuffer = new ShadowBufferOGL(this);
}
nsRefPtr<gfxASurface> surf = ShadowLayerForwarder::OpenDescriptor(mFrontBufferDescriptor);
mBuffer->Upload(surf, aUpdatedRegion, aNewFront.rect(), aNewFront.rotation(), true, mRegionPendingUpload);
AutoOpenSurface frontSurface(OPEN_READ_ONLY, mFrontBufferDescriptor);
mBuffer->Upload(frontSurface.Get(), aUpdatedRegion, aNewFront.rect(), aNewFront.rotation(), true, mRegionPendingUpload);
// Schedule a task to progressively upload the texture
if (!mUploadTask) {
mUploadTask = NewRunnableMethod(this, &ShadowThebesLayerOGL::ProgressiveUpload);
// XXX magic delay constant
MessageLoop::current()->PostDelayedTask(FROM_HERE, mUploadTask, 5);
}
}
@ -1199,8 +1201,8 @@ ShadowThebesLayerOGL::Swap(const ThebesBuffer& aNewFront,
if (!mBuffer) {
mBuffer = new ShadowBufferOGL(this);
}
nsRefPtr<gfxASurface> surf = ShadowLayerForwarder::OpenDescriptor(aNewFront.buffer());
mBuffer->Upload(surf, aUpdatedRegion, aNewFront.rect(), aNewFront.rotation(), false, mRegionPendingUpload);
AutoOpenSurface frontSurface(OPEN_READ_ONLY, aNewFront.buffer());
mBuffer->Upload(frontSurface.Get(), aUpdatedRegion, aNewFront.rect(), aNewFront.rotation(), false, mRegionPendingUpload);
}
*aNewBack = aNewFront;

View File

@ -67,19 +67,18 @@ public:
MOZ_COUNT_DTOR(ShadowThebesLayerBufferOGL);
}
void Swap(gfxASurface* aNewBuffer,
void Swap(const SurfaceDescriptor& aDescriptor,
const nsIntRect& aNewRect, const nsIntPoint& aNewRotation,
gfxASurface** aOldBuffer,
SurfaceDescriptor* aOldDescriptor,
nsIntRect* aOldRect, nsIntPoint* aOldRotation)
{
*aOldDescriptor = mBuffer;
*aOldRect = mBufferRect;
*aOldRotation = mBufferRotation;
nsRefPtr<gfxASurface> oldBuffer = mBuffer;
mBuffer = aDescriptor;
mBufferRect = aNewRect;
mBufferRotation = aNewRotation;
mBuffer = aNewBuffer;
oldBuffer.forget(aOldBuffer);
}
nsIntRect Rect() {
@ -90,7 +89,7 @@ public:
return mBufferRotation;
}
gfxASurface* Buffer() {
SurfaceDescriptor Buffer() {
return mBuffer;
}
@ -100,12 +99,11 @@ public:
*/
void Clear()
{
mBuffer = nsnull;
mBufferRect.SetEmpty();
}
protected:
nsRefPtr<gfxASurface> mBuffer;
SurfaceDescriptor mBuffer;
nsIntRect mBufferRect;
nsIntPoint mBufferRotation;
};

View File

@ -95,7 +95,8 @@ public:
typedef enum {
CONTENT_COLOR = 0x1000,
CONTENT_ALPHA = 0x2000,
CONTENT_COLOR_ALPHA = 0x3000
CONTENT_COLOR_ALPHA = 0x3000,
CONTENT_SENTINEL = 0xffff
} gfxContentType;
/** Wrap the given cairo surface and return a gfxASurface for it.

View File

@ -88,7 +88,8 @@ public:
FILTER_BEST,
FILTER_NEAREST,
FILTER_BILINEAR,
FILTER_GAUSSIAN
FILTER_GAUSSIAN,
FILTER_SENTINEL
};
void SetFilter(GraphicsFilter filter);

View File

@ -44,7 +44,7 @@
using mozilla::MonitorAutoLock;
using mozilla::ipc::GeckoChildProcessHost;
#ifdef MOZ_WIDGET_ANDROID
#ifdef ANDROID
// Like its predecessor in nsExceptionHandler.cpp, this is
// the magic number of a file descriptor remapping we must
// preserve for the child process.
@ -484,7 +484,9 @@ GeckoChildProcessHost::PerformAsyncLaunchInternal(std::vector<std::string>& aExt
// fill the last arg with something if there's no cache
if (cacheStr.IsEmpty())
cacheStr.AppendLiteral("-");
#endif // MOZ_WIDGET_ANDROID
#ifdef ANDROID
// Remap the Android property workspace to a well-known int,
// and update the environment to reflect the new value for the
// child process.
@ -499,7 +501,7 @@ GeckoChildProcessHost::PerformAsyncLaunchInternal(std::vector<std::string>& aExt
snprintf(buf, sizeof(buf), "%d%s", kMagicAndroidSystemPropFd, szptr);
newEnvVars["ANDROID_PROPERTY_WORKSPACE"] = buf;
}
#endif // MOZ_WIDGET_ANDROID
#endif // ANDROID
// remap the IPC socket fd to a well-known int, as the OS does for
// STDOUT_FILENO, for example

View File

@ -44,10 +44,11 @@ using mozilla::layers::LayerManager;
namespace mozilla {
typedef gfxPattern::GraphicsFilter GraphicsFilterType;
typedef gfxASurface::gfxSurfaceType gfxSurfaceType;
typedef LayerManager::LayersBackend LayersBackend;
typedef gfxASurface::gfxContentType gfxContentType;
typedef gfxASurface::gfxImageFormat PixelFormat;
typedef gfxASurface::gfxSurfaceType gfxSurfaceType;
typedef gfxPattern::GraphicsFilter GraphicsFilterType;
typedef LayerManager::LayersBackend LayersBackend;
// This is a cross-platform approximation to HANDLE, which we expect
// to be typedef'd to void* or thereabouts.
@ -495,140 +496,40 @@ struct ParamTraits<gfx3DMatrix>
}
};
template<>
struct ParamTraits<mozilla::GraphicsFilterType>
{
typedef mozilla::GraphicsFilterType paramType;
template <>
struct ParamTraits<mozilla::gfxContentType>
: public EnumSerializer<mozilla::gfxContentType,
gfxASurface::CONTENT_COLOR,
gfxASurface::CONTENT_SENTINEL>
{};
static void Write(Message* msg, const paramType& param)
{
switch (param) {
case gfxPattern::FILTER_FAST:
case gfxPattern::FILTER_GOOD:
case gfxPattern::FILTER_BEST:
case gfxPattern::FILTER_NEAREST:
case gfxPattern::FILTER_BILINEAR:
case gfxPattern::FILTER_GAUSSIAN:
WriteParam(msg, int32(param));
return;
}
NS_RUNTIMEABORT("not reached");
}
static bool Read(const Message* msg, void** iter, paramType* result)
{
int32 filter;
if (!ReadParam(msg, iter, &filter))
return false;
switch (filter) {
case gfxPattern::FILTER_FAST:
case gfxPattern::FILTER_GOOD:
case gfxPattern::FILTER_BEST:
case gfxPattern::FILTER_NEAREST:
case gfxPattern::FILTER_BILINEAR:
case gfxPattern::FILTER_GAUSSIAN:
*result = paramType(filter);
return true;
default:
return false;
}
}
};
template<>
template <>
struct ParamTraits<mozilla::gfxSurfaceType>
{
typedef mozilla::gfxSurfaceType paramType;
: public EnumSerializer<gfxASurface::gfxSurfaceType,
gfxASurface::SurfaceTypeImage,
gfxASurface::SurfaceTypeMax>
{};
static void Write(Message* msg, const paramType& param)
{
if (gfxASurface::SurfaceTypeImage <= param &&
param < gfxASurface::SurfaceTypeMax) {
WriteParam(msg, int32(param));
return;
}
NS_RUNTIMEABORT("surface type not reached");
}
template <>
struct ParamTraits<mozilla::GraphicsFilterType>
: public EnumSerializer<mozilla::GraphicsFilterType,
gfxPattern::FILTER_FAST,
gfxPattern::FILTER_SENTINEL>
{};
static bool Read(const Message* msg, void** iter, paramType* result)
{
int32 filter;
if (!ReadParam(msg, iter, &filter))
return false;
if (gfxASurface::SurfaceTypeImage <= filter &&
filter < gfxASurface::SurfaceTypeMax) {
*result = paramType(filter);
return true;
}
return false;
}
};
template<>
template <>
struct ParamTraits<mozilla::LayersBackend>
{
typedef mozilla::LayersBackend paramType;
: public EnumSerializer<mozilla::LayersBackend,
LayerManager::LAYERS_NONE,
LayerManager::LAYERS_LAST>
{};
static void Write(Message* msg, const paramType& param)
{
if (LayerManager::LAYERS_NONE <= param &&
param < LayerManager::LAYERS_LAST) {
WriteParam(msg, int32(param));
return;
}
NS_RUNTIMEABORT("backend type not reached");
}
static bool Read(const Message* msg, void** iter, paramType* result)
{
int32 type;
if (!ReadParam(msg, iter, &type))
return false;
if (LayerManager::LAYERS_NONE <= type &&
type < LayerManager::LAYERS_LAST) {
*result = paramType(type);
return true;
}
return false;
}
};
template<>
template <>
struct ParamTraits<mozilla::PixelFormat>
{
typedef mozilla::PixelFormat paramType;
static bool IsLegalPixelFormat(const paramType& format)
{
return (gfxASurface::ImageFormatARGB32 <= format &&
format < gfxASurface::ImageFormatUnknown);
}
static void Write(Message* msg, const paramType& param)
{
if (!IsLegalPixelFormat(param)) {
NS_RUNTIMEABORT("Unknown pixel format");
}
WriteParam(msg, int32(param));
return;
}
static bool Read(const Message* msg, void** iter, paramType* result)
{
int32 format;
if (!ReadParam(msg, iter, &format) ||
!IsLegalPixelFormat(paramType(format))) {
return false;
}
*result = paramType(format);
return true;
}
};
: public EnumSerializer<mozilla::PixelFormat,
gfxASurface::ImageFormatARGB32,
gfxASurface::ImageFormatUnknown>
{};
template<>
struct ParamTraits<gfxRGBA>

View File

@ -666,6 +666,9 @@ class _StructField(_CompoundTypeComponent):
if self.recursive:
return [ StmtExpr(ExprAssn(self.memberVar(),
ExprNew(self.bareType()))) ]
elif self.ipdltype.isIPDL() and self.ipdltype.isActor():
return [ StmtExpr(ExprAssn(self.memberVar(),
ExprLiteral.NULL)) ]
else:
return []

View File

@ -39,6 +39,9 @@ parent:
sync Test6(IntDoubleArrays[] i1)
returns (IntDoubleArrays[] o1);
sync Test7_0(ActorWrapper a1)
returns (ActorWrapper o1);
sync Test7(Actors i1,
Actors i2,
Actors i3)

View File

@ -28,6 +28,10 @@ struct SIntDoubleArrays {
double[] ad;
};
struct ActorWrapper {
PTestDataStructuresSub actor;
};
union Actors {
int;
int[];

View File

@ -153,6 +153,23 @@ bool TestDataStructuresParent::RecvTest5(
return true;
}
bool
TestDataStructuresParent::RecvTest7_0(const ActorWrapper& i1,
ActorWrapper* o1)
{
if (i1.actorChild() != nsnull)
fail("child side actor should always be null");
if (i1.actorParent() != mKids[0])
fail("should have got back same actor on parent side");
o1->actorParent() = mKids[0];
// malicious behavior
o1->actorChild() =
reinterpret_cast<PTestDataStructuresSubChild*>(0xdeadbeef);
return true;
}
bool TestDataStructuresParent::RecvTest6(
const InfallibleTArray<IntDoubleArrays>& i1,
InfallibleTArray<IntDoubleArrays>* o1)
@ -465,6 +482,7 @@ TestDataStructuresChild::RecvStart()
Test4();
Test5();
Test6();
Test7_0();
Test7();
Test8();
Test9();
@ -610,6 +628,28 @@ TestDataStructuresChild::Test6()
printf(" passed %s\n", __FUNCTION__);
}
void
TestDataStructuresChild::Test7_0()
{
ActorWrapper iaw;
if (iaw.actorChild() != nsnull || iaw.actorParent() != nsnull)
fail("actor members should be null initially");
iaw.actorChild() = mKids[0];
if (iaw.actorParent() != nsnull)
fail("parent should be null on child side after set");
ActorWrapper oaw;
if (!SendTest7_0(iaw, &oaw))
fail("sending Test7_0");
if (oaw.actorParent() != nsnull)
fail("parent accessor on actor-struct members should always be null in child");
if (oaw.actorChild() != mKids[0])
fail("should have got back same child-side actor");
}
void
TestDataStructuresChild::Test7()
{

View File

@ -90,6 +90,10 @@ protected:
const InfallibleTArray<IntDoubleArrays>& i1,
InfallibleTArray<IntDoubleArrays>* o1);
NS_OVERRIDE
virtual bool RecvTest7_0(const ActorWrapper& i1,
ActorWrapper* o1);
NS_OVERRIDE
virtual bool RecvTest7(
const Actors& i1,
@ -226,6 +230,7 @@ private:
void Test4();
void Test5();
void Test6();
void Test7_0();
void Test7();
void Test8();
void Test9();

View File

@ -677,6 +677,8 @@ ifeq ($(CPU_ARCH),x86)
# Workaround compiler bug on PGO (Bug 721284)
MonoIC.$(OBJ_SUFFIX): CXXFLAGS += -GL-
Compiler.$(OBJ_SUFFIX): CXXFLAGS += -GL-
# Ditto (Bug 772303)
RegExp.$(OBJ_SUFFIX): CXXFLAGS += -GL-
endif
endif # _MSC_VER

View File

@ -10,6 +10,7 @@
#include "builtin/Eval.h"
#include "frontend/BytecodeCompiler.h"
#include "mozilla/HashFunctions.h"
#include "vm/GlobalObject.h"
#include "jsinterpinlines.h"
@ -30,108 +31,42 @@ AssertInnerizedScopeChain(JSContext *cx, JSObject &scopeobj)
#endif
}
void
EvalCache::purge()
static bool
IsEvalCacheCandidate(JSScript *script)
{
// Purge all scripts from the eval cache. In addition to removing them from
// table_, null out the evalHashLink field of any script removed. Since
// evalHashLink is in a union with globalObject, this allows the GC to
// indiscriminately use the union as a nullable globalObject pointer.
for (size_t i = 0; i < ArrayLength(table_); ++i) {
for (JSScript **listHeadp = &table_[i]; *listHeadp; ) {
JSScript *script = *listHeadp;
JS_ASSERT(GetGCThingTraceKind(script) == JSTRACE_SCRIPT);
*listHeadp = script->evalHashLink();
script->evalHashLink() = NULL;
}
}
// Make sure there are no inner objects which might use the wrong parent
// and/or call scope by reusing the previous eval's script. Skip the
// script's first object, which entrains the eval's scope.
return script->savedCallerFun &&
!script->hasSingletons &&
script->objects()->length == 1 &&
!script->hasRegexps();
}
JSScript **
EvalCache::bucket(JSLinearString *str)
/* static */ HashNumber
EvalCacheHashPolicy::hash(const EvalCacheLookup &l)
{
const jschar *s = str->chars();
size_t n = str->length();
if (n > 100)
n = 100;
uint32_t h;
for (h = 0; n; s++, n--)
h = JS_ROTATE_LEFT32(h, 4) ^ *s;
h *= JS_GOLDEN_RATIO;
h >>= 32 - SHIFT;
JS_ASSERT(h < ArrayLength(table_));
return &table_[h];
return AddToHash(HashString(l.str->chars(), l.str->length()),
l.caller,
l.staticLevel,
l.version,
l.compartment);
}
static JSScript *
EvalCacheLookup(JSContext *cx, JSLinearString *str, StackFrame *caller, unsigned staticLevel,
JSPrincipals *principals, JSObject &scopeobj, JSScript **bucket)
/* static */ bool
EvalCacheHashPolicy::match(JSScript *script, const EvalCacheLookup &l)
{
// Cache local eval scripts indexed by source qualified by scope.
//
// An eval cache entry should never be considered a hit unless its
// strictness matches that of the new eval code. The existing code takes
// care of this, because hits are qualified by the function from which
// eval was called, whose strictness doesn't change. (We don't cache evals
// in eval code, so the calling function corresponds to the calling script,
// and its strictness never varies.) Scripts produced by calls to eval from
// global code aren't cached.
//
// FIXME bug 620141: Qualify hits by calling script rather than function.
// Then we wouldn't need the unintuitive !isEvalFrame() hack in EvalKernel
// to avoid caching nested evals in functions (thus potentially mismatching
// on strict mode), and we could cache evals in global code if desired.
unsigned count = 0;
JSScript **scriptp = bucket;
JS_ASSERT(IsEvalCacheCandidate(script));
JSVersion version = cx->findVersion();
JSScript *script;
JSSubsumePrincipalsOp subsume = cx->runtime->securityCallbacks->subsumePrincipals;
while ((script = *scriptp) != NULL) {
if (script->savedCallerFun &&
script->staticLevel == staticLevel &&
script->getVersion() == version &&
!script->hasSingletons &&
(!subsume || script->principals == principals ||
(subsume(principals, script->principals) &&
subsume(script->principals, principals))))
{
// Get the prior (cache-filling) eval's saved caller function.
// See frontend::CompileScript.
JSFunction *fun = script->getCallerFunction();
// Get the source string passed for safekeeping in the atom map
// by the prior eval to frontend::CompileScript.
JSAtom *keyStr = script->atoms[0];
if (fun == caller->fun()) {
/*
* Get the source string passed for safekeeping in the atom map
* by the prior eval to frontend::CompileScript.
*/
JSAtom *src = script->atoms[0];
if (src == str || EqualStrings(src, str)) {
// Source matches. Make sure there are no inner objects
// which might use the wrong parent and/or call scope by
// reusing the previous eval's script. Skip the script's
// first object, which entrains the eval's scope.
JS_ASSERT(script->objects()->length >= 1);
if (script->objects()->length == 1 &&
!script->hasRegexps()) {
JS_ASSERT(staticLevel == script->staticLevel);
*scriptp = script->evalHashLink();
script->evalHashLink() = NULL;
return script;
}
}
}
}
static const unsigned EVAL_CACHE_CHAIN_LIMIT = 4;
if (++count == EVAL_CACHE_CHAIN_LIMIT)
return NULL;
scriptp = &script->evalHashLink();
}
return NULL;
return EqualStrings(keyStr, l.str) &&
script->getCallerFunction() == l.caller &&
script->staticLevel == l.staticLevel &&
script->getVersion() == l.version &&
script->compartment() == l.compartment;
}
// There are two things we want to do with each script executed in EvalKernel:
@ -141,21 +76,21 @@ EvalCacheLookup(JSContext *cx, JSLinearString *str, StackFrame *caller, unsigned
// NB: Although the eval cache keeps a script alive wrt to the JS engine, from
// a jsdbgapi user's perspective, we want each eval() to create and destroy a
// script. This hides implementation details and means we don't have to deal
// with calls to JS_GetScriptObject for scripts in the eval cache (currently,
// script->object aliases script->evalHashLink()).
// with calls to JS_GetScriptObject for scripts in the eval cache.
class EvalScriptGuard
{
JSContext *cx_;
JSLinearString *str_;
JSScript **bucket_;
Rooted<JSScript*> script_;
/* These fields are only valid if lookup_.str is non-NULL. */
EvalCacheLookup lookup_;
EvalCache::AddPtr p_;
public:
EvalScriptGuard(JSContext *cx, JSLinearString *str)
: cx_(cx),
str_(str),
script_(cx) {
bucket_ = cx->runtime->evalCache.bucket(str);
EvalScriptGuard(JSContext *cx)
: cx_(cx), script_(cx)
{
lookup_.str = NULL;
}
~EvalScriptGuard() {
@ -163,17 +98,23 @@ class EvalScriptGuard
CallDestroyScriptHook(cx_->runtime->defaultFreeOp(), script_);
script_->isActiveEval = false;
script_->isCachedEval = true;
script_->evalHashLink() = *bucket_;
*bucket_ = script_;
if (lookup_.str && IsEvalCacheCandidate(script_))
cx_->runtime->evalCache.relookupOrAdd(p_, lookup_, script_);
}
}
void lookupInEvalCache(StackFrame *caller, unsigned staticLevel,
JSPrincipals *principals, JSObject &scopeobj) {
if (JSScript *found = EvalCacheLookup(cx_, str_, caller, staticLevel,
principals, scopeobj, bucket_)) {
js_CallNewScriptHook(cx_, found, NULL);
script_ = found;
void lookupInEvalCache(JSLinearString *str, JSFunction *caller, unsigned staticLevel)
{
lookup_.str = str;
lookup_.caller = caller;
lookup_.staticLevel = staticLevel;
lookup_.version = cx_->findVersion();
lookup_.compartment = cx_->compartment;
p_ = cx_->runtime->evalCache.lookupForAdd(lookup_);
if (p_) {
script_ = *p_;
cx_->runtime->evalCache.remove(p_);
js_CallNewScriptHook(cx_, script_, NULL);
script_->isCachedEval = false;
script_->isActiveEval = true;
}
@ -247,11 +188,6 @@ EvalKernel(JSContext *cx, const CallArgs &args, EvalType evalType, StackFrame *c
if (!ComputeThis(cx, caller))
return false;
thisv = caller->thisValue();
#ifdef DEBUG
jsbytecode *callerPC = caller->pcQuadratic(cx);
JS_ASSERT(callerPC && JSOp(*callerPC) == JSOP_EVAL);
#endif
} else {
JS_ASSERT(args.callee().global() == *scopeobj);
staticLevel = 0;
@ -310,12 +246,12 @@ EvalKernel(JSContext *cx, const CallArgs &args, EvalType evalType, StackFrame *c
}
}
EvalScriptGuard esg(cx, linearStr);
EvalScriptGuard esg(cx);
JSPrincipals *principals = PrincipalsForCompiledCode(args, cx);
if (evalType == DIRECT_EVAL && caller->isNonEvalFunctionFrame())
esg.lookupInEvalCache(caller, staticLevel, principals, *scopeobj);
esg.lookupInEvalCache(linearStr, caller->fun(), staticLevel);
if (!esg.foundScript()) {
unsigned lineno;
@ -327,10 +263,9 @@ EvalKernel(JSContext *cx, const CallArgs &args, EvalType evalType, StackFrame *c
bool compileAndGo = true;
bool noScriptRval = false;
bool needScriptGlobal = false;
JSScript *compiled = frontend::CompileScript(cx, scopeobj, caller,
principals, originPrincipals,
compileAndGo, noScriptRval, needScriptGlobal,
compileAndGo, noScriptRval,
chars, length, filename,
lineno, cx->findVersion(), linearStr,
staticLevel);

View File

@ -576,7 +576,7 @@ ExecuteRegExp(JSContext *cx, Native native, unsigned argc, Value *vp)
return false;
/* Step 4. */
const Value &lastIndex = reobj->getLastIndex();
Value lastIndex = reobj->getLastIndex();
/* Step 5. */
double i;
@ -588,7 +588,7 @@ ExecuteRegExp(JSContext *cx, Native native, unsigned argc, Value *vp)
i = 0;
const jschar *chars = linearInput->chars();
size_t length = input->length();
size_t length = linearInput->length();
/* Step 9a. */
if (i < 0 || i > length) {

View File

@ -21,54 +21,10 @@
using namespace js;
using namespace js::frontend;
bool
MarkInnerAndOuterFunctions(JSContext *cx, JSScript* script)
{
AssertRootingUnnecessary safe(cx);
Vector<JSScript *, 16> worklist(cx);
if (!worklist.append(script))
return false;
while (worklist.length()) {
JSScript *outer = worklist.back();
worklist.popBack();
if (outer->hasObjects()) {
ObjectArray *arr = outer->objects();
/*
* If this is an eval script, don't treat the saved caller function
* stored in the first object slot as an inner function.
*/
size_t start = outer->savedCallerFun ? 1 : 0;
for (size_t i = start; i < arr->length; i++) {
JSObject *obj = arr->vector[i];
if (!obj->isFunction())
continue;
JSFunction *fun = obj->toFunction();
JS_ASSERT(fun->isInterpreted());
JSScript *inner = fun->script();
if (outer->function() && outer->function()->isHeavyweight()) {
outer->isOuterFunction = true;
inner->isInnerFunction = true;
}
if (!inner->hasObjects())
continue;
if (!worklist.append(inner))
return false;
}
}
}
return true;
}
JSScript *
frontend::CompileScript(JSContext *cx, HandleObject scopeChain, StackFrame *callerFrame,
JSPrincipals *principals, JSPrincipals *originPrincipals,
bool compileAndGo, bool noScriptRval, bool needScriptGlobal,
bool compileAndGo, bool noScriptRval,
const jschar *chars, size_t length,
const char *filename, unsigned lineno, JSVersion version,
JSString *source_ /* = NULL */,
@ -108,14 +64,13 @@ frontend::CompileScript(JSContext *cx, HandleObject scopeChain, StackFrame *call
return NULL;
bool savedCallerFun = compileAndGo && callerFrame && callerFrame->isFunctionFrame();
GlobalObject *globalObject = needScriptGlobal ? GetCurrentGlobal(cx) : NULL;
Rooted<JSScript*> script(cx, JSScript::Create(cx,
/* enclosingScope = */ NullPtr(),
savedCallerFun,
principals,
originPrincipals,
compileAndGo,
noScriptRval,
globalObject,
version,
staticLevel));
if (!script)
@ -249,9 +204,6 @@ frontend::CompileScript(JSContext *cx, HandleObject scopeChain, StackFrame *call
bce.tellDebuggerAboutCompiledScript(cx);
if (!MarkInnerAndOuterFunctions(cx, script))
return NULL;
return script;
}
@ -279,14 +231,13 @@ frontend::CompileFunctionBody(JSContext *cx, HandleFunction fun,
if (!funtc.init())
return false;
GlobalObject *globalObject = fun->getParent() ? &fun->getParent()->global() : NULL;
Rooted<JSScript*> script(cx, JSScript::Create(cx,
/* enclosingScope = */ NullPtr(),
/* savedCallerFun = */ false,
principals,
originPrincipals,
/* compileAndGo = */ false,
/* noScriptRval = */ false,
globalObject,
version,
staticLevel));
if (!script)

View File

@ -16,7 +16,7 @@ namespace frontend {
JSScript *
CompileScript(JSContext *cx, HandleObject scopeChain, StackFrame *callerFrame,
JSPrincipals *principals, JSPrincipals *originPrincipals,
bool compileAndGo, bool noScriptRval, bool needScriptGlobal,
bool compileAndGo, bool noScriptRval,
const jschar *chars, size_t length,
const char *filename, unsigned lineno, JSVersion version,
JSString *source_ = NULL, unsigned staticLevel = 0);

View File

@ -673,12 +673,31 @@ PushStatementBCE(BytecodeEmitter *bce, StmtInfoBCE *stmt, StmtType type, ptrdiff
PushStatement(bce, stmt, type);
}
/*
* Return the enclosing lexical scope, which is the innermost enclosing static
* block object or compiler created function.
*/
static JSObject *
EnclosingStaticScope(BytecodeEmitter *bce)
{
if (bce->blockChain)
return bce->blockChain;
if (!bce->sc->inFunction()) {
JS_ASSERT(!bce->parent);
return NULL;
}
return bce->sc->fun();
}
// Push a block scope statement and link blockObj into bce->blockChain.
static void
PushBlockScopeBCE(BytecodeEmitter *bce, StmtInfoBCE *stmt, StaticBlockObject &blockObj,
ptrdiff_t top)
{
PushStatementBCE(bce, stmt, STMT_BLOCK, top);
blockObj.initEnclosingStaticScope(EnclosingStaticScope(bce));
FinishPushBlockScope(bce, stmt, blockObj);
}
@ -854,6 +873,7 @@ EmitAliasedVarOp(JSContext *cx, JSOp op, ScopeCoordinate sc, BytecodeEmitter *bc
SET_UINT16(pc, sc.slot);
pc += sizeof(uint16_t);
SET_UINT32_INDEX(pc, maybeBlockIndex);
CheckTypeSet(cx, bce, op);
return true;
}
@ -872,35 +892,48 @@ ClonedBlockDepth(BytecodeEmitter *bce)
static bool
EmitAliasedVarOp(JSContext *cx, JSOp op, ParseNode *pn, BytecodeEmitter *bce)
{
/*
* The contents of the dynamic scope chain (fp->scopeChain) exactly reflect
* the needsClone-subset of the block chain. Use this to determine the
* number of ClonedBlockObjects on fp->scopeChain to skip to find the scope
* object containing the var to which pn is bound. ALIASEDVAR ops cannot
* reach across with scopes so ClonedBlockObjects is the only NestedScope
* on the scope chain.
*/
unsigned skippedScopes = 0;
BytecodeEmitter *bceOfDef = bce;
if (pn->isUsed()) {
/*
* As explained in BindNameToSlot, the 'level' of a use indicates how
* many function scopes (i.e., BytecodeEmitters) to skip to find the
* enclosing function scope of the definition being accessed.
*/
for (unsigned i = pn->pn_cookie.level(); i; i--) {
skippedScopes += ClonedBlockDepth(bceOfDef);
if (bceOfDef->sc->funIsHeavyweight()) {
skippedScopes++;
if (bceOfDef->sc->fun()->isNamedLambda())
skippedScopes++;
}
bceOfDef = bceOfDef->parent;
}
} else {
JS_ASSERT(pn->isDefn());
JS_ASSERT(pn->pn_cookie.level() == bce->script->staticLevel);
}
ScopeCoordinate sc;
if (JOF_OPTYPE(pn->getOp()) == JOF_QARG) {
sc.hops = ClonedBlockDepth(bce);
sc.slot = bce->sc->bindings.formalIndexToSlot(pn->pn_cookie.slot());
sc.hops = skippedScopes + ClonedBlockDepth(bceOfDef);
sc.slot = bceOfDef->sc->bindings.formalIndexToSlot(pn->pn_cookie.slot());
} else {
JS_ASSERT(JOF_OPTYPE(pn->getOp()) == JOF_LOCAL || pn->isKind(PNK_FUNCTION));
unsigned local = pn->pn_cookie.slot();
if (local < bce->sc->bindings.numVars()) {
sc.hops = ClonedBlockDepth(bce);
sc.slot = bce->sc->bindings.varIndexToSlot(local);
if (local < bceOfDef->sc->bindings.numVars()) {
sc.hops = skippedScopes + ClonedBlockDepth(bceOfDef);
sc.slot = bceOfDef->sc->bindings.varIndexToSlot(local);
} else {
unsigned depth = local - bce->sc->bindings.numVars();
unsigned hops = 0;
StaticBlockObject *b = bce->blockChain;
unsigned depth = local - bceOfDef->sc->bindings.numVars();
StaticBlockObject *b = bceOfDef->blockChain;
while (!b->containsVarAtDepth(depth)) {
if (b->needsClone())
hops++;
skippedScopes++;
b = b->enclosingBlock();
}
sc.hops = hops;
sc.slot = b->localIndexToSlot(bce->sc->bindings, local);
sc.hops = skippedScopes;
sc.slot = b->localIndexToSlot(bceOfDef->sc->bindings, local);
}
}
@ -914,8 +947,12 @@ EmitVarOp(JSContext *cx, ParseNode *pn, JSOp op, BytecodeEmitter *bce)
JS_ASSERT_IF(pn->isKind(PNK_NAME), JOF_OPTYPE(op) == JOF_QARG || JOF_OPTYPE(op) == JOF_LOCAL);
JS_ASSERT(!pn->pn_cookie.isFree());
if (!bce->isAliasedName(pn))
if (!bce->isAliasedName(pn)) {
JS_ASSERT(pn->isUsed() || pn->isDefn());
JS_ASSERT_IF(pn->isUsed(), pn->pn_cookie.level() == 0);
JS_ASSERT_IF(pn->isDefn(), pn->pn_cookie.level() == bce->script->staticLevel);
return EmitUnaliasedVarOp(cx, op, pn->pn_cookie.slot(), bce);
}
switch (op) {
case JSOP_GETARG: case JSOP_GETLOCAL: op = JSOP_GETALIASEDVAR; break;
@ -1171,44 +1208,34 @@ TryConvertToGname(BytecodeEmitter *bce, ParseNode *pn, JSOp *op)
static bool
BindNameToSlot(JSContext *cx, BytecodeEmitter *bce, ParseNode *pn)
{
Definition *dn;
JSOp op;
Definition::Kind dn_kind;
JS_ASSERT(pn->isKind(PNK_NAME));
/* Idempotency tests come first, since we may be called more than once. */
if (pn->pn_dflags & PND_BOUND)
/* Don't attempt if 'pn' is already bound, deoptimized, or a nop. */
if ((pn->pn_dflags & PND_BOUND) || pn->isDeoptimized() || pn->getOp() == JSOP_NOP)
return true;
/* No cookie initialized for callee; it is pre-bound by definition. */
/* JSOP_CALLEE is pre-bound by definition. */
JS_ASSERT(!pn->isOp(JSOP_CALLEE));
/*
* The parser linked all uses (including forward references) to their
* definitions, unless a with statement or direct eval intervened.
* The parser already linked name uses to definitions when (where not
* prevented by non-lexical constructs like 'with' and 'eval').
*/
Definition *dn;
if (pn->isUsed()) {
JS_ASSERT(pn->pn_cookie.isFree());
dn = pn->pn_lexdef;
JS_ASSERT(dn->isDefn());
if (pn->isDeoptimized())
return true;
pn->pn_dflags |= (dn->pn_dflags & PND_CONST);
} else {
if (!pn->isDefn())
return true;
} else if (pn->isDefn()) {
dn = (Definition *) pn;
} else {
return true;
}
op = pn->getOp();
if (op == JSOP_NOP)
return true;
JSOp op = pn->getOp();
JS_ASSERT(JOF_OPTYPE(op) == JOF_ATOM);
RootedAtom atom(cx, pn->pn_atom);
UpvarCookie cookie = dn->pn_cookie;
dn_kind = dn->kind();
JS_ASSERT_IF(dn->kind() == Definition::CONST, pn->pn_dflags & PND_CONST);
/*
* Turn attempts to mutate const-declared bindings into get ops (for
@ -1224,7 +1251,7 @@ BindNameToSlot(JSContext *cx, BytecodeEmitter *bce, ParseNode *pn)
case JSOP_SETCONST:
break;
case JSOP_DELNAME:
if (dn_kind != Definition::UNKNOWN) {
if (dn->kind() != Definition::UNKNOWN) {
if (bce->callerFrame && dn->isTopLevel())
JS_ASSERT(bce->script->compileAndGo);
else
@ -1237,7 +1264,7 @@ BindNameToSlot(JSContext *cx, BytecodeEmitter *bce, ParseNode *pn)
if (pn->isConst()) {
if (bce->sc->needStrictChecks()) {
JSAutoByteString name;
if (!js_AtomToPrintableString(cx, atom, &name) ||
if (!js_AtomToPrintableString(cx, pn->pn_atom, &name) ||
!bce->reportStrictModeError(pn, JSMSG_READ_ONLY, name.ptr()))
{
return false;
@ -1247,7 +1274,7 @@ BindNameToSlot(JSContext *cx, BytecodeEmitter *bce, ParseNode *pn)
}
}
if (cookie.isFree()) {
if (dn->pn_cookie.isFree()) {
StackFrame *caller = bce->callerFrame;
if (caller) {
JS_ASSERT(bce->script->compileAndGo);
@ -1288,34 +1315,34 @@ BindNameToSlot(JSContext *cx, BytecodeEmitter *bce, ParseNode *pn)
return true;
}
uint16_t level = cookie.level();
JS_ASSERT(bce->script->staticLevel >= level);
const unsigned skip = bce->script->staticLevel - level;
if (skip != 0)
return true;
/*
* At this point, we are only dealing with uses that have already been
* bound to definitions via pn_lexdef. The rest of this routine converts
* the parse node of the use from its initial JSOP_*NAME* op to a LOCAL/ARG
* op. This requires setting the node's pn_cookie with a pair (level, slot)
* where 'level' is the number of function scopes between the use and the
* def and 'slot' is the index to emit as the immediate of the ARG/LOCAL
* op. For example, in this code:
*
* function(a,b,x) { return x }
* function(y) { function() { return y } }
*
* x will get (level = 0, slot = 2) and y will get (level = 1, slot = 0).
*/
JS_ASSERT(!pn->isDefn());
JS_ASSERT(pn->isUsed());
JS_ASSERT(pn->pn_lexdef);
JS_ASSERT(pn->pn_cookie.isFree());
/*
* We are compiling a function body and may be able to optimize name
* to stack slot. Look for an argument or variable in the function and
* rewrite pn_op and update pn accordingly.
*/
switch (dn_kind) {
switch (dn->kind()) {
case Definition::UNKNOWN:
return true;
case Definition::LET:
switch (op) {
case JSOP_NAME: op = JSOP_GETLOCAL; break;
case JSOP_SETNAME: op = JSOP_SETLOCAL; break;
case JSOP_INCNAME: op = JSOP_INCLOCAL; break;
case JSOP_NAMEINC: op = JSOP_LOCALINC; break;
case JSOP_DECNAME: op = JSOP_DECLOCAL; break;
case JSOP_NAMEDEC: op = JSOP_LOCALDEC; break;
default: JS_NOT_REACHED("let");
}
break;
case Definition::ARG:
switch (op) {
case JSOP_NAME: op = JSOP_GETARG; break;
@ -1332,7 +1359,16 @@ BindNameToSlot(JSContext *cx, BytecodeEmitter *bce, ParseNode *pn)
case Definition::VAR:
if (dn->isOp(JSOP_CALLEE)) {
JS_ASSERT(op != JSOP_CALLEE);
JS_ASSERT((bce->sc->fun()->flags & JSFUN_LAMBDA) && atom == bce->sc->fun()->atom);
/*
* Currently, the ALIASEDVAR ops do not support accessing the
* callee of a DeclEnvObject, so use NAME.
*/
if (dn->pn_cookie.level() != bce->script->staticLevel)
return true;
JS_ASSERT(bce->sc->fun()->flags & JSFUN_LAMBDA);
JS_ASSERT(pn->pn_atom == bce->sc->fun()->atom);
/*
* Leave pn->isOp(JSOP_NAME) if bce->fun is heavyweight to
@ -1370,10 +1406,9 @@ BindNameToSlot(JSContext *cx, BytecodeEmitter *bce, ParseNode *pn)
}
/* FALL THROUGH */
default:
JS_ASSERT_IF(dn_kind != Definition::FUNCTION,
dn_kind == Definition::VAR ||
dn_kind == Definition::CONST);
case Definition::FUNCTION:
case Definition::CONST:
case Definition::LET:
switch (op) {
case JSOP_NAME: op = JSOP_GETLOCAL; break;
case JSOP_SETNAME: op = JSOP_SETLOCAL; break;
@ -1384,14 +1419,42 @@ BindNameToSlot(JSContext *cx, BytecodeEmitter *bce, ParseNode *pn)
case JSOP_NAMEDEC: op = JSOP_LOCALDEC; break;
default: JS_NOT_REACHED("local");
}
JS_ASSERT_IF(dn_kind == Definition::CONST, pn->pn_dflags & PND_CONST);
break;
default:
JS_NOT_REACHED("unexpected dn->kind()");
}
/*
* The difference between the current static level and the static level of
* the definition is the number of function scopes between the current
* scope and dn's scope.
*/
unsigned skip = bce->script->staticLevel - dn->pn_cookie.level();
JS_ASSERT_IF(skip, dn->isClosed());
/*
* Explicitly disallow accessing var/let bindings in global scope from
* nested functions. The reason for this limitation is that, since the
* global script is not included in the static scope chain (1. because it
* has no object to stand in the static scope chain, 2. to minimize memory
* bloat where a single live function keeps its whole global script
* alive.), ScopeCoordinateToTypeSet is not able to find the var/let's
* associated types::TypeSet.
*/
if (skip) {
BytecodeEmitter *bceSkipped = bce;
for (unsigned i = 0; i < skip; i++)
bceSkipped = bceSkipped->parent;
if (!bceSkipped->sc->inFunction())
return true;
}
JS_ASSERT(!pn->isOp(op));
pn->setOp(op);
if (!pn->pn_cookie.set(bce->sc->context, 0, cookie.slot()))
if (!pn->pn_cookie.set(bce->sc->context, skip, dn->pn_cookie.slot()))
return false;
pn->pn_dflags |= PND_BOUND;
return true;
}
@ -1642,11 +1705,8 @@ BytecodeEmitter::tellDebuggerAboutCompiledScript(JSContext *cx)
js_CallNewScriptHook(cx, script, script->function());
if (!parent) {
GlobalObject *compileAndGoGlobal = NULL;
if (script->compileAndGo) {
compileAndGoGlobal = script->globalObject;
if (!compileAndGoGlobal)
compileAndGoGlobal = &sc->scopeChain()->global();
}
if (script->compileAndGo)
compileAndGoGlobal = &script->global();
Debugger::onNewScript(cx, script, compileAndGoGlobal);
}
}
@ -3432,17 +3492,14 @@ EmitAssignment(JSContext *cx, BytecodeEmitter *bce, ParseNode *lhs, JSOp op, Par
* Specialize to avoid ECMA "reference type" values on the operand
* stack, which impose pervasive runtime "GetValue" costs.
*/
jsatomid atomIndex = (jsatomid) -1; /* quell GCC overwarning */
jsatomid atomIndex = (jsatomid) -1;
jsbytecode offset = 1;
switch (lhs->getKind()) {
case PNK_NAME:
if (!BindNameToSlot(cx, bce, lhs))
return false;
if (!lhs->pn_cookie.isFree()) {
JS_ASSERT(lhs->pn_cookie.level() == 0);
atomIndex = lhs->pn_cookie.slot();
} else {
if (lhs->pn_cookie.isFree()) {
if (!bce->makeAtomIndex(lhs->pn_atom, &atomIndex))
return false;
if (!lhs->isConst()) {
@ -4217,7 +4274,6 @@ EmitIf(JSContext *cx, BytecodeEmitter *bce, ParseNode *pn)
* destructure y
* pick 1
* dup +1 SRC_DESTRUCTLET + offset to enterlet0
* pick
* destructure z
* pick 1
* pop -1
@ -4824,15 +4880,15 @@ EmitFunc(JSContext *cx, BytecodeEmitter *bce, ParseNode *pn)
JS_ASSERT_IF(bce->sc->inStrictMode(), sc.inStrictMode());
// Inherit most things (principals, version, etc) from the parent.
GlobalObject *globalObject = fun->getParent() ? &fun->getParent()->global() : NULL;
Rooted<JSScript*> parent(cx, bce->script);
Rooted<JSObject*> enclosingScope(cx, EnclosingStaticScope(bce));
Rooted<JSScript*> script(cx, JSScript::Create(cx,
enclosingScope,
/* savedCallerFun = */ false,
parent->principals,
parent->originPrincipals,
parent->compileAndGo,
/* noScriptRval = */ false,
globalObject,
parent->getVersion(),
parent->staticLevel + 1));
if (!script)

View File

@ -736,6 +736,10 @@ struct ParseNode {
optimizable via an upvar opcode */
#define PND_CLOSED 0x200 /* variable is closed over */
#define PND_DEFAULT 0x400 /* definition is an arg with a default */
#define PND_IMPLICITARGUMENTS 0x800 /* the definition is a placeholder for
'arguments' that has been converted
into a definition after the function
body has been parsed. */
/* Flags to propagate from uses to definition. */
#define PND_USE2DEF_FLAGS (PND_ASSIGNED | PND_CLOSED)
@ -782,6 +786,7 @@ struct ParseNode {
bool isDeoptimized() const { return test(PND_DEOPTIMIZED); }
bool isAssigned() const { return test(PND_ASSIGNED); }
bool isClosed() const { return test(PND_CLOSED); }
bool isImplicitArguments() const { return test(PND_IMPLICITARGUMENTS); }
/*
* True iff this definition creates a top-level binding in the overall

View File

@ -102,14 +102,6 @@ PushStatementTC(TreeContext *tc, StmtInfoTC *stmt, StmtType type)
stmt->isFunctionBodyBlock = false;
}
// Push a block scope statement and link blockObj into tc->blockChain.
static void
PushBlockScopeTC(TreeContext *tc, StmtInfoTC *stmt, StaticBlockObject &blockObj)
{
PushStatementTC(tc, stmt, STMT_BLOCK);
FinishPushBlockScope(tc, stmt, blockObj);
}
Parser::Parser(JSContext *cx, JSPrincipals *prin, JSPrincipals *originPrin,
const jschar *chars, size_t length, const char *fn, unsigned ln, JSVersion v,
bool foldConstants, bool compileAndGo)
@ -524,7 +516,10 @@ CheckStrictParameters(JSContext *cx, Parser *parser)
return false;
// Start with lastVariable(), not the last argument, for destructuring.
for (Shape::Range r = sc->bindings.lastVariable(); !r.empty(); r.popFront()) {
Shape::Range r = sc->bindings.lastVariable();
Shape::Range::AutoRooter root(cx, &r);
for (; !r.empty(); r.popFront()) {
jsid id = r.front().propid();
if (!JSID_IS_ATOM(id))
continue;
@ -670,12 +665,15 @@ Parser::functionBody(FunctionBodyType type)
if (atom == arguments) {
/*
* Turn 'dn' into a proper definition so uses will be bound as
* GETLOCAL in the emitter.
* GETLOCAL in the emitter. The PND_IMPLICITARGUMENTS flag informs
* CompExprTransplanter (and anyone else) that this definition node
* has no proper declaration in the parse tree.
*/
if (!BindLocalVariable(context, tc, dn, VARIABLE))
return NULL;
dn->setOp(JSOP_GETLOCAL);
dn->pn_dflags &= ~PND_PLACEHOLDER;
dn->pn_dflags |= PND_IMPLICITARGUMENTS;
/* NB: this leaves r invalid so we must break immediately. */
tc->lexdeps->remove(arguments);
@ -2141,12 +2139,16 @@ struct RemoveDecl {
static void
PopStatementTC(TreeContext *tc)
{
if (tc->topStmt->isBlockScope) {
StaticBlockObject &blockObj = *tc->topStmt->blockObj;
JS_ASSERT(!blockObj.inDictionaryMode());
ForEachLetDef(tc, blockObj, RemoveDecl());
}
StaticBlockObject *blockObj = tc->topStmt->blockObj;
JS_ASSERT(!!blockObj == (tc->topStmt->isBlockScope));
FinishPopStatement(tc);
if (blockObj) {
JS_ASSERT(!blockObj->inDictionaryMode());
ForEachLetDef(tc, *blockObj, RemoveDecl());
blockObj->resetPrevBlockChainFromParser();
}
}
static inline bool
@ -2755,22 +2757,27 @@ Parser::returnOrYield(bool useAssignExpr)
}
static ParseNode *
PushLexicalScope(JSContext *cx, Parser *parser, StaticBlockObject &obj, StmtInfoTC *stmt)
PushLexicalScope(JSContext *cx, Parser *parser, StaticBlockObject &blockObj, StmtInfoTC *stmt)
{
ParseNode *pn = LexicalScopeNode::create(PNK_LEXICALSCOPE, parser);
if (!pn)
return NULL;
ObjectBox *blockbox = parser->newObjectBox(&obj);
ObjectBox *blockbox = parser->newObjectBox(&blockObj);
if (!blockbox)
return NULL;
PushBlockScopeTC(parser->tc, stmt, obj);
TreeContext *tc = parser->tc;
PushStatementTC(tc, stmt, STMT_BLOCK);
blockObj.initPrevBlockChainFromParser(tc->blockChain);
FinishPushBlockScope(tc, stmt, blockObj);
pn->setOp(JSOP_LEAVEBLOCK);
pn->pn_objbox = blockbox;
pn->pn_cookie.makeFree();
pn->pn_dflags = 0;
if (!GenerateBlockId(parser->tc, stmt->blockid))
if (!GenerateBlockId(tc, stmt->blockid))
return NULL;
pn->pn_blockid = stmt->blockid;
return pn;
@ -3772,7 +3779,7 @@ Parser::letStatement()
stmt->downScope = tc->topScopeStmt;
tc->topScopeStmt = stmt;
blockObj->setEnclosingBlock(tc->blockChain);
blockObj->initPrevBlockChainFromParser(tc->blockChain);
tc->blockChain = blockObj;
stmt->blockObj = blockObj;
@ -4901,11 +4908,16 @@ class CompExprTransplanter {
bool genexp;
unsigned adjust;
unsigned funcLevel;
HashSet<Definition *> visitedImplicitArguments;
public:
CompExprTransplanter(ParseNode *pn, Parser *parser, bool ge, unsigned adj)
: root(pn), parser(parser), genexp(ge), adjust(adj), funcLevel(0)
{
: root(pn), parser(parser), genexp(ge), adjust(adj), funcLevel(0),
visitedImplicitArguments(parser->context)
{}
bool init() {
return visitedImplicitArguments.init();
}
bool transplant(ParseNode *pn);
@ -5180,6 +5192,21 @@ CompExprTransplanter::transplant(ParseNode *pn)
tc->parent->lexdeps->remove(atom);
if (!tc->lexdeps->put(atom, dn))
return false;
} else if (dn->isImplicitArguments()) {
/*
* Implicit 'arguments' Definition nodes (see
* PND_IMPLICITARGUMENTS in Parser::functionBody) are only
* reachable via the lexdefs of their uses. Unfortunately,
* there may be multiple uses, so we need to maintain a set
* to only bump the definition once.
*/
if (genexp && !visitedImplicitArguments.has(dn)) {
if (!BumpStaticLevel(dn, tc))
return false;
AdjustBlockId(dn, adjust, tc);
if (!visitedImplicitArguments.put(dn))
return false;
}
}
}
}
@ -5262,6 +5289,9 @@ Parser::comprehensionTail(ParseNode *kid, unsigned blockid, bool isGenexp,
pnp = &pn->pn_expr;
CompExprTransplanter transplanter(kid, this, kind == PNK_SEMI, adjust);
if (!transplanter.init())
return NULL;
transplanter.transplant(kid);
JS_ASSERT(tc->blockChain && tc->blockChain == pn->pn_objbox->object);

View File

@ -145,7 +145,6 @@ frontend::FinishPushBlockScope(ContextT *ct, typename ContextT::StmtInfo *stmt,
StaticBlockObject &blockObj)
{
stmt->isBlockScope = true;
blockObj.setEnclosingBlock(ct->blockChain);
stmt->downScope = ct->topScopeStmt;
ct->topScopeStmt = stmt;
ct->blockChain = &blockObj;

View File

@ -0,0 +1,5 @@
assertEq(((function() arguments) for (x in [1])).next()(42)[0], 42);
assertEq(((function() {return arguments}) for (x in [1])).next()(42)[0], 42);
assertEq(((function() {return arguments[0] + arguments[1]}) for (x in [1])).next()(41,1), 42);
assertEq(((function() {return arguments[0] + (function() { return arguments[0]})(arguments[1])}) for (x in [1])).next()(41,1), 42);
assertEq(((function() { var arguments = 3; return arguments}) for (x in [1])).next()(42), 3);

View File

@ -314,33 +314,25 @@ ScriptAnalysis::analyzeBytecode(JSContext *cx)
case JSOP_BINDNAME:
case JSOP_SETNAME:
case JSOP_DELNAME:
case JSOP_GETALIASEDVAR:
case JSOP_CALLALIASEDVAR:
case JSOP_SETALIASEDVAR:
usesScopeChain_ = true;
isInlineable = false;
break;
case JSOP_GETALIASEDVAR:
case JSOP_CALLALIASEDVAR:
case JSOP_SETALIASEDVAR: {
JS_ASSERT(!isInlineable);
usesScopeChain_ = true;
break;
}
case JSOP_DEFFUN:
case JSOP_DEFVAR:
case JSOP_DEFCONST:
case JSOP_SETCONST:
extendsScope_ = true;
isInlineable = canTrackVars = false;
break;
case JSOP_EVAL:
extendsScope_ = true;
isInlineable = canTrackVars = false;
break;
case JSOP_ENTERWITH:
addsScopeObjects_ = true;
isJaegerCompileable = isInlineable = canTrackVars = false;
break;
@ -348,7 +340,6 @@ ScriptAnalysis::analyzeBytecode(JSContext *cx)
case JSOP_ENTERLET1:
case JSOP_ENTERBLOCK:
case JSOP_LEAVEBLOCK:
addsScopeObjects_ = true;
isInlineable = false;
break;

View File

@ -359,16 +359,6 @@ static inline uint32_t GetBytecodeSlot(JSScript *script, jsbytecode *pc)
case JSOP_LOCALDEC:
return LocalSlot(script, GET_SLOTNO(pc));
case JSOP_GETALIASEDVAR:
case JSOP_CALLALIASEDVAR:
case JSOP_SETALIASEDVAR:
{
unsigned index;
return ScopeCoordinateToFrameIndex(script, pc, &index) == FrameIndex_Local
? LocalSlot(script, index)
: ArgSlot(index);
}
case JSOP_THIS:
return ThisSlot();
@ -843,8 +833,6 @@ class ScriptAnalysis
bool usesThisValue_:1;
bool hasFunctionCalls_:1;
bool modifiesArguments_:1;
bool extendsScope_:1;
bool addsScopeObjects_:1;
bool localsAliasStack_:1;
bool isInlineable:1;
bool isJaegerCompileable:1;
@ -900,15 +888,6 @@ class ScriptAnalysis
*/
bool modifiesArguments() { return modifiesArguments_; }
/*
* True if the script may extend declarations in its top level scope with
* dynamic fun/var declarations or through eval.
*/
bool extendsScope() { return extendsScope_; }
/* True if the script may add block or with objects to its scope chain. */
bool addsScopeObjects() { return addsScopeObjects_; }
/*
* True if there are any LOCAL opcodes aliasing values on the stack (above
* script->nfixed).
@ -1117,25 +1096,6 @@ class ScriptAnalysis
return lifetimes[slot];
}
/*
* If a NAME or similar opcode is definitely accessing a particular slot
* of a script this one is nested in, get that script/slot.
*/
struct NameAccess {
JSScript *script;
types::TypeScriptNesting *nesting;
uint32_t slot;
/* Decompose the slot above. */
bool arg;
uint32_t index;
const Value **basePointer() const {
return arg ? &nesting->argArray : &nesting->varArray;
}
};
NameAccess resolveNameAccess(JSContext *cx, jsid id, bool addDependency = false);
void printSSA(JSContext *cx);
void printTypes(JSContext *cx);

View File

@ -876,6 +876,9 @@ JSRuntime::init(uint32_t maxbytes)
if (!scriptFilenameTable.init())
return false;
if (!evalCache.init())
return false;
debugScopes = this->new_<DebugScopes>(this);
if (!debugScopes || !debugScopes->init()) {
Foreground::delete_(debugScopes);
@ -4663,8 +4666,15 @@ JS_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent_)
return NULL;
}
/*
* If a function was compiled as compile-and-go or was compiled to be
* lexically nested inside some other script, we cannot clone it without
* breaking the compiler's assumptions.
*/
RootedFunction fun(cx, funobj->toFunction());
if (fun->isInterpreted() && fun->script()->compileAndGo) {
if (fun->isInterpreted() &&
(fun->script()->compileAndGo || fun->script()->enclosingStaticScope()))
{
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
JSMSG_BAD_CLONE_FUNOBJ_SCOPE);
return NULL;
@ -4902,10 +4912,9 @@ CompileUCScriptForPrincipalsCommon(JSContext *cx, JSObject *obj_,
bool compileAndGo = cx->hasRunOption(JSOPTION_COMPILE_N_GO);
bool noScriptRval = cx->hasRunOption(JSOPTION_NO_SCRIPT_RVAL);
bool needScriptGlobal = true;
return frontend::CompileScript(cx, obj, NULL, principals, originPrincipals,
compileAndGo, noScriptRval, needScriptGlobal,
chars, length, filename, lineno, version);
compileAndGo, noScriptRval, chars, length,
filename, lineno, version);
}
extern JS_PUBLIC_API(JSScript *)
@ -5108,10 +5117,9 @@ CompileUTF8FileHelper(JSContext *cx, JSObject *obj_, JSPrincipals *principals,
if (JS_DecodeUTF8(cx, buf, len, decodebuf, &decodelen)) {
bool compileAndGo = cx->hasRunOption(JSOPTION_COMPILE_N_GO);
bool noScriptRval = cx->hasRunOption(JSOPTION_NO_SCRIPT_RVAL);
bool needScriptGlobal = true;
script = frontend::CompileScript(cx, obj, NULL, principals, NULL,
compileAndGo, noScriptRval, needScriptGlobal,
decodebuf, decodelen, filename, 1, cx->findVersion());
compileAndGo, noScriptRval, decodebuf, decodelen,
filename, 1, cx->findVersion());
} else {
script = NULL;
}
@ -5181,9 +5189,7 @@ JS_PUBLIC_API(JSObject *)
JS_GetGlobalFromScript(JSScript *script)
{
JS_ASSERT(!script->isCachedEval);
JS_ASSERT(script->globalObject);
return script->globalObject;
return &script->global();
}
static JSFunction *
@ -5378,7 +5384,7 @@ JS_ExecuteScript(JSContext *cx, JSObject *obj, JSScript *scriptArg_, jsval *rval
* mozilla, but there doesn't seem to be one, so we handle it here.
*/
if (scriptArg->compartment() != obj->compartment()) {
script = CloneScript(cx, scriptArg);
script = CloneScript(cx, NullPtr(), NullPtr(), scriptArg);
if (!script.get())
return false;
} else {
@ -5409,13 +5415,12 @@ EvaluateUCScriptForPrincipalsCommon(JSContext *cx, JSObject *obj_,
bool compileAndGo = true;
bool noScriptRval = !rval;
bool needScriptGlobal = true;
CHECK_REQUEST(cx);
AutoLastFrameCheck lfc(cx);
JSScript *script = frontend::CompileScript(cx, obj, NULL, principals, originPrincipals,
compileAndGo, noScriptRval, needScriptGlobal,
chars, length, filename, lineno, compileVersion);
compileAndGo, noScriptRval, chars, length,
filename, lineno, compileVersion);
if (!script)
return false;

View File

@ -4645,6 +4645,10 @@ extern JS_PUBLIC_API(JSFunction *)
JS_DefineFunctionById(JSContext *cx, JSObject *obj, jsid id, JSNative call,
unsigned nargs, unsigned attrs);
/*
* Clone a top-level function into a new scope. This function will dynamically
* fail if funobj was lexically nested inside some other function.
*/
extern JS_PUBLIC_API(JSObject *)
JS_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent);

View File

@ -173,18 +173,25 @@ class ToSourceCache
void purge();
};
class EvalCache
struct EvalCacheLookup
{
static const unsigned SHIFT = 6;
static const unsigned LENGTH = 1 << SHIFT;
JSScript *table_[LENGTH];
public:
EvalCache() { PodArrayZero(table_); }
JSScript **bucket(JSLinearString *str);
void purge();
JSLinearString *str;
JSFunction *caller;
unsigned staticLevel;
JSVersion version;
JSCompartment *compartment;
};
struct EvalCacheHashPolicy
{
typedef EvalCacheLookup Lookup;
static HashNumber hash(const Lookup &l);
static bool match(JSScript *script, const EvalCacheLookup &l);
};
typedef HashSet<JSScript *, EvalCacheHashPolicy, SystemAllocPolicy> EvalCache;
class NativeIterCache
{
static const size_t SIZE = size_t(1) << 8;

View File

@ -293,11 +293,8 @@ class CompartmentChecker
}
void check(JSScript *script) {
if (script) {
if (script)
check(script->compartment());
if (!script->isCachedEval && script->globalObject)
check(script->globalObject);
}
}
void check(StackFrame *fp) {

View File

@ -535,7 +535,6 @@ JSCompartment::sweep(FreeOp *fop, bool releaseTypes)
if (releaseTypes) {
script->types->destroy();
script->types = NULL;
script->typesPurged = true;
}
}
}

View File

@ -128,7 +128,7 @@ struct JSCompartment
//
// In contrast, JSObject::global() is infallible because marking a JSObject
// always marks its global as well.
// TODO: add infallible JSScript::global() and JSContext::global()
// TODO: add infallible JSScript::global()
//
js::GlobalObject *maybeGlobal() const {
JS_ASSERT_IF(global_, global_->compartment() == this);

View File

@ -185,7 +185,7 @@ JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc, JSTrapHandler handle
if (!CheckDebugMode(cx))
return false;
BreakpointSite *site = script->getOrCreateBreakpointSite(cx, pc, NULL);
BreakpointSite *site = script->getOrCreateBreakpointSite(cx, pc);
if (!site)
return false;
site->setTrap(cx->runtime->defaultFreeOp(), handler, closure);

View File

@ -336,7 +336,8 @@ fun_resolve(JSContext *cx, HandleObject obj, HandleId id, unsigned flags,
template<XDRMode mode>
bool
js::XDRInterpretedFunction(XDRState<mode> *xdr, JSObject **objp, JSScript *parentScript)
js::XDRInterpretedFunction(XDRState<mode> *xdr, HandleObject enclosingScope, HandleScript enclosingScript,
JSObject **objp)
{
/* NB: Keep this in sync with CloneInterpretedFunction. */
JSAtom *atom;
@ -382,7 +383,7 @@ js::XDRInterpretedFunction(XDRState<mode> *xdr, JSObject **objp, JSScript *paren
if (!xdr->codeUint32(&flagsword))
return false;
if (!XDRScript(xdr, &script, parentScript))
if (!XDRScript(xdr, enclosingScope, enclosingScript, fun, &script))
return false;
if (mode == XDR_DECODE) {
@ -403,13 +404,13 @@ js::XDRInterpretedFunction(XDRState<mode> *xdr, JSObject **objp, JSScript *paren
}
template bool
js::XDRInterpretedFunction(XDRState<XDR_ENCODE> *xdr, JSObject **objp, JSScript *parentScript);
js::XDRInterpretedFunction(XDRState<XDR_ENCODE> *, HandleObject, HandleScript, JSObject **);
template bool
js::XDRInterpretedFunction(XDRState<XDR_DECODE> *xdr, JSObject **objp, JSScript *parentScript);
js::XDRInterpretedFunction(XDRState<XDR_DECODE> *, HandleObject, HandleScript, JSObject **);
JSObject *
js::CloneInterpretedFunction(JSContext *cx, HandleFunction srcFun)
js::CloneInterpretedFunction(JSContext *cx, HandleObject enclosingScope, HandleFunction srcFun)
{
/* NB: Keep this in sync with XDRInterpretedFunction. */
@ -423,7 +424,7 @@ js::CloneInterpretedFunction(JSContext *cx, HandleFunction srcFun)
return NULL;
Rooted<JSScript*> srcScript(cx, srcFun->script());
JSScript *clonedScript = CloneScript(cx, srcScript);
JSScript *clonedScript = CloneScript(cx, enclosingScope, clone, srcScript);
if (!clonedScript)
return NULL;
@ -1281,20 +1282,23 @@ js_CloneFunctionObject(JSContext *cx, HandleFunction fun, HandleObject parent,
} else {
/*
* Across compartments we have to clone the script for interpreted
* functions.
* functions. Cross-compartment cloning only happens via JSAPI
* (JS_CloneFunctionObject) which dynamically ensures that 'script' has
* no enclosing lexical scope (only the global scope).
*/
if (clone->isInterpreted()) {
RootedScript script(cx, clone->script());
JS_ASSERT(script);
JS_ASSERT(script->compartment() == fun->compartment());
JS_ASSERT(script->compartment() != cx->compartment);
JS_ASSERT(!script->enclosingStaticScope());
clone->mutableScript().init(NULL);
JSScript *cscript = CloneScript(cx, script);
JSScript *cscript = CloneScript(cx, NullPtr(), clone, script);
if (!cscript)
return NULL;
cscript->globalObject = &clone->global();
clone->setScript(cscript);
cscript->setFunction(clone);
if (!clone->setTypeForScriptedFunction(cx))

View File

@ -72,6 +72,7 @@ struct JSFunction : public JSObject
bool isNullClosure() const { return kind() == JSFUN_NULL_CLOSURE; }
bool isFunctionPrototype() const { return flags & JSFUN_PROTOTYPE; }
bool isInterpretedConstructor() const { return isInterpreted() && !isFunctionPrototype(); }
bool isNamedLambda() const { return (flags & JSFUN_LAMBDA) && atom; }
uint16_t kind() const { return flags & JSFUN_KINDMASK; }
void setKind(uint16_t k) {
@ -253,17 +254,15 @@ JSFunction::toExtended() const
return static_cast<const js::FunctionExtended *>(this);
}
inline bool
js_IsNamedLambda(JSFunction *fun) { return (fun->flags & JSFUN_LAMBDA) && fun->atom; }
namespace js {
template<XDRMode mode>
bool
XDRInterpretedFunction(XDRState<mode> *xdr, JSObject **objp, JSScript *parentScript);
XDRInterpretedFunction(XDRState<mode> *xdr, HandleObject enclosingScope,
HandleScript enclosingScript, JSObject **objp);
extern JSObject *
CloneInterpretedFunction(JSContext *cx, HandleFunction fun);
CloneInterpretedFunction(JSContext *cx, HandleObject enclosingScope, HandleFunction fun);
} /* namespace js */

View File

@ -3056,7 +3056,7 @@ PurgeRuntime(JSTracer *trc)
rt->newObjectCache.purge();
rt->nativeIterCache.purge();
rt->toSourceCache.purge();
rt->evalCache.purge();
rt->evalCache.clear();
for (ContextIter acx(rt); !acx.done(); acx.next())
acx->purge();
@ -4327,6 +4327,18 @@ JS::CheckStackRoots(JSContext *cx)
// could happen.)
JS_ASSERT(!cx->rootingUnnecessary);
// GCs can't happen when analysis/inference/compilation are active.
if (cx->compartment->activeAnalysis)
return;
// Can switch to the atoms compartment during analysis.
if (IsAtomsCompartment(cx->compartment)) {
for (CompartmentsIter c(rt); !c.done(); c.next()) {
if (c.get()->activeAnalysis)
return;
}
}
AutoCopyFreeListToArenas copy(rt);
JSTracer checker;

View File

@ -1863,7 +1863,7 @@ TypeCompartment::newAllocationSiteTypeObject(JSContext *cx, AllocationSiteKey ke
JS_ASSERT(!p);
RootedObject proto(cx);
RootedObject global(cx, key.script->global());
RootedObject global(cx, &key.script->global());
if (!js_GetClassPrototype(cx, global, key.kind, &proto, NULL))
return NULL;
@ -1955,7 +1955,7 @@ types::UseNewTypeForInitializer(JSContext *cx, JSScript *script, jsbytecode *pc,
AutoEnterTypeInference enter(cx);
if (!script->ensureRanAnalysis(cx, NULL))
if (!script->ensureRanAnalysis(cx))
return false;
return !script->analysis()->getCode(pc).inLoop;
@ -1967,7 +1967,7 @@ types::ArrayPrototypeHasIndexedProperty(JSContext *cx, JSScript *script)
if (!cx->typeInferenceEnabled() || !script->hasGlobal())
return true;
JSObject *proto = script->global()->getOrCreateArrayPrototype(cx);
JSObject *proto = script->global().getOrCreateArrayPrototype(cx);
if (!proto)
return true;
@ -2889,8 +2889,6 @@ TypeObject::setFlags(JSContext *cx, TypeObjectFlags flags)
/* Make sure flags are consistent with persistent object state. */
JS_ASSERT_IF(flags & OBJECT_FLAG_UNINLINEABLE,
interpretedFunction->script()->uninlineable);
JS_ASSERT_IF(flags & OBJECT_FLAG_REENTRANT_FUNCTION,
interpretedFunction->script()->reentrantOuterFunction);
JS_ASSERT_IF(flags & OBJECT_FLAG_ITERATED,
singleton->lastProperty()->hasObjectFlag(BaseShape::ITERATED_SINGLETON));
}
@ -3142,105 +3140,6 @@ GetInitializerType(JSContext *cx, JSScript *script, jsbytecode *pc)
return TypeScript::InitObject(cx, script, pc, key);
}
/*
* Detach nesting state for script from its parent, removing it entirely if it
* has no children of its own. This happens when walking type information while
* initially resolving NAME accesses, thus will not invalidate any compiler
* dependencies.
*/
static void
DetachNestingParent(JSScript *script)
{
TypeScriptNesting *nesting = script->nesting();
if (!nesting || !nesting->parent)
return;
/* Remove from parent's list of children. */
JSScript **pscript = &nesting->parent->nesting()->children;
while ((*pscript)->nesting() != nesting)
pscript = &(*pscript)->nesting()->next;
*pscript = nesting->next;
nesting->parent = NULL;
/* If this nesting can have no children of its own, destroy it. */
if (!script->isOuterFunction)
script->clearNesting();
}
ScriptAnalysis::NameAccess
ScriptAnalysis::resolveNameAccess(JSContext *cx, jsid id, bool addDependency)
{
JS_ASSERT(cx->typeInferenceEnabled());
NameAccess access;
PodZero(&access);
if (!JSID_IS_ATOM(id))
return access;
JSAtom *atom = JSID_TO_ATOM(id);
JSScript *script = this->script;
while (script->function() && script->nesting()) {
if (!script->ensureRanInference(cx))
return access;
/*
* Don't resolve names in scripts which use 'let' or 'with'. New names
* bound here can mask variables of the script itself.
*
* Also, don't resolve names in scripts which are generators. Frame
* balancing works differently for generators and we do not maintain
* active frame counts for such scripts.
*/
if (script->analysis()->addsScopeObjects() || script->isGenerator)
return access;
/* Check if the script definitely binds the identifier. */
unsigned index;
BindingKind kind = script->bindings.lookup(cx, atom, &index);
if (kind == ARGUMENT || kind == VARIABLE) {
TypeObject *obj = script->function()->getType(cx);
if (addDependency) {
/*
* Record the dependency which compiled code has on the outer
* function being non-reentrant.
*/
if (TypeSet::HasObjectFlags(cx, obj, OBJECT_FLAG_REENTRANT_FUNCTION))
return access;
}
if (!script->isOuterFunction)
return access;
access.script = script;
access.nesting = script->nesting();
access.slot = (kind == ARGUMENT) ? ArgSlot(index) : LocalSlot(script, index);
access.arg = (kind == ARGUMENT);
access.index = index;
return access;
} else if (kind != NONE) {
return access;
}
/*
* The script's bindings do not contain a name for the function itself,
* don't resolve name accesses on lambdas in DeclEnv objects on the
* scope chain.
*/
if (atom == CallObjectLambdaName(*script->function()))
return access;
if (!script->nesting()->parent)
return access;
script = script->nesting()->parent;
}
return access;
}
/* Analyze type information for a single bytecode. */
bool
ScriptAnalysis::analyzeTypesBytecode(JSContext *cx, unsigned offset,
@ -3466,7 +3365,7 @@ ScriptAnalysis::analyzeTypesBytecode(JSContext *cx, unsigned offset,
seen->addType(cx, Type::DoubleType());
/* Handle as a property access. */
PropertyAccess(cx, script, pc, script->global()->getType(cx), false, seen, id);
PropertyAccess(cx, script, pc, script->global().getType(cx), false, seen, id);
if (op == JSOP_CALLGNAME)
pushed[0].addPropagateThis(cx, script, pc, Type::UnknownType());
@ -3479,24 +3378,8 @@ ScriptAnalysis::analyzeTypesBytecode(JSContext *cx, unsigned offset,
case JSOP_NAME:
case JSOP_CALLNAME: {
TypeSet *seen = bytecodeTypes(pc);
addTypeBarrier(cx, pc, seen, Type::UnknownType());
seen->addSubset(cx, &pushed[0]);
/*
* Try to resolve this name by walking the function's scope nesting.
* If we succeed but the accessed script has had its TypeScript purged
* in the past, we still must use a type barrier: the name access can
* be on a call object which predated the purge, and whose types might
* not be reflected in the reconstructed information.
*/
jsid id = GetAtomId(cx, script, pc, 0);
NameAccess access = resolveNameAccess(cx, id);
if (access.script && !access.script->typesPurged) {
TypeSet *types = TypeScript::SlotTypes(access.script, access.slot);
types->addSubsetBarrier(cx, script, pc, seen);
} else {
addTypeBarrier(cx, pc, seen, Type::UnknownType());
}
if (op == JSOP_CALLNAME)
pushed[0].addPropagateThis(cx, script, pc, Type::UnknownType());
break;
@ -3508,25 +3391,13 @@ ScriptAnalysis::analyzeTypesBytecode(JSContext *cx, unsigned offset,
case JSOP_SETGNAME: {
jsid id = GetAtomId(cx, script, pc, 0);
PropertyAccess(cx, script, pc, script->global()->getType(cx),
PropertyAccess(cx, script, pc, script->global().getType(cx),
true, poppedTypes(pc, 0), id);
poppedTypes(pc, 0)->addSubset(cx, &pushed[0]);
break;
}
case JSOP_SETNAME: {
jsid id = GetAtomId(cx, script, pc, 0);
NameAccess access = resolveNameAccess(cx, id);
if (access.script) {
TypeSet *types = TypeScript::SlotTypes(access.script, access.slot);
poppedTypes(pc, 0)->addSubset(cx, types);
} else {
cx->compartment->types.monitorBytecode(cx, script, offset);
}
poppedTypes(pc, 0)->addSubset(cx, &pushed[0]);
break;
}
case JSOP_SETNAME:
case JSOP_SETCONST:
cx->compartment->types.monitorBytecode(cx, script, offset);
poppedTypes(pc, 0)->addSubset(cx, &pushed[0]);
@ -3539,8 +3410,6 @@ ScriptAnalysis::analyzeTypesBytecode(JSContext *cx, unsigned offset,
break;
}
case JSOP_GETALIASEDVAR:
case JSOP_CALLALIASEDVAR:
case JSOP_GETARG:
case JSOP_CALLARG:
case JSOP_GETLOCAL:
@ -3560,12 +3429,11 @@ ScriptAnalysis::analyzeTypesBytecode(JSContext *cx, unsigned offset,
/* Local 'let' variable. Punt on types for these, for now. */
pushed[0].addType(cx, Type::UnknownType());
}
if (op == JSOP_CALLARG || op == JSOP_CALLLOCAL || op == JSOP_CALLALIASEDVAR)
if (op == JSOP_CALLARG || op == JSOP_CALLLOCAL)
pushed[0].addPropagateThis(cx, script, pc, Type::UndefinedType());
break;
}
case JSOP_SETALIASEDVAR:
case JSOP_SETARG:
case JSOP_SETLOCAL: {
uint32_t slot = GetBytecodeSlot(script, pc);
@ -3583,6 +3451,24 @@ ScriptAnalysis::analyzeTypesBytecode(JSContext *cx, unsigned offset,
break;
}
case JSOP_GETALIASEDVAR:
case JSOP_CALLALIASEDVAR:
/*
* Every aliased variable will contain 'undefined' in addition to the
* type of whatever value is written to it. Thus, a dynamic barrier is
* necessary. Since we don't expect the to observe more than 1 type,
* there is little benefit to maintaining a TypeSet for the aliased
* variable. Instead, we monitor/barrier all reads unconditionally.
*/
bytecodeTypes(pc)->addSubset(cx, &pushed[0]);
if (op == JSOP_CALLALIASEDVAR)
pushed[0].addPropagateThis(cx, script, pc, Type::UnknownType());
break;
case JSOP_SETALIASEDVAR:
poppedTypes(pc, 0)->addSubset(cx, &pushed[0]);
break;
case JSOP_INCARG:
case JSOP_DECARG:
case JSOP_ARGINC:
@ -3972,7 +3858,7 @@ ScriptAnalysis::analyzeTypesBytecode(JSContext *cx, unsigned offset,
case JSOP_GENERATOR:
if (script->function()) {
if (script->hasGlobal()) {
JSObject *proto = script->global()->getOrCreateGeneratorPrototype(cx);
JSObject *proto = script->global().getOrCreateGeneratorPrototype(cx);
if (!proto)
return false;
TypeObject *object = proto->getNewType(cx);
@ -4075,45 +3961,6 @@ ScriptAnalysis::analyzeTypes(JSContext *cx)
for (unsigned i = 0; i < script->nfixed; i++)
TypeScript::LocalTypes(script, i)->addType(cx, Type::UndefinedType());
TypeScriptNesting *nesting = script->function() ? script->nesting() : NULL;
if (nesting && nesting->parent) {
/*
* Check whether NAME accesses can be resolved in parent scopes, and
* detach from the parent if so. Even if outdated activations of this
* function are live when the parent is called again, we do not need to
* consider this reentrance as no state in the parent will be used.
*/
if (!nesting->parent->ensureRanInference(cx))
return;
bool detached = false;
/* Don't track for leaf scripts which have no free variables. */
if (!usesScopeChain() && !script->isOuterFunction) {
DetachNestingParent(script);
detached = true;
}
/*
* If the names bound by the script are extensible (DEFFUN, EVAL, ...),
* don't resolve NAME accesses into the parent.
*/
if (!detached && extendsScope()) {
DetachNestingParent(script);
detached = true;
}
if (!detached) {
/*
* Don't track for parents which add call objects or are generators,
* don't resolve NAME accesses into the parent.
*/
if (nesting->parent->analysis()->addsScopeObjects() || nesting->parent->isGenerator)
DetachNestingParent(script);
}
}
TypeInferenceState state(cx);
unsigned offset = 0;
@ -4277,9 +4124,7 @@ AnalyzeNewScriptProperties(JSContext *cx, TypeObject *type, JSFunction *fun, JSO
}
JSScript *script = fun->script();
JS_ASSERT(!script->isInnerFunction);
if (!script->ensureRanAnalysis(cx, fun) || !script->ensureRanInference(cx)) {
if (!script->ensureRanAnalysis(cx) || !script->ensureRanInference(cx)) {
*pbaseobj = NULL;
cx->compartment->types.setPendingNukeTypes(cx);
return false;
@ -4515,7 +4360,6 @@ AnalyzePoppedThis(JSContext *cx, Vector<SSAUseChain *> *pendingPoppedThis,
}
JSFunction *function = scriptObj->toFunction();
JS_ASSERT(!function->script()->isInnerFunction);
/*
* Generate constraints to clear definite properties from the type
@ -4566,7 +4410,7 @@ AnalyzePoppedThis(JSContext *cx, Vector<SSAUseChain *> *pendingPoppedThis,
static void
CheckNewScriptProperties(JSContext *cx, HandleTypeObject type, JSFunction *fun)
{
if (type->unknownProperties() || fun->script()->isInnerFunction)
if (type->unknownProperties())
return;
/* Strawman object to add properties to and watch for duplicates. */
@ -4608,8 +4452,8 @@ CheckNewScriptProperties(JSContext *cx, HandleTypeObject type, JSFunction *fun)
* than we will use for subsequent new objects. Generate an object with the
* appropriate final shape.
*/
baseobj = NewReshapedObject(cx, type, baseobj->getParent(), kind,
baseobj->lastProperty());
RootedShape shape(cx, baseobj->lastProperty());
baseobj = NewReshapedObject(cx, type, baseobj->getParent(), kind, shape);
if (!baseobj ||
!type->addDefiniteProperties(cx, baseobj) ||
!initializerList.append(done)) {
@ -4882,7 +4726,7 @@ TypeDynamicResult(JSContext *cx, JSScript *script, jsbytecode *pc, Type type)
/* Directly update associated type sets for applicable bytecodes. */
if (js_CodeSpec[*pc].format & JOF_TYPESET) {
if (!script->ensureRanAnalysis(cx, NULL)) {
if (!script->ensureRanAnalysis(cx)) {
cx->compartment->types.setPendingNukeTypes(cx);
return;
}
@ -4988,7 +4832,7 @@ TypeMonitorResult(JSContext *cx, JSScript *script, jsbytecode *pc, const js::Val
AutoEnterTypeInference enter(cx);
if (!script->ensureRanAnalysis(cx, NULL)) {
if (!script->ensureRanAnalysis(cx)) {
cx->compartment->types.setPendingNukeTypes(cx);
return;
}
@ -5003,255 +4847,6 @@ TypeMonitorResult(JSContext *cx, JSScript *script, jsbytecode *pc, const js::Val
types->addType(cx, type);
}
bool
TypeScript::SetScope(JSContext *cx, JSScript *script_, JSObject *scope_)
{
Rooted<JSScript*> script(cx, script_);
RootedObject scope(cx, scope_);
JS_ASSERT(script->types && !script->types->hasScope());
JSFunction *fun = script->function();
bool nullClosure = fun && fun->isNullClosure();
JS_ASSERT_IF(!fun, !script->isOuterFunction && !script->isInnerFunction);
JS_ASSERT_IF(!scope, fun && !script->isInnerFunction);
/*
* The scope object must be the initial one for the script, before any call
* object has been created in the heavyweight case.
*/
JS_ASSERT_IF(scope && scope->isCall() && !scope->asCall().isForEval(),
&scope->asCall().callee() != fun);
if (!script->compileAndGo) {
script->types->global = NULL;
return true;
}
JS_ASSERT_IF(fun && scope, fun->global() == scope->global());
script->types->global = fun ? &fun->global() : &scope->global();
/*
* Update the parent in the script's bindings. The bindings are created
* with a NULL parent, and fixing the parent now avoids the need to reshape
* every time a call object is created from the bindings.
*/
if (!script->bindings.setParent(cx, script->types->global))
return false;
if (!cx->typeInferenceEnabled())
return true;
if (!script->isInnerFunction || nullClosure) {
/*
* Outermost functions need nesting information if there are inner
* functions directly nested in them.
*/
if (script->isOuterFunction) {
script->types->nesting = cx->new_<TypeScriptNesting>();
if (!script->types->nesting)
return false;
}
return true;
}
/*
* Walk the scope chain to the next call object, which will be the function
* the script is nested inside.
*/
while (!scope->isCall())
scope = &scope->asScope().enclosingScope();
CallObject &call = scope->asCall();
/* The isInnerFunction test ensures there is no intervening strict eval call object. */
JS_ASSERT(!call.isForEval());
/* Don't track non-heavyweight parents, NAME ops won't reach into them. */
JSFunction *parentFun = &call.callee();
if (!parentFun || !parentFun->isHeavyweight())
return true;
JSScript *parent = parentFun->script();
JS_ASSERT(parent->isOuterFunction);
/*
* We only need the nesting in the child if it has NAME accesses going
* into the parent. We won't know for sure whether this is the case until
* analyzing the script's types, which we don't want to do yet. The nesting
* info we make here may get pruned if/when we eventually do such analysis.
*/
/*
* Scopes are set when scripts first execute, and the parent script must
* have executed first. It is still possible for the parent script to not
* have a scope, however, as we occasionally purge all TypeScripts from the
* compartment and there may be inner function objects parented to an
* activation of the outer function sticking around. In such cases, treat
* the parent's call object as the most recent one, so that it is not
* marked as reentrant.
*/
if (!parent->ensureHasTypes(cx))
return false;
if (!parent->types->hasScope()) {
if (!SetScope(cx, parent, &call.enclosingScope()))
return false;
parent->nesting()->activeCall = &call;
parent->nesting()->argArray = Valueify(call.argArray());
parent->nesting()->varArray = Valueify(call.varArray());
}
JS_ASSERT(!script->types->nesting);
/* Construct and link nesting information for the two functions. */
script->types->nesting = cx->new_<TypeScriptNesting>();
if (!script->types->nesting)
return false;
script->nesting()->parent = parent;
script->nesting()->next = parent->nesting()->children;
parent->nesting()->children = script;
return true;
}
TypeScriptNesting::~TypeScriptNesting()
{
/*
* Unlink from any parent/child. Nesting info on a script does not keep
* either the parent or children live during GC.
*/
if (parent) {
JSScript **pscript = &parent->nesting()->children;
while ((*pscript)->nesting() != this)
pscript = &(*pscript)->nesting()->next;
*pscript = next;
}
while (children) {
TypeScriptNesting *child = children->nesting();
children = child->next;
child->parent = NULL;
child->next = NULL;
}
}
bool
ClearActiveNesting(JSScript *start)
{
/*
* Clear active call information for script and any outer functions
* inner to it. Return false if an inner function has frames on the stack.
*/
/* Traverse children, then parent, avoiding recursion. */
JSScript *script = start;
bool traverseChildren = true;
while (true) {
TypeScriptNesting *nesting = script->nesting();
if (nesting->children && traverseChildren) {
script = nesting->children;
continue;
}
if (nesting->activeFrames)
return false;
if (script->isOuterFunction) {
nesting->activeCall = NULL;
nesting->argArray = NULL;
nesting->varArray = NULL;
}
if (script == start)
break;
if (nesting->next) {
script = nesting->next;
traverseChildren = true;
} else {
script = nesting->parent;
traverseChildren = false;
}
}
return true;
}
/*
* For the specified scope and script with an outer function, check if the
* scope represents a reentrant activation on an inner function of the parent
* or any of its transitive parents.
*/
static void
CheckNestingParent(JSContext *cx, JSObject *scope, JSScript *script)
{
restart:
JSScript *parent = script->nesting()->parent;
JS_ASSERT(parent);
while (!scope->isCall() || scope->asCall().callee().script() != parent)
scope = &scope->asScope().enclosingScope();
if (scope != parent->nesting()->activeCall) {
parent->reentrantOuterFunction = true;
MarkTypeObjectFlags(cx, parent->function(), OBJECT_FLAG_REENTRANT_FUNCTION);
/*
* Continue checking parents to see if this is reentrant for them too.
* We don't need to check this in for non-reentrant calls on the outer
* function: when we entered any outer function to the immediate parent
* we cleared the active call for its transitive children, so a
* non-reentrant call on a child is also a non-reentrant call on the
* parent.
*/
if (parent->nesting()->parent) {
scope = &scope->asScope().enclosingScope();
script = parent;
goto restart;
}
}
}
void
NestingPrologue(JSContext *cx, StackFrame *fp)
{
JSScript *script = fp->fun()->script();
TypeScriptNesting *nesting = script->nesting();
if (nesting->parent)
CheckNestingParent(cx, fp->scopeChain(), script);
if (script->isOuterFunction) {
/*
* Check the stack has no frames for this activation, any of its inner
* functions or any of their transitive inner functions.
*
* Also, if the script has an extensible scope, then the arg/var array
* can be moved unexpectedly, so abort the optimization.
*/
if (!ClearActiveNesting(script) || script->funHasExtensibleScope) {
script->reentrantOuterFunction = true;
MarkTypeObjectFlags(cx, fp->fun(), OBJECT_FLAG_REENTRANT_FUNCTION);
}
nesting->activeCall = &fp->callObj();
nesting->argArray = Valueify(nesting->activeCall->argArray());
nesting->varArray = Valueify(nesting->activeCall->varArray());
}
/* Maintain stack frame count for the function. */
nesting->activeFrames++;
}
void
NestingEpilogue(StackFrame *fp)
{
JSScript *script = fp->fun()->script();
TypeScriptNesting *nesting = script->nesting();
JS_ASSERT(nesting->activeFrames != 0);
nesting->activeFrames--;
}
} } /* namespace js::types */
/////////////////////////////////////////////////////////////////////
@ -5428,13 +5023,15 @@ JSFunction::setTypeForScriptedFunction(JSContext *cx, bool singleton)
if (!setSingletonType(cx))
return false;
} else {
RootedFunction self(cx, this);
TypeObject *type = cx->compartment->types.newTypeObject(cx, script(),
JSProto_Function, getProto());
if (!type)
return false;
setType(type);
type->interpretedFunction = this;
self->setType(type);
type->interpretedFunction = self;
}
return true;
@ -5586,8 +5183,6 @@ JSObject::makeLazyType(JSContext *cx)
JSScript *script = type->interpretedFunction->script();
if (script->uninlineable)
type->flags |= OBJECT_FLAG_UNINLINEABLE;
if (script->reentrantOuterFunction)
type->flags |= OBJECT_FLAG_REENTRANT_FUNCTION;
}
if (self->lastProperty()->hasObjectFlag(BaseShape::ITERATED_SINGLETON))
@ -5610,10 +5205,10 @@ JSObject::makeLazyType(JSContext *cx)
* looking at the class prototype key.
*/
if (isSlowArray())
if (self->isSlowArray())
type->flags |= OBJECT_FLAG_NON_DENSE_ARRAY | OBJECT_FLAG_NON_PACKED_ARRAY;
if (IsTypedArrayProto(this))
if (IsTypedArrayProto(self))
type->flags |= OBJECT_FLAG_NON_TYPED_ARRAY;
self->type_ = type;
@ -5666,7 +5261,7 @@ JSObject::setNewTypeUnknown(JSContext *cx)
}
TypeObject *
JSObject::getNewType(JSContext *cx, JSFunction *fun)
JSObject::getNewType(JSContext *cx, JSFunction *fun_)
{
TypeObjectSet &table = cx->compartment->newTypeObjects;
@ -5688,13 +5283,14 @@ JSObject::getNewType(JSContext *cx, JSFunction *fun)
* Object.create is called with a prototype object that is also the
* 'prototype' property of some scripted function.
*/
if (type->newScript && type->newScript->fun != fun)
if (type->newScript && type->newScript->fun != fun_)
type->clearNewScript(cx);
return type;
}
RootedObject self(cx, this);
RootedFunction fun(cx, fun_);
if (!setDelegate(cx))
return NULL;
@ -6088,15 +5684,6 @@ TypeScript::Sweep(FreeOp *fop, JSScript *script)
presult = &result->next;
}
}
/*
* If the script has nesting state with a most recent activation, we do not
* need either to mark the call object or clear it if not live. Even with
* a dead pointer in the nesting, we can't get a spurious match while
* testing for reentrancy: if previous activations are still live, they
* cannot alias the most recent one, and future activations will overwrite
* activeCall on creation.
*/
}
void
@ -6108,9 +5695,6 @@ TypeScript::destroy()
dynamicList = next;
}
if (nesting)
Foreground::delete_(nesting);
Foreground::free_(this);
}
@ -6166,8 +5750,6 @@ SizeOfScriptTypeInferenceData(JSScript *script, TypeInferenceSizes *sizes,
return;
}
sizes->scripts += mallocSizeOf(typeScript->nesting);
unsigned count = TypeScript::NumTypeSets(script);
sizes->scripts += mallocSizeOf(typeScript);

View File

@ -286,14 +286,11 @@ enum {
/* Whether any objects have been iterated over. */
OBJECT_FLAG_ITERATED = 0x00200000,
/* Outer function which has been marked reentrant. */
OBJECT_FLAG_REENTRANT_FUNCTION = 0x00400000,
/* For a global object, whether flags were set on the RegExpStatics. */
OBJECT_FLAG_REGEXP_FLAGS_SET = 0x00800000,
OBJECT_FLAG_REGEXP_FLAGS_SET = 0x00400000,
/* Flags which indicate dynamic properties of represented objects. */
OBJECT_FLAG_DYNAMIC_MASK = 0x00ff0000,
OBJECT_FLAG_DYNAMIC_MASK = 0x007f0000,
/*
* Whether all properties of this object are considered unknown.
@ -802,7 +799,7 @@ struct TypeObject : gc::Cell
* Get the global object which all objects of this type are parented to,
* or NULL if there is none known.
*/
inline JSObject *getGlobal();
//inline JSObject *getGlobal();
/* Helpers */
@ -907,89 +904,6 @@ struct TypeCallsite
bool isNew, unsigned argumentCount);
};
/*
* Information attached to outer and inner function scripts nested in one
* another for tracking the reentrance state for outer functions. This state is
* used to generate fast accesses to the args and vars of the outer function.
*
* A function is non-reentrant if, at any point in time, only the most recent
* activation (i.e. call object) is live. An activation is live if either the
* activation is on the stack, or a transitive inner function parented to the
* activation is on the stack.
*
* Because inner functions can be (and, quite often, are) stored in object
* properties and it is difficult to build a fast and robust escape analysis
* to cope with such flow, we detect reentrance dynamically. For the outer
* function, we keep track of the call object for the most recent activation,
* and the number of frames for the function and its inner functions which are
* on the stack.
*
* If the outer function is called while frames associated with a previous
* activation are on the stack, the outer function is reentrant. If an inner
* function is called whose scope does not match the most recent activation,
* the outer function is reentrant.
*
* The situation gets trickier when there are several levels of nesting.
*
* function foo() {
* var a;
* function bar() {
* var b;
* function baz() { return a + b; }
* }
* }
*
* At calls to 'baz', we don't want to do the scope check for the activations
* of both 'foo' and 'bar', but rather 'bar' only. For this to work, a call to
* 'baz' which is a reentrant call on 'foo' must also be a reentrant call on
* 'bar'. When 'foo' is called, we clear the most recent call object for 'bar'.
*/
struct TypeScriptNesting
{
/*
* If this is an inner function, the outer function. If non-NULL, this will
* be the immediate nested parent of the script (even if that parent has
* been marked reentrant). May be NULL even if the script has a nested
* parent, if NAME accesses cannot be tracked into the parent (either the
* script extends its scope with eval() etc., or the parent can make new
* scope chain objects with 'let' or 'with').
*/
JSScript *parent;
/* If this is an outer function, list of inner functions. */
JSScript *children;
/* Link for children list of parent. */
JSScript *next;
/* If this is an outer function, the most recent activation. */
CallObject *activeCall;
/*
* If this is an outer function, pointers to the most recent activation's
* arguments and variables arrays. These could be referring either to stack
* values in activeCall's frame (if it has not finished yet) or to the
* internal slots of activeCall (if the frame has finished). Pointers to
* these fields can be embedded directly in JIT code (though remember to
* use 'addDependency == true' when calling resolveNameAccess).
*/
const Value *argArray;
const Value *varArray;
/* Number of frames for this function on the stack. */
uint32_t activeFrames;
TypeScriptNesting() { PodZero(this); }
~TypeScriptNesting();
};
/* Construct nesting information for script wrt its parent. */
bool CheckScriptNesting(JSContext *cx, JSScript *script);
/* Track nesting state when calling or finishing an outer/inner function. */
void NestingPrologue(JSContext *cx, StackFrame *fp);
void NestingEpilogue(StackFrame *fp);
/* Persistent type information for a script, retained across GCs. */
class TypeScript
{
@ -998,28 +912,10 @@ class TypeScript
/* Analysis information for the script, cleared on each GC. */
analyze::ScriptAnalysis *analysis;
/*
* Information about the scope in which a script executes. This information
* is not set until the script has executed at least once and SetScope
* called, before that 'global' will be poisoned per GLOBAL_MISSING_SCOPE.
*/
static const size_t GLOBAL_MISSING_SCOPE = 0x1;
/* Global object for the script, if compileAndGo. */
HeapPtr<GlobalObject> global;
public:
/* Nesting state for outer or inner function scripts. */
TypeScriptNesting *nesting;
/* Dynamic types generated at points within this script. */
TypeResult *dynamicList;
inline TypeScript();
bool hasScope() { return size_t(global.get()) != GLOBAL_MISSING_SCOPE; }
/* Array of type type sets for variables and JOF_TYPESET ops. */
TypeSet *typeArray() { return (TypeSet *) (uintptr_t(this) + sizeof(TypeScript)); }
@ -1082,7 +978,6 @@ class TypeScript
static inline void SetArgument(JSContext *cx, JSScript *script, unsigned arg, const js::Value &value);
static void Sweep(FreeOp *fop, JSScript *script);
inline void trace(JSTracer *trc);
void destroy();
};

View File

@ -12,7 +12,6 @@
#include "jsinfer.h"
#include "jsprf.h"
#include "gc/Marking.h"
#include "gc/Root.h"
#include "vm/GlobalObject.h"
@ -312,7 +311,7 @@ TypeMonitorCall(JSContext *cx, const js::CallArgs &args, bool constructing)
JSFunction *fun = callee->toFunction();
if (fun->isInterpreted()) {
RootedScript script(cx, fun->script());
if (!script->ensureRanAnalysis(cx, fun->environment()))
if (!script->ensureRanAnalysis(cx))
return false;
if (cx->typeInferenceEnabled())
TypeMonitorCallSlow(cx, callee, args, constructing);
@ -449,12 +448,6 @@ UseNewTypeAtEntry(JSContext *cx, StackFrame *fp)
// Script interface functions
/////////////////////////////////////////////////////////////////////
inline
TypeScript::TypeScript()
{
this->global = (js::GlobalObject *) GLOBAL_MISSING_SCOPE;
}
/* static */ inline unsigned
TypeScript::NumTypeSets(JSScript *script)
{
@ -504,7 +497,7 @@ TypeScript::SlotTypes(JSScript *script, unsigned slot)
TypeScript::StandardType(JSContext *cx, JSScript *script, JSProtoKey key)
{
RootedObject proto(cx);
RootedObject global(cx, script->global());
RootedObject global(cx, &script->global());
if (!js_GetClassPrototype(cx, global, key, &proto, NULL))
return NULL;
return proto->getNewType(cx);
@ -694,7 +687,7 @@ TypeScript::SetThis(JSContext *cx, JSScript *script, Type type)
script->id(), TypeString(type));
ThisTypes(script)->addType(cx, type);
if (analyze && script->types->hasScope())
if (analyze)
script->ensureRanInference(cx);
}
}
@ -756,15 +749,6 @@ TypeScript::SetArgument(JSContext *cx, JSScript *script, unsigned arg, const js:
}
}
void
TypeScript::trace(JSTracer *trc)
{
if (hasScope() && global)
gc::MarkObject(trc, &global, "script_global");
/* Note: nesting does not keep anything alive. */
}
/////////////////////////////////////////////////////////////////////
// TypeCompartment
/////////////////////////////////////////////////////////////////////
@ -1367,16 +1351,6 @@ TypeObject::setFlagsFromKey(JSContext *cx, JSProtoKey key)
setFlags(cx, flags);
}
inline JSObject *
TypeObject::getGlobal()
{
if (singleton)
return &singleton->global();
if (interpretedFunction && interpretedFunction->script()->compileAndGo)
return &interpretedFunction->global();
return NULL;
}
inline void
TypeObject::writeBarrierPre(TypeObject *type)
{
@ -1452,7 +1426,7 @@ JSScript::ensureHasTypes(JSContext *cx)
}
inline bool
JSScript::ensureRanAnalysis(JSContext *cx, JSObject *scope)
JSScript::ensureRanAnalysis(JSContext *cx)
{
js::analyze::AutoEnterAnalysis aea(cx->compartment);
JSScript *self = this;
@ -1460,12 +1434,6 @@ JSScript::ensureRanAnalysis(JSContext *cx, JSObject *scope)
if (!self->ensureHasTypes(cx))
return false;
if (!self->types->hasScope()) {
js::RootedObject scopeRoot(cx, scope);
if (!js::types::TypeScript::SetScope(cx, self, scope))
return false;
scope = scopeRoot;
}
if (!self->hasAnalysis() && !self->makeAnalysis(cx))
return false;
JS_ASSERT(self->analysis()->ranBytecode());
@ -1476,7 +1444,7 @@ inline bool
JSScript::ensureRanInference(JSContext *cx)
{
JS::RootedScript self(cx, this);
if (!ensureRanAnalysis(cx, NULL))
if (!ensureRanAnalysis(cx))
return false;
if (!self->analysis()->ranInference()) {
js::types::AutoEnterTypeInference enter(cx);

View File

@ -479,7 +479,7 @@ js::ExecuteKernel(JSContext *cx, JSScript *script_, JSObject &scopeChain, const
if (!cx->stack.pushExecuteFrame(cx, script, thisv, scopeChain, type, evalInFrame, &efg))
return false;
if (!script->ensureRanAnalysis(cx, &scopeChain))
if (!script->ensureRanAnalysis(cx))
return false;
TypeScript::SetThis(cx, script, efg.fp()->thisValue());
@ -839,7 +839,7 @@ TryNoteIter::settle()
* in *expr.
*/
static bool
DoIncDec(JSContext *cx, JSScript *script, jsbytecode *pc, const Value &v, Value *slot, Value *expr)
DoIncDec(JSContext *cx, HandleScript script, jsbytecode *pc, const Value &v, Value *slot, Value *expr)
{
const JSCodeSpec &cs = js_CodeSpec[*pc];
@ -2765,6 +2765,7 @@ BEGIN_CASE(JSOP_GETALIASEDVAR)
{
ScopeCoordinate sc = ScopeCoordinate(regs.pc);
PUSH_COPY(regs.fp()->aliasedVarScope(sc).aliasedVar(sc));
TypeScript::Monitor(cx, script, regs.pc, regs.sp[-1]);
}
END_CASE(JSOP_GETALIASEDVAR)

View File

@ -2183,16 +2183,16 @@ JSObject::sealOrFreeze(JSContext *cx, ImmutabilityType it)
return true;
}
bool
JSObject::isSealedOrFrozen(JSContext *cx, ImmutabilityType it, bool *resultp)
/* static */ bool
JSObject::isSealedOrFrozen(JSContext *cx, HandleObject obj, ImmutabilityType it, bool *resultp)
{
if (isExtensible()) {
if (obj->isExtensible()) {
*resultp = false;
return true;
}
AutoIdVector props(cx);
if (!GetPropertyNames(cx, this, JSITER_HIDDEN | JSITER_OWNONLY, &props))
if (!GetPropertyNames(cx, obj, JSITER_HIDDEN | JSITER_OWNONLY, &props))
return false;
RootedId id(cx);
@ -2200,7 +2200,7 @@ JSObject::isSealedOrFrozen(JSContext *cx, ImmutabilityType it, bool *resultp)
id = props[i];
unsigned attrs;
if (!getGenericAttributes(cx, id, &attrs))
if (!obj->getGenericAttributes(cx, id, &attrs))
return false;
/*
@ -2241,7 +2241,7 @@ obj_isFrozen(JSContext *cx, unsigned argc, Value *vp)
return false;
bool frozen;
if (!obj->isFrozen(cx, &frozen))
if (!JSObject::isFrozen(cx, obj, &frozen))
return false;
vp->setBoolean(frozen);
return true;
@ -2267,7 +2267,7 @@ obj_isSealed(JSContext *cx, unsigned argc, Value *vp)
return false;
bool sealed;
if (!obj->isSealed(cx, &sealed))
if (!JSObject::isSealed(cx, obj, &sealed))
return false;
vp->setBoolean(sealed);
return true;
@ -2509,7 +2509,7 @@ js::NewObjectWithType(JSContext *cx, HandleTypeObject type, JSObject *parent, gc
JSObject *
js::NewReshapedObject(JSContext *cx, HandleTypeObject type, JSObject *parent,
gc::AllocKind kind, Shape *shape)
gc::AllocKind kind, HandleShape shape)
{
RootedObject res(cx, NewObjectWithType(cx, type, parent, kind));
if (!res)
@ -2612,16 +2612,20 @@ js_CreateThisForFunction(JSContext *cx, HandleObject callee, bool newType)
JSObject *obj = js_CreateThisForFunctionWithProto(cx, callee, proto);
if (obj && newType) {
RootedObject nobj(cx, obj);
/*
* Reshape the object and give it a (lazily instantiated) singleton
* type before passing it as the 'this' value for the call.
*/
obj->clear(cx);
if (!obj->setSingletonType(cx))
nobj->clear(cx);
if (!nobj->setSingletonType(cx))
return NULL;
JSScript *calleeScript = callee->toFunction()->script();
TypeScript::SetThis(cx, calleeScript, types::Type::ObjectType(obj));
TypeScript::SetThis(cx, calleeScript, types::Type::ObjectType(nobj));
return nobj;
}
return obj;
@ -3527,10 +3531,12 @@ JSObject::growSlots(JSContext *cx, uint32_t oldCount, uint32_t newCount)
gc::AllocKind kind = type()->newScript->allocKind;
unsigned newScriptSlots = gc::GetGCKindSlots(kind);
if (newScriptSlots == numFixedSlots() && gc::TryIncrementAllocKind(&kind)) {
AutoEnterTypeInference enter(cx);
Rooted<TypeObject*> typeObj(cx, type());
RootedShape shape(cx, typeObj->newScript->shape);
JSObject *obj = NewReshapedObject(cx, typeObj,
getParent(), kind,
typeObj->newScript->shape);
getParent(), kind, shape);
if (!obj)
return false;

View File

@ -531,7 +531,7 @@ struct JSObject : public js::ObjectImpl
*/
bool sealOrFreeze(JSContext *cx, ImmutabilityType it);
bool isSealedOrFrozen(JSContext *cx, ImmutabilityType it, bool *resultp);
static bool isSealedOrFrozen(JSContext *cx, js::HandleObject obj, ImmutabilityType it, bool *resultp);
static inline unsigned getSealedOrFrozenAttributes(unsigned attrs, ImmutabilityType it);
@ -543,8 +543,12 @@ struct JSObject : public js::ObjectImpl
/* ES5 15.2.3.9: non-extensible, all properties non-configurable, all data props read-only */
bool freeze(JSContext *cx) { return sealOrFreeze(cx, FREEZE); }
bool isSealed(JSContext *cx, bool *resultp) { return isSealedOrFrozen(cx, SEAL, resultp); }
bool isFrozen(JSContext *cx, bool *resultp) { return isSealedOrFrozen(cx, FREEZE, resultp); }
static inline bool isSealed(JSContext *cx, js::HandleObject obj, bool *resultp) {
return isSealedOrFrozen(cx, obj, SEAL, resultp);
}
static inline bool isFrozen(JSContext *cx, js::HandleObject obj, bool *resultp) {
return isSealedOrFrozen(cx, obj, FREEZE, resultp);
}
/* Accessors for elements. */

View File

@ -1552,7 +1552,7 @@ CopyInitializerObject(JSContext *cx, HandleObject baseobj)
JSObject *
NewReshapedObject(JSContext *cx, HandleTypeObject type, JSObject *parent,
gc::AllocKind kind, Shape *shape);
gc::AllocKind kind, HandleShape shape);
/*
* As for gc::GetGCObjectKind, where numSlots is a guess at the final size of

View File

@ -6379,7 +6379,7 @@ GetPCCountScriptContents(JSContext *cx, size_t index)
{
JSAutoEnterCompartment ac;
if (!ac.enter(cx, script->function() ? (JSObject *) script->function() : script->global()))
if (!ac.enter(cx, &script->global()))
return NULL;
if (!GetPCCountJSON(cx, sac, buf))

View File

@ -342,8 +342,8 @@ OPDEF(JSOP_FINALLY, 135,"finally", NULL, 1, 0, 2, 0, JOF_BYTE)
* uint32 block: the index (into the script object table) of the block chain
* at the point of the variable access.
*/
OPDEF(JSOP_GETALIASEDVAR, 136,"getaliasedvar",NULL, 9, 0, 1, 19, JOF_SCOPECOORD|JOF_NAME)
OPDEF(JSOP_CALLALIASEDVAR,137,"callaliasedvar",NULL, 9, 0, 1, 19, JOF_SCOPECOORD|JOF_NAME)
OPDEF(JSOP_GETALIASEDVAR, 136,"getaliasedvar",NULL, 9, 0, 1, 19, JOF_SCOPECOORD|JOF_NAME|JOF_TYPESET)
OPDEF(JSOP_CALLALIASEDVAR,137,"callaliasedvar",NULL, 9, 0, 1, 19, JOF_SCOPECOORD|JOF_NAME|JOF_TYPESET)
OPDEF(JSOP_SETALIASEDVAR, 138,"setaliasedvar",NULL, 9, 1, 1, 3, JOF_SCOPECOORD|JOF_NAME|JOF_SET|JOF_DETECTING)
OPDEF(JSOP_INCALIASEDVAR, 139,"incaliasedvar",NULL, 10, 0, 1, 15, JOF_SCOPECOORD|JOF_NAME|JOF_INC|JOF_TMPSLOT3|JOF_DECOMPOSE)
OPDEF(JSOP_DECALIASEDVAR, 140,"decaliasedvar",NULL, 10, 0, 1, 15, JOF_SCOPECOORD|JOF_NAME|JOF_DEC|JOF_TMPSLOT3|JOF_DECOMPOSE)

View File

@ -367,7 +367,7 @@ IndirectProxyHandler::getPropertyDescriptor(JSContext *cx, JSObject *proxy,
}
static bool
GetOwnPropertyDescriptor(JSContext *cx, JSObject *obj, jsid id, unsigned flags,
GetOwnPropertyDescriptor(JSContext *cx, HandleObject obj, jsid id, unsigned flags,
JSPropertyDescriptor *desc)
{
// If obj is a proxy, we can do better than just guessing. This is
@ -389,7 +389,8 @@ IndirectProxyHandler::getOwnPropertyDescriptor(JSContext *cx, JSObject *proxy,
jsid id, bool set,
PropertyDescriptor *desc)
{
return GetOwnPropertyDescriptor(cx, GetProxyTargetObject(proxy), id,
RootedObject target(cx, GetProxyTargetObject(proxy));
return GetOwnPropertyDescriptor(cx, target, id,
JSRESOLVE_QUALIFIED, desc);
}

View File

@ -278,35 +278,12 @@ Shape::getChildBinding(JSContext *cx, const StackShape &child)
{
JS_ASSERT(!inDictionary());
Shape *shape = cx->propertyTree().getChild(cx, this, numFixedSlots(), child);
if (shape) {
//JS_ASSERT(shape->parent == this); // XXX 'this' is not rooted here
/* Try to allocate all slots inline. */
uint32_t slots = child.slotSpan();
gc::AllocKind kind = gc::GetGCObjectKind(slots);
uint32_t nfixed = gc::GetGCKindSlots(kind);
/*
* Update the number of fixed slots which bindings of this shape will
* have. Bindings are constructed as new properties come in, so the
* call object allocation class is not known ahead of time. Compute
* the fixed slot count here, which will feed into call objects created
* off of the bindings.
*/
uint32_t slots = child.slotSpan();
gc::AllocKind kind = gc::GetGCObjectKind(slots);
/*
* Make sure that the arguments and variables in the call object all
* end up in a contiguous range of slots. We need this to be able to
* embed the args/vars arrays in the TypeScriptNesting for the function
* after the call object's frame has finished.
*/
uint32_t nfixed = gc::GetGCKindSlots(kind);
if (nfixed < slots) {
nfixed = CallObject::RESERVED_SLOTS;
JS_ASSERT(gc::GetGCKindSlots(gc::GetGCObjectKind(nfixed)) == CallObject::RESERVED_SLOTS);
}
shape->setNumFixedSlots(nfixed);
}
return shape;
return cx->propertyTree().getChild(cx, this, nfixed, child);
}
/* static */ Shape *
@ -1207,31 +1184,6 @@ Bindings::setExtensibleParents(JSContext *cx)
return true;
}
bool
Bindings::setParent(JSContext *cx, JSObject *obj_)
{
RootedObject obj(cx, obj_);
/*
* This may be invoked on GC heap allocated bindings, in which case this
* is pointing to an internal value of a JSScript that can't itself be
* relocated. The script itself will be rooted, and will not be moved, so
* mark the stack value as non-relocatable for the stack root analysis.
*/
Bindings *self = this;
SkipRoot root(cx, &self);
if (!ensureShape(cx))
return false;
/* This is only used for Block objects, which have a NULL proto. */
Shape *newShape = Shape::setObjectParent(cx, obj, NULL, self->lastBinding);
if (!newShape)
return false;
self->lastBinding = newShape;
return true;
}
inline
InitialShapeEntry::InitialShapeEntry() : shape(NULL), proto(NULL)
{

View File

@ -51,8 +51,6 @@ using namespace js;
using namespace js::gc;
using namespace js::frontend;
namespace js {
BindingKind
Bindings::lookup(JSContext *cx, JSAtom *name, unsigned *indexp) const
{
@ -124,7 +122,7 @@ Bindings::add(JSContext *cx, HandleAtom name, BindingKind kind)
id = AtomToId(name);
}
StackBaseShape base(&CallClass, NULL, BaseShape::VAROBJ);
StackBaseShape base(&CallClass, cx->global(), BaseShape::VAROBJ);
base.updateGetterSetter(attrs, getter, setter);
UnownedBaseShape *nbase = BaseShape::getUnowned(cx, base);
@ -247,8 +245,6 @@ Bindings::trace(JSTracer *trc)
MarkShape(trc, &lastBinding, "shape");
}
} /* namespace js */
template<XDRMode mode>
static bool
XDRScriptConst(XDRState<mode> *xdr, HeapValue *vp)
@ -342,9 +338,25 @@ XDRScriptConst(XDRState<mode> *xdr, HeapValue *vp)
return true;
}
static inline uint32_t
FindBlockIndex(JSScript *script, StaticBlockObject &block)
{
ObjectArray *objects = script->objects();
HeapPtrObject *vector = objects->vector;
unsigned length = objects->length;
for (unsigned i = 0; i < length; ++i) {
if (vector[i] == &block)
return i;
}
JS_NOT_REACHED("Block not found");
return UINT32_MAX;
}
template<XDRMode mode>
bool
js::XDRScript(XDRState<mode> *xdr, JSScript **scriptp, JSScript *parentScript)
js::XDRScript(XDRState<mode> *xdr, HandleObject enclosingScope, HandleScript enclosingScript,
HandleFunction fun, JSScript **scriptp)
{
/* NB: Keep this in sync with CloneScript. */
@ -372,16 +384,12 @@ js::XDRScript(XDRState<mode> *xdr, JSScript **scriptp, JSScript *parentScript)
nsrcnotes = ntrynotes = natoms = nobjects = nregexps = nconsts = nClosedArgs = nClosedVars = 0;
jssrcnote *notes = NULL;
/* XDR arguments, var vars, and upvars. */
uint16_t nargs, nvars;
#if defined(DEBUG) || defined(__GNUC__) /* quell GCC overwarning */
script = NULL;
nargs = nvars = Bindings::BINDING_COUNT_LIMIT;
#endif
uint32_t argsVars;
/* XDR arguments and vars. */
uint16_t nargs = 0, nvars = 0;
uint32_t argsVars = 0;
if (mode == XDR_ENCODE) {
script = *scriptp;
JS_ASSERT_IF(parentScript, parentScript->compartment() == script->compartment());
JS_ASSERT_IF(enclosingScript, enclosingScript->compartment() == script->compartment());
nargs = script->bindings.numArgs();
nvars = script->bindings.numVars();
@ -515,7 +523,7 @@ js::XDRScript(XDRState<mode> *xdr, JSScript **scriptp, JSScript *parentScript)
if (script->analyzedArgsUsage() && script->needsArgsObj())
scriptBits |= (1 << NeedsArgsObj);
if (script->filename) {
scriptBits |= (parentScript && parentScript->filename == script->filename)
scriptBits |= (enclosingScript && enclosingScript->filename == script->filename)
? (1 << ParentFilename)
: (1 << OwnFilename);
}
@ -564,12 +572,12 @@ js::XDRScript(XDRState<mode> *xdr, JSScript **scriptp, JSScript *parentScript)
// principals and originPrincipals are set with xdr->initScriptPrincipals(script) below.
// staticLevel is set below.
script = JSScript::Create(cx,
enclosingScope,
!!(scriptBits & (1 << SavedCallerFun)),
/* principals = */ NULL,
/* originPrincipals = */ NULL,
/* compileAndGo = */ false,
!!(scriptBits & (1 << NoScriptRval)),
/* globalObject = */ NULL,
version_,
/* staticLevel = */ 0);
if (!script || !JSScript::partiallyInit(cx, script,
@ -622,9 +630,9 @@ js::XDRScript(XDRState<mode> *xdr, JSScript **scriptp, JSScript *parentScript)
return false;
}
} else if (scriptBits & (1 << ParentFilename)) {
JS_ASSERT(parentScript);
JS_ASSERT(enclosingScript);
if (mode == XDR_DECODE)
script->filename = parentScript->filename;
script->filename = enclosingScript->filename;
}
if (mode == XDR_DECODE) {
@ -648,10 +656,9 @@ js::XDRScript(XDRState<mode> *xdr, JSScript **scriptp, JSScript *parentScript)
}
/*
* Here looping from 0-to-length to xdr objects is essential. It ensures
* that block objects from the script->objects array will be written and
* restored in the outer-to-inner order. js_XDRBlockObject relies on this
* to restore the parent chain.
* Here looping from 0-to-length to xdr objects is essential to ensure that
* all references to enclosing blocks (via FindBlockIndex below) happen
* after the enclosing block has been XDR'd.
*/
for (i = 0; i != nobjects; ++i) {
HeapPtr<JSObject> *objp = &script->objects()->vector[i];
@ -664,14 +671,58 @@ js::XDRScript(XDRState<mode> *xdr, JSScript **scriptp, JSScript *parentScript)
if (!xdr->codeUint32(&isBlock))
return false;
if (isBlock == 0) {
/* Code the nested function's enclosing scope. */
uint32_t funEnclosingScopeIndex = 0;
if (mode == XDR_ENCODE) {
StaticScopeIter ssi((*objp)->toFunction()->script()->enclosingStaticScope());
if (ssi.done() || ssi.type() == StaticScopeIter::FUNCTION) {
JS_ASSERT(ssi.done() == !fun);
funEnclosingScopeIndex = UINT32_MAX;
} else {
funEnclosingScopeIndex = FindBlockIndex(script, ssi.block());
JS_ASSERT(funEnclosingScopeIndex < i);
}
}
if (!xdr->codeUint32(&funEnclosingScopeIndex))
return false;
Rooted<JSObject*> funEnclosingScope(cx);
if (mode == XDR_DECODE) {
if (funEnclosingScopeIndex == UINT32_MAX) {
funEnclosingScope = fun;
} else {
JS_ASSERT(funEnclosingScopeIndex < i);
funEnclosingScope = script->objects()->vector[funEnclosingScopeIndex];
}
}
JSObject *tmp = *objp;
if (!XDRInterpretedFunction(xdr, &tmp, parentScript))
if (!XDRInterpretedFunction(xdr, funEnclosingScope, script, &tmp))
return false;
*objp = tmp;
} else {
/* Code the nested block's enclosing scope. */
JS_ASSERT(isBlock == 1);
uint32_t blockEnclosingScopeIndex = 0;
if (mode == XDR_ENCODE) {
if (StaticBlockObject *block = (*objp)->asStaticBlock().enclosingBlock())
blockEnclosingScopeIndex = FindBlockIndex(script, *block);
else
blockEnclosingScopeIndex = UINT32_MAX;
}
if (!xdr->codeUint32(&blockEnclosingScopeIndex))
return false;
Rooted<JSObject*> blockEnclosingScope(cx);
if (mode == XDR_DECODE) {
if (blockEnclosingScopeIndex != UINT32_MAX) {
JS_ASSERT(blockEnclosingScopeIndex < i);
blockEnclosingScope = script->objects()->vector[blockEnclosingScopeIndex];
} else {
blockEnclosingScope = fun;
}
}
StaticBlockObject *tmp = static_cast<StaticBlockObject *>(objp->get());
if (!XDRStaticBlockObject(xdr, script, &tmp))
if (!XDRStaticBlockObject(xdr, blockEnclosingScope, script, &tmp))
return false;
*objp = tmp;
}
@ -738,10 +789,10 @@ js::XDRScript(XDRState<mode> *xdr, JSScript **scriptp, JSScript *parentScript)
}
template bool
js::XDRScript(XDRState<XDR_ENCODE> *xdr, JSScript **scriptp, JSScript *parentScript);
js::XDRScript(XDRState<XDR_ENCODE> *, HandleObject, HandleScript, HandleFunction, JSScript **);
template bool
js::XDRScript(XDRState<XDR_DECODE> *xdr, JSScript **scriptp, JSScript *parentScript);
js::XDRScript(XDRState<XDR_DECODE> *, HandleObject, HandleScript, HandleFunction, JSScript **);
bool
JSScript::initScriptCounts(JSContext *cx)
@ -1074,17 +1125,17 @@ ScriptDataSize(uint32_t length, uint32_t nsrcnotes, uint32_t natoms,
}
JSScript *
JSScript::Create(JSContext *cx, bool savedCallerFun, JSPrincipals *principals,
JSPrincipals *originPrincipals, bool compileAndGo, bool noScriptRval,
GlobalObject *globalObject_, JSVersion version, unsigned staticLevel)
JSScript::Create(JSContext *cx, HandleObject enclosingScope, bool savedCallerFun,
JSPrincipals *principals, JSPrincipals *originPrincipals,
bool compileAndGo, bool noScriptRval, JSVersion version, unsigned staticLevel)
{
Rooted<GlobalObject*> globalObject(cx, globalObject_);
JSScript *script = js_NewGCScript(cx);
if (!script)
return NULL;
PodZero(script);
script->enclosingScope_ = enclosingScope;
script->savedCallerFun = savedCallerFun;
/* Establish invariant: principals implies originPrincipals. */
@ -1100,8 +1151,6 @@ JSScript::Create(JSContext *cx, bool savedCallerFun, JSPrincipals *principals,
script->compileAndGo = compileAndGo;
script->noScriptRval = noScriptRval;
script->globalObject = globalObject;
script->version = version;
JS_ASSERT(script->getVersion() == version); // assert that no overflow occurred
@ -1318,7 +1367,7 @@ JSScript::fullyInitFromEmitter(JSContext *cx, Handle<JSScript*> script, Bytecode
if (bce->sc->funArgumentsHasLocalBinding()) {
// This must precede the script->bindings.transfer() call below
script->setArgumentsHasVarBinding();
if (bce->sc->funDefinitelyNeedsArgsObj())
if (bce->sc->funDefinitelyNeedsArgsObj())
script->setNeedsArgsObj(true);
} else {
JS_ASSERT(!bce->sc->funDefinitelyNeedsArgsObj());
@ -1650,7 +1699,7 @@ Rebase(JSScript *dst, JSScript *src, T *srcp)
}
JSScript *
js::CloneScript(JSContext *cx, HandleScript src)
js::CloneScript(JSContext *cx, HandleObject enclosingScope, HandleFunction fun, HandleScript src)
{
/* NB: Keep this in sync with XDRScript. */
@ -1699,13 +1748,29 @@ js::CloneScript(JSContext *cx, HandleScript src)
if (nobjects != 0) {
HeapPtrObject *vector = src->objects()->vector;
for (unsigned i = 0; i < nobjects; i++) {
JSObject &obj = *vector[i];
JSObject *clone;
if (vector[i]->isStaticBlock()) {
Rooted<StaticBlockObject*> block(cx, &vector[i]->asStaticBlock());
clone = CloneStaticBlockObject(cx, block, objects, src);
if (obj.isStaticBlock()) {
Rooted<StaticBlockObject*> innerBlock(cx, &obj.asStaticBlock());
Rooted<JSObject*> enclosingScope(cx);
if (StaticBlockObject *enclosingBlock = innerBlock->enclosingBlock())
enclosingScope = objects[FindBlockIndex(src, *enclosingBlock)];
else
enclosingScope = fun;
clone = CloneStaticBlockObject(cx, enclosingScope, innerBlock);
} else {
RootedFunction fun(cx, vector[i]->toFunction());
clone = CloneInterpretedFunction(cx, fun);
Rooted<JSFunction*> innerFun(cx, obj.toFunction());
StaticScopeIter ssi(innerFun->script()->enclosingStaticScope());
Rooted<JSObject*> enclosingScope(cx);
if (!ssi.done() && ssi.type() == StaticScopeIter::BLOCK)
enclosingScope = objects[FindBlockIndex(src, ssi.block())];
else
enclosingScope = fun;
clone = CloneInterpretedFunction(cx, enclosingScope, innerFun);
}
if (!clone || !objects.append(clone))
return NULL;
@ -1726,11 +1791,10 @@ js::CloneScript(JSContext *cx, HandleScript src)
/* Now that all fallible allocation is complete, create the GC thing. */
JSScript *dst = JSScript::Create(cx, src->savedCallerFun,
JSScript *dst = JSScript::Create(cx, enclosingScope, src->savedCallerFun,
cx->compartment->principals, src->originPrincipals,
src->compileAndGo, src->noScriptRval,
/* globalObject = */ NULL, src->getVersion(),
src->staticLevel);
src->getVersion(), src->staticLevel);
if (!dst) {
Foreground::free_(data);
return NULL;
@ -1949,8 +2013,7 @@ JSScript::changeStepModeCount(JSContext *cx, int delta)
}
BreakpointSite *
JSScript::getOrCreateBreakpointSite(JSContext *cx, jsbytecode *pc,
GlobalObject *scriptGlobal)
JSScript::getOrCreateBreakpointSite(JSContext *cx, jsbytecode *pc)
{
JS_ASSERT(size_t(pc - code) < length);
@ -1969,11 +2032,6 @@ JSScript::getOrCreateBreakpointSite(JSContext *cx, jsbytecode *pc,
debug->numSites++;
}
if (site->scriptGlobal)
JS_ASSERT_IF(scriptGlobal, site->scriptGlobal == scriptGlobal);
else
site->scriptGlobal = scriptGlobal;
return site;
}
@ -2060,17 +2118,14 @@ JSScript::markChildren(JSTracer *trc)
if (function())
MarkObject(trc, &function_, "function");
if (!isCachedEval && globalObject)
MarkObject(trc, &globalObject, "object");
if (enclosingScope_)
MarkObject(trc, &enclosingScope_, "enclosing");
if (IS_GC_MARKING_TRACER(trc) && filename)
MarkScriptFilename(trc->runtime, filename);
bindings.trace(trc);
if (types)
types->trace(trc);
#ifdef JS_METHODJIT
for (int constructing = 0; constructing <= 1; constructing++) {
for (int barriers = 0; barriers <= 1; barriers++) {

View File

@ -103,6 +103,9 @@ class Bindings
uint16_t numVars() const { return nvars; }
unsigned count() const { return nargs + nvars; }
/* Convert a CallObject slot to either a formal or local variable index. */
inline BindingKind slotToFrameIndex(unsigned slot, unsigned *index);
/*
* The VM's StackFrame allocates a Value for each formal and variable.
* A (formal|var)Index is the index passed to fp->unaliasedFormal/Var to
@ -128,8 +131,6 @@ class Bindings
inline bool extensibleParents();
bool setExtensibleParents(JSContext *cx);
bool setParent(JSContext *cx, JSObject *obj);
enum {
/* A script may have no more than this many arguments or variables. */
BINDING_COUNT_LIMIT = 0xFFFF
@ -409,19 +410,6 @@ struct JSScript : public js::gc::Cell
JSPrincipals *principals;/* principals for this script */
JSPrincipals *originPrincipals; /* see jsapi.h 'originPrincipals' comment */
/*
* A global object for the script.
* - All scripts returned by JSAPI functions (JS_CompileScript,
* JS_CompileUTF8File, etc.) have a non-null globalObject.
* - A function script has a globalObject if the function comes from a
* compile-and-go script.
* - Temporary scripts created by obj_eval, JS_EvaluateScript, and
* similar functions never have the globalObject field set; for such
* scripts the global should be extracted from the JS frame that
* execute scripts.
*/
js::HeapPtr<js::GlobalObject, JSScript*> globalObject;
/* Persistent type information retained across GCs. */
js::types::TypeScript *types;
@ -429,8 +417,8 @@ struct JSScript : public js::gc::Cell
#ifdef JS_METHODJIT
JITScriptSet *jitInfo;
#endif
js::HeapPtrFunction function_;
js::HeapPtrObject enclosingScope_;
// 32-bit fields.
@ -510,14 +498,9 @@ struct JSScript : public js::gc::Cell
undefined properties in this
script */
bool hasSingletons:1; /* script has singleton objects */
bool isOuterFunction:1; /* function is heavyweight, with inner functions */
bool isInnerFunction:1; /* function is directly nested in a heavyweight
* outer function */
bool isActiveEval:1; /* script came from eval(), and is still active */
bool isCachedEval:1; /* script came from eval(), and is in eval cache */
bool uninlineable:1; /* script is considered uninlineable by analysis */
bool reentrantOuterFunction:1; /* outer function marked reentrant */
bool typesPurged:1; /* TypeScript has been purged at some point */
#ifdef JS_METHODJIT
bool debugMode:1; /* script was compiled in debug mode */
bool failedBoundsCheck:1; /* script has had hoisted bounds checks fail */
@ -542,11 +525,10 @@ struct JSScript : public js::gc::Cell
//
public:
static JSScript *Create(JSContext *cx, bool savedCallerFun,
static JSScript *Create(JSContext *cx, js::HandleObject enclosingScope, bool savedCallerFun,
JSPrincipals *principals, JSPrincipals *originPrincipals,
bool compileAndGo, bool noScriptRval,
js::GlobalObject *globalObject, JSVersion version,
unsigned staticLevel);
JSVersion version, unsigned staticLevel);
// Three ways ways to initialize a JSScript. Callers of partiallyInit()
// and fullyInitTrivial() are responsible for notifying the debugger after
@ -594,9 +576,6 @@ struct JSScript : public js::gc::Cell
return needsArgsObj() && !strictModeCode;
}
/* Hash table chaining for JSCompartment::evalCache. */
JSScript *&evalHashLink() { return *globalObject.unsafeGetUnioned(); }
/*
* Original compiled function for the script, if it has a function.
* NULL for global and eval scripts.
@ -604,6 +583,9 @@ struct JSScript : public js::gc::Cell
JSFunction *function() const { return function_; }
void setFunction(JSFunction *fun);
/* Return whether this script was compiled for 'eval' */
bool isForEval() { return isCachedEval || isActiveEval; }
#ifdef DEBUG
unsigned id();
#else
@ -614,12 +596,10 @@ struct JSScript : public js::gc::Cell
inline bool ensureHasTypes(JSContext *cx);
/*
* Ensure the script has scope and bytecode analysis information.
* Performed when the script first runs, or first runs after a TypeScript
* GC purge. If scope is NULL then the script must already have types with
* scope information.
* Ensure the script has bytecode analysis information. Performed when the
* script first runs, or first runs after a TypeScript GC purge.
*/
inline bool ensureRanAnalysis(JSContext *cx, JSObject *scope);
inline bool ensureRanAnalysis(JSContext *cx);
/* Ensure the script has type inference analysis information. */
inline bool ensureRanInference(JSContext *cx);
@ -631,15 +611,10 @@ struct JSScript : public js::gc::Cell
inline bool hasGlobal() const;
inline bool hasClearedGlobal() const;
inline js::GlobalObject * global() const;
inline js::types::TypeScriptNesting *nesting() const;
inline js::GlobalObject &global() const;
inline void clearNesting();
/* Return creation time global or null. */
js::GlobalObject *getGlobalObjectOrNull() const {
return (isCachedEval || isActiveEval) ? NULL : globalObject.get();
}
/* See StaticScopeIter comment. */
JSObject *enclosingStaticScope() const { return enclosingScope_; }
private:
bool makeTypes(JSContext *cx);
@ -875,8 +850,7 @@ struct JSScript : public js::gc::Cell
return hasDebugScript ? debugScript()->breakpoints[pc - code] : NULL;
}
js::BreakpointSite *getOrCreateBreakpointSite(JSContext *cx, jsbytecode *pc,
js::GlobalObject *scriptGlobal);
js::BreakpointSite *getOrCreateBreakpointSite(JSContext *cx, jsbytecode *pc);
void destroyBreakpointSite(js::FreeOp *fop, jsbytecode *pc);
@ -1035,7 +1009,7 @@ inline void
CurrentScriptFileLineOrigin(JSContext *cx, unsigned *linenop, LineOption = NOT_CALLED_FROM_JSOP_EVAL);
extern JSScript *
CloneScript(JSContext *cx, HandleScript script);
CloneScript(JSContext *cx, HandleObject enclosingScope, HandleFunction fun, HandleScript script);
/*
* NB: after a successful XDR_DECODE, XDRScript callers must do any required
@ -1044,7 +1018,8 @@ CloneScript(JSContext *cx, HandleScript script);
*/
template<XDRMode mode>
bool
XDRScript(XDRState<mode> *xdr, JSScript **scriptp, JSScript *parentScript);
XDRScript(XDRState<mode> *xdr, HandleObject enclosingScope, HandleScript enclosingScript,
HandleFunction fun, JSScript **scriptp);
} /* namespace js */

View File

@ -15,7 +15,6 @@
#include "jsscript.h"
#include "jsscope.h"
#include "vm/ScopeObject.h"
#include "vm/GlobalObject.h"
#include "vm/RegExpObject.h"
@ -28,6 +27,20 @@ Bindings::Bindings()
: lastBinding(NULL), nargs(0), nvars(0), hasDup_(false)
{}
inline BindingKind
Bindings::slotToFrameIndex(unsigned slot, unsigned *index)
{
slot -= CallObject::RESERVED_SLOTS;
if (slot < numArgs()) {
*index = slot;
return ARGUMENT;
}
*index = slot - numArgs();
JS_ASSERT(*index < numVars());
return VARIABLE;
}
inline void
Bindings::transfer(Bindings *bindings)
{
@ -55,8 +68,8 @@ Bindings::initialShape(JSContext *cx) const
gc::AllocKind kind = gc::FINALIZE_OBJECT2_BACKGROUND;
JS_ASSERT(gc::GetGCKindSlots(kind) == CallObject::RESERVED_SLOTS);
return EmptyShape::getInitialShape(cx, &CallClass, NULL, NULL, kind,
BaseShape::VAROBJ);
return EmptyShape::getInitialShape(cx, &CallClass, NULL, cx->global(),
kind, BaseShape::VAROBJ);
}
bool
@ -191,41 +204,24 @@ JSScript::hasGlobal() const
* which have had their scopes cleared. compileAndGo code should not run
* anymore against such globals.
*/
JS_ASSERT(types && types->hasScope());
js::GlobalObject *obj = types->global;
return obj && !obj->isCleared();
return compileAndGo && !global().isCleared();
}
inline js::GlobalObject *
inline js::GlobalObject &
JSScript::global() const
{
JS_ASSERT(hasGlobal());
return types->global;
/*
* A JSScript always marks its compartment's global (via bindings) so we
* can assert that maybeGlobal is non-null here.
*/
return *compartment()->maybeGlobal();
}
inline bool
JSScript::hasClearedGlobal() const
{
JS_ASSERT(types && types->hasScope());
js::GlobalObject *obj = types->global;
return obj && obj->isCleared();
}
inline js::types::TypeScriptNesting *
JSScript::nesting() const
{
JS_ASSERT(function() && types && types->hasScope());
return types->nesting;
}
inline void
JSScript::clearNesting()
{
js::types::TypeScriptNesting *nesting = this->nesting();
if (nesting) {
js::Foreground::delete_(nesting);
types->nesting = NULL;
}
JS_ASSERT(types);
return global().isCleared();
}
#ifdef JS_METHODJIT

View File

@ -1717,7 +1717,7 @@ GetCurrentScopeChain(JSContext *cx)
}
static JSXML *
ParseXMLSource(JSContext *cx, JSString *src)
ParseXMLSource(JSContext *cx, HandleString src)
{
jsval nsval;
JSLinearString *uri;
@ -1856,7 +1856,7 @@ ToXML(JSContext *cx, jsval v)
JSObject *obj;
JSXML *xml;
Class *clasp;
JSString *str;
RootedString str(cx);
uint32_t length;
if (JSVAL_IS_PRIMITIVE(v)) {
@ -1937,7 +1937,7 @@ ToXMLList(JSContext *cx, jsval v)
JSObject *obj, *listobj;
JSXML *xml, *list, *kid;
Class *clasp;
JSString *str;
RootedString str(cx);
uint32_t i, length;
if (JSVAL_IS_PRIMITIVE(v)) {

View File

@ -1375,7 +1375,7 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::MIPSRegiste
};
/* Return f<true> if the script is strict mode code, f<false> otherwise. */
#define STRICT_VARIANT(f) \
#define STRICT_VARIANT(script, f) \
(FunctionTemplateConditional(script->strictModeCode, \
f<true>, f<false>))

View File

@ -59,7 +59,7 @@ mjit::Compiler::Compiler(JSContext *cx, JSScript *outerScript,
isConstructing(isConstructing),
outerChunk(outerJIT()->chunkDescriptor(chunkIndex)),
ssa(cx, outerScript),
globalObj(cx, outerScript->hasGlobal() ? outerScript->global() : NULL),
globalObj(cx, outerScript->hasGlobal() ? &outerScript->global() : NULL),
globalSlots(globalObj ? globalObj->getRawSlots() : NULL),
frame(cx, *thisFromCtor(), masm, stubcc),
a(NULL), outer(NULL), script(NULL), PC(NULL), loop(NULL),
@ -135,7 +135,7 @@ mjit::Compiler::checkAnalysis(HandleScript script)
return Compile_Abort;
}
if (!script->ensureRanAnalysis(cx, NULL))
if (!script->ensureRanAnalysis(cx))
return Compile_Error;
if (!script->analysis()->jaegerCompileable()) {
@ -190,7 +190,7 @@ mjit::Compiler::scanInlineCalls(uint32_t index, uint32_t depth)
/* Don't inline from functions which could have a non-global scope object. */
if (!script->hasGlobal() ||
script->global() != globalObj ||
&script->global() != globalObj ||
(script->function() && script->function()->getParent() != globalObj) ||
(script->function() && script->function()->isHeavyweight()) ||
script->isActiveEval) {
@ -316,7 +316,7 @@ mjit::Compiler::scanInlineCalls(uint32_t index, uint32_t depth)
break;
}
if (!script->types || !script->types->hasScope()) {
if (!script->types) {
okay = false;
break;
}
@ -633,7 +633,7 @@ mjit::SetChunkLimit(uint32_t limit)
JITScript *
MakeJITScript(JSContext *cx, JSScript *script)
{
if (!script->ensureRanAnalysis(cx, NULL))
if (!script->ensureRanAnalysis(cx))
return NULL;
ScriptAnalysis *analysis = script->analysis();
@ -1097,9 +1097,7 @@ mjit::Compiler::generatePrologue()
* set for global and eval frames, and will have been set by
* HeavyweightFunctionPrologue for heavyweight function frames.
*/
if (!script->function()->isHeavyweight() &&
(analysis->usesScopeChain() || script->nesting()))
{
if (!script->function()->isHeavyweight() && analysis->usesScopeChain()) {
RegisterID t0 = Registers::ReturnReg;
Jump hasScope = masm.branchTest32(Assembler::NonZero,
FrameFlagsAddress(), Imm32(StackFrame::HAS_SCOPECHAIN));
@ -1145,42 +1143,10 @@ mjit::Compiler::generatePrologue()
if (script->function()->isHeavyweight()) {
prepareStubCall(Uses(0));
INLINE_STUBCALL(stubs::HeavyweightFunctionPrologue, REJOIN_FUNCTION_PROLOGUE);
} else if (types::TypeScriptNesting *nesting = script->nesting()) {
/*
* Inline the common case for the nesting prologue: the
* function is a non-heavyweight inner function with no
* children of its own. We ensure during inference that the
* outer function does not add scope objects for 'let' or
* 'with', so that the frame's scope chain will be
* the parent's call object, and if it differs from the
* parent's current activation then the parent is reentrant.
*/
JSScript *parent = nesting->parent;
JS_ASSERT(parent);
JS_ASSERT_IF(parent->hasAnalysis() && parent->analysis()->ranBytecode(),
!parent->analysis()->addsScopeObjects());
RegisterID t0 = Registers::ReturnReg;
masm.move(ImmPtr(&parent->nesting()->activeCall), t0);
masm.loadPtr(Address(t0), t0);
Address scopeChain(JSFrameReg, StackFrame::offsetOfScopeChain());
Jump mismatch = masm.branchPtr(Assembler::NotEqual, t0, scopeChain);
masm.add32(Imm32(1), AbsoluteAddress(&nesting->activeFrames));
masm.load32(FrameFlagsAddress(), t0);
masm.or32(Imm32(StackFrame::HAS_NESTING), t0);
masm.store32(t0, FrameFlagsAddress());
stubcc.linkExitDirect(mismatch, stubcc.masm.label());
OOL_STUBCALL(stubs::TypeNestingPrologue, REJOIN_FUNCTION_PROLOGUE);
stubcc.crossJump(stubcc.masm.jump(), masm.label());
}
if (isConstructing) {
if (!constructThis())
return Compile_Error;
}
if (isConstructing && !constructThis())
return Compile_Error;
}
CompileStatus status = methodEntryHelper();
@ -2557,7 +2523,7 @@ mjit::Compiler::generateMethod()
prepareStubCall(Uses(1));
masm.move(ImmPtr(name), Registers::ArgReg1);
INLINE_STUBCALL(STRICT_VARIANT(stubs::DelProp), REJOIN_FALLTHROUGH);
INLINE_STUBCALL(STRICT_VARIANT(script, stubs::DelProp), REJOIN_FALLTHROUGH);
frame.pop();
pushSyncedEntry(0);
}
@ -2566,7 +2532,7 @@ mjit::Compiler::generateMethod()
BEGIN_CASE(JSOP_DELELEM)
{
prepareStubCall(Uses(2));
INLINE_STUBCALL(STRICT_VARIANT(stubs::DelElem), REJOIN_FALLTHROUGH);
INLINE_STUBCALL(STRICT_VARIANT(script, stubs::DelElem), REJOIN_FALLTHROUGH);
frame.popn(2);
pushSyncedEntry(0);
}
@ -2860,8 +2826,6 @@ mjit::Compiler::generateMethod()
BEGIN_CASE(JSOP_GETLOCAL)
BEGIN_CASE(JSOP_CALLLOCAL)
BEGIN_CASE(JSOP_GETALIASEDVAR)
BEGIN_CASE(JSOP_CALLALIASEDVAR)
{
/*
* Update the var type unless we are about to pop the variable.
@ -2873,26 +2837,27 @@ mjit::Compiler::generateMethod()
restoreVarType();
if (JSObject *singleton = pushedSingleton(0))
frame.push(ObjectValue(*singleton));
else if (JOF_OPTYPE(*PC) == JOF_SCOPECOORD)
jsop_aliasedVar(ScopeCoordinate(PC), /* get = */ true);
else
frame.pushLocal(GET_SLOTNO(PC));
PC += GetBytecodeLength(PC);
break;
}
END_CASE(JSOP_GETLOCAL)
BEGIN_CASE(JSOP_GETALIASEDVAR)
BEGIN_CASE(JSOP_CALLALIASEDVAR)
jsop_aliasedVar(ScopeCoordinate(PC), /* get = */ true);
END_CASE(JSOP_GETALIASEDVAR);
BEGIN_CASE(JSOP_SETLOCAL)
BEGIN_CASE(JSOP_SETALIASEDVAR)
{
jsbytecode *next = &PC[GetBytecodeLength(PC)];
bool pop = JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next);
if (JOF_OPTYPE(*PC) == JOF_SCOPECOORD)
if (JOF_OPTYPE(*PC) == JOF_SCOPECOORD) {
jsop_aliasedVar(ScopeCoordinate(PC), /* get = */ false, pop);
else
} else {
frame.storeLocal(GET_SLOTNO(PC), pop);
updateVarType();
updateVarType();
}
if (pop) {
frame.pop();
@ -3067,7 +3032,7 @@ mjit::Compiler::generateMethod()
prepareStubCall(Uses(0));
masm.move(ImmPtr(innerFun), Registers::ArgReg1);
INLINE_STUBCALL(STRICT_VARIANT(stubs::DefFun), REJOIN_FALLTHROUGH);
INLINE_STUBCALL(STRICT_VARIANT(script, stubs::DefFun), REJOIN_FALLTHROUGH);
}
END_CASE(JSOP_DEFFUN)
@ -3808,10 +3773,6 @@ mjit::Compiler::emitReturn(FrameEntry *fe)
INLINE_STUBCALL(stubs::Epilogue, REJOIN_NONE);
} else {
profilingPopHelper();
if (script->function() && script->nesting()) {
masm.sub32(Imm32(1), AbsoluteAddress(&script->nesting()->activeFrames));
}
}
emitReturnValue(&masm, fe);
@ -4239,18 +4200,8 @@ mjit::Compiler::inlineCallHelper(uint32_t argc, bool callingNew, FrameSize &call
if (icCalleeType.isSet())
notObjectJump = masm.testObject(Assembler::NotEqual, icCalleeType.reg());
/*
* For an optimized apply, keep icCalleeData in a callee-saved register for
* the subsequent ic::SplatApplyArgs call.
*/
Registers tempRegs(Registers::AvailRegs);
if (callIC.frameSize.isDynamic() && !Registers::isSaved(icCalleeData)) {
RegisterID x = tempRegs.takeAnyReg(Registers::SavedRegs).reg();
masm.move(icCalleeData, x);
icCalleeData = x;
} else {
tempRegs.takeReg(icCalleeData);
}
tempRegs.takeReg(icCalleeData);
/* Reserve space just before initialization of funGuard. */
RESERVE_IC_SPACE(masm);
@ -4291,9 +4242,16 @@ mjit::Compiler::inlineCallHelper(uint32_t argc, bool callingNew, FrameSize &call
* Check after the function is known not to be a native so that the
* catch-all/native path has a static depth.
*/
if (callIC.frameSize.isDynamic())
if (callIC.frameSize.isDynamic()) {
OOL_STUBCALL(ic::SplatApplyArgs, REJOIN_CALL_SPLAT);
/*
* Restore identity of callee after SplatApplyArgs, which may
* have been clobbered (not callee save reg or changed by moving GC).
*/
stubcc.masm.loadPayload(frame.addressOf(origThis), icCalleeData);
}
/*
* No-op jump that gets patched by ic::New/Call to the stub generated
* by generateFullCallStub.
@ -4736,7 +4694,7 @@ mjit::Compiler::jsop_setprop_slow(PropertyName *name)
{
prepareStubCall(Uses(2));
masm.move(ImmPtr(name), Registers::ArgReg1);
INLINE_STUBCALL(STRICT_VARIANT(stubs::SetName), REJOIN_FALLTHROUGH);
INLINE_STUBCALL(STRICT_VARIANT(script, stubs::SetName), REJOIN_FALLTHROUGH);
JS_STATIC_ASSERT(JSOP_SETNAME_LENGTH == JSOP_SETPROP_LENGTH);
frame.shimmy(1);
if (script->hasScriptCounts)
@ -5397,42 +5355,6 @@ mjit::Compiler::jsop_setprop(PropertyName *name, bool popGuaranteed)
return true;
}
/*
* If this is a SETNAME to a variable of a non-reentrant outer function,
* set the variable's slot directly for the active call object.
*/
if (cx->typeInferenceEnabled() && js_CodeSpec[*PC].format & JOF_NAME) {
ScriptAnalysis::NameAccess access =
analysis->resolveNameAccess(cx, NameToId(name), true);
if (access.nesting) {
/* Use a SavedReg so it isn't clobbered by the stub call. */
RegisterID nameReg = frame.allocReg(Registers::SavedRegs).reg();
Address address = frame.loadNameAddress(access, nameReg);
#ifdef JSGC_INCREMENTAL_MJ
/* Write barrier. */
if (cx->compartment->needsBarrier()) {
stubcc.linkExit(masm.jump(), Uses(0));
stubcc.leave();
/* sync() may have overwritten nameReg, so we reload its data. */
JS_ASSERT(address.base == nameReg);
stubcc.masm.move(ImmPtr(access.basePointer()), nameReg);
stubcc.masm.loadPtr(Address(nameReg), nameReg);
stubcc.masm.addPtr(Imm32(address.offset), nameReg, Registers::ArgReg1);
OOL_STUBCALL(stubs::WriteBarrier, REJOIN_NONE);
stubcc.rejoin(Changes(0));
}
#endif
frame.storeTo(rhs, address, popGuaranteed);
frame.shimmy(1);
frame.freeReg(address.base);
return true;
}
}
/*
* Set the property directly if we are accessing a known object which
* always has the property in a particular inline slot.
@ -5474,7 +5396,7 @@ mjit::Compiler::jsop_setprop(PropertyName *name, bool popGuaranteed)
stubcc.linkExit(notObject.get(), Uses(2));
stubcc.leave();
stubcc.masm.move(ImmPtr(name), Registers::ArgReg1);
OOL_STUBCALL(STRICT_VARIANT(stubs::SetName), REJOIN_FALLTHROUGH);
OOL_STUBCALL(STRICT_VARIANT(script, stubs::SetName), REJOIN_FALLTHROUGH);
}
frame.storeTo(rhs, Address(reg, JSObject::getFixedSlotOffset(slot)), popGuaranteed);
frame.unpinReg(reg);
@ -5538,7 +5460,7 @@ mjit::Compiler::jsop_setprop(PropertyName *name, bool popGuaranteed)
stubcc.leave();
stubcc.masm.move(ImmPtr(name), Registers::ArgReg1);
OOL_STUBCALL(STRICT_VARIANT(stubs::SetName), REJOIN_FALLTHROUGH);
OOL_STUBCALL(STRICT_VARIANT(script, stubs::SetName), REJOIN_FALLTHROUGH);
typeCheck = stubcc.masm.jump();
pic.hasTypeCheck = true;
@ -5619,24 +5541,6 @@ mjit::Compiler::jsop_setprop(PropertyName *name, bool popGuaranteed)
void
mjit::Compiler::jsop_name(PropertyName *name, JSValueType type)
{
/*
* If this is a NAME for a variable of a non-reentrant outer function, get
* the variable's slot directly for the active call object. We always need
* to check for undefined, however.
*/
if (cx->typeInferenceEnabled()) {
ScriptAnalysis::NameAccess access =
analysis->resolveNameAccess(cx, NameToId(name), true);
if (access.nesting) {
Address address = frame.loadNameAddress(access);
JSValueType type = knownPushedType(0);
BarrierState barrier = pushAddressMaybeBarrier(address, type, true,
/* testUndefined = */ true);
finishBarrier(barrier, REJOIN_GETTER, 0);
return;
}
}
PICGenInfo pic(ic::PICInfo::NAME, JSOp(*PC));
RESERVE_IC_SPACE(masm);
@ -5693,24 +5597,6 @@ mjit::Compiler::jsop_name(PropertyName *name, JSValueType type)
bool
mjit::Compiler::jsop_xname(PropertyName *name)
{
/*
* If this is a GETXPROP for a variable of a non-reentrant outer function,
* treat in the same way as a NAME.
*/
if (cx->typeInferenceEnabled()) {
ScriptAnalysis::NameAccess access =
analysis->resolveNameAccess(cx, NameToId(name), true);
if (access.nesting) {
frame.pop();
Address address = frame.loadNameAddress(access);
JSValueType type = knownPushedType(0);
BarrierState barrier = pushAddressMaybeBarrier(address, type, true,
/* testUndefined = */ true);
finishBarrier(barrier, REJOIN_GETTER, 0);
return true;
}
}
PICGenInfo pic(ic::PICInfo::XNAME, JSOp(*PC));
FrameEntry *fe = frame.peek(-1);
@ -5773,23 +5659,6 @@ mjit::Compiler::jsop_xname(PropertyName *name)
void
mjit::Compiler::jsop_bindname(PropertyName *name)
{
/*
* If this is a BINDNAME for a variable of a non-reentrant outer function,
* the object is definitely the outer function's active call object.
*/
if (cx->typeInferenceEnabled()) {
ScriptAnalysis::NameAccess access =
analysis->resolveNameAccess(cx, NameToId(name), true);
if (access.nesting) {
RegisterID reg = frame.allocReg();
CallObject **pobj = &access.nesting->activeCall;
masm.move(ImmPtr(pobj), reg);
masm.loadPtr(Address(reg), reg);
frame.pushTypedPayload(JSVAL_TYPE_OBJECT, reg);
return;
}
}
PICGenInfo pic(ic::PICInfo::BIND, JSOp(*PC));
// This code does not check the frame flags to see if scopeChain has been
@ -5930,47 +5799,25 @@ mjit::Compiler::jsop_aliasedVar(ScopeCoordinate sc, bool get, bool poppedAfter)
for (unsigned i = 0; i < sc.hops; i++)
masm.loadPayload(Address(reg, ScopeObject::offsetOfEnclosingScope()), reg);
/*
* TODO bug 753158: Call and Block objects should use the same layout
* strategy: up to the maximum numFixedSlots and overflow (if any) in
* dynamic slots. For now, we special case for different layouts:
*/
Shape *shape = ScopeCoordinateToStaticScope(script, PC).scopeShape();
Address addr;
StaticBlockObject *block = ScopeCoordinateBlockChain(script, PC);
if (block) {
/*
* Block objects use a fixed AllocKind which means an invariant number
* of fixed slots. Any slot below the fixed slot count is inline, any
* slot over is in the dynamic slots.
*/
uint32_t nfixed = gc::GetGCKindSlots(BlockObject::FINALIZE_KIND);
if (nfixed <= sc.slot) {
masm.loadPtr(Address(reg, JSObject::offsetOfSlots()), reg);
addr = Address(reg, (sc.slot - nfixed) * sizeof(Value));
} else {
addr = Address(reg, JSObject::getFixedSlotOffset(sc.slot));
}
if (shape->numFixedSlots() <= sc.slot) {
masm.loadPtr(Address(reg, JSObject::offsetOfSlots()), reg);
addr = Address(reg, (sc.slot - shape->numFixedSlots()) * sizeof(Value));
} else {
/*
* Using special-case hackery in Shape::getChildBinding, CallObject
* slots are either altogether in fixed slots or altogether in dynamic
* slots (by having numFixed == RESERVED_SLOTS).
*/
if (script->bindings.lastShape()->numFixedSlots() <= sc.slot) {
masm.loadPtr(Address(reg, JSObject::offsetOfSlots()), reg);
addr = Address(reg, (sc.slot - CallObject::RESERVED_SLOTS) * sizeof(Value));
} else {
addr = Address(reg, JSObject::getFixedSlotOffset(sc.slot));
}
addr = Address(reg, JSObject::getFixedSlotOffset(sc.slot));
}
if (get) {
unsigned index;
FrameEntry *fe = ScopeCoordinateToFrameIndex(script, PC, &index) == FrameIndex_Local
? frame.getLocal(index)
: frame.getArg(index);
JSValueType type = fe->isTypeKnown() ? fe->getKnownType() : JSVAL_TYPE_UNKNOWN;
frame.push(addr, type, true /* = reuseBase */);
JSValueType type = knownPushedType(0);
RegisterID typeReg, dataReg;
frame.loadIntoRegisters(addr, /* reuseBase = */ true, &typeReg, &dataReg);
frame.pushRegs(typeReg, dataReg, type);
BarrierState barrier = testBarrier(typeReg, dataReg,
/* testUndefined = */ false,
/* testReturn */ false,
/* force */ true);
finishBarrier(barrier, REJOIN_FALLTHROUGH, 0);
} else {
#ifdef JSGC_INCREMENTAL_MJ
if (cx->compartment->needsBarrier()) {
@ -6466,7 +6313,7 @@ mjit::Compiler::jsop_setgname_slow(PropertyName *name)
{
prepareStubCall(Uses(2));
masm.move(ImmPtr(name), Registers::ArgReg1);
INLINE_STUBCALL(STRICT_VARIANT(stubs::SetGlobalName), REJOIN_FALLTHROUGH);
INLINE_STUBCALL(STRICT_VARIANT(script, stubs::SetGlobalName), REJOIN_FALLTHROUGH);
frame.popn(2);
pushSyncedEntry(0);
}
@ -6603,7 +6450,7 @@ void
mjit::Compiler::jsop_setelem_slow()
{
prepareStubCall(Uses(3));
INLINE_STUBCALL(STRICT_VARIANT(stubs::SetElem), REJOIN_FALLTHROUGH);
INLINE_STUBCALL(STRICT_VARIANT(script, stubs::SetElem), REJOIN_FALLTHROUGH);
frame.popn(3);
frame.pushSynced(JSVAL_TYPE_UNKNOWN);
}

Some files were not shown because too many files have changed in this diff Show More