Mirror of https://github.com/mozilla/gecko-dev.git, synced 2024-11-25 13:51:41 +00:00

Commit 48ef8932ac: Merge inbound to m-c. a=merge
@@ -19,6 +19,9 @@ body[tpEnabled] .showTpDisabled,
body:not([tpEnabled]) .showTpEnabled {
display: none !important;
}
body[globalTpEnabled] .showGlobalTpDisabled {
display: none !important;
}

@media screen and (min-width: 1200px) and (min-height: 700px) {
body.private {
@@ -14,18 +14,25 @@ const FAVICON_PRIVACY = "chrome://browser/skin/Privacy-16.png";
let stringBundle = Services.strings.createBundle(
"chrome://browser/locale/aboutPrivateBrowsing.properties");

let prefBranch = Services.prefs.getBranch("privacy.trackingprotection.pbmode.");
let prefBranch = Services.prefs.getBranch("privacy.trackingprotection.");
let prefObserver = {
QueryInterface: XPCOMUtils.generateQI([Ci.nsIObserver,
Ci.nsISupportsWeakReference]),
observe: function () {
if (prefBranch.getBoolPref("enabled")) {
document.body.setAttribute("globalTpEnabled", "true");
} else {
document.body.removeAttribute("globalTpEnabled");
}
if (prefBranch.getBoolPref("pbmode.enabled") ||
prefBranch.getBoolPref("enabled")) {
document.body.setAttribute("tpEnabled", "true");
} else {
document.body.removeAttribute("tpEnabled");
}
},
};
prefBranch.addObserver("pbmode.enabled", prefObserver, true);
prefBranch.addObserver("enabled", prefObserver, true);

function setFavIcon(url) {
@@ -73,11 +73,11 @@
<!-- Use text links to implement plain styled buttons without an href. -->
<label xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
id="disableTrackingProtection"
class="text-link showTpEnabled"
class="text-link showTpEnabled showGlobalTpDisabled"
value="&trackingProtection.disable;"/>
<label xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
id="enableTrackingProtection"
class="text-link showTpDisabled"
class="text-link showTpDisabled showGlobalTpDisabled"
value="&trackingProtection.enable;"/>
<p id="tpStartTour"
class="showTpEnabled"><a id="startTour">&trackingProtection.startTour1;</a></p>
@@ -1612,6 +1612,7 @@ RequestsMenuView.prototype = Heritage.extend(WidgetMethods, {
}
let nameWithQuery = this._getUriNameWithQuery(uri);
let hostPort = this._getUriHostPort(uri);
let host = this._getUriHost(uri);
let unicodeUrl = NetworkHelper.convertToUnicode(unescape(uri.spec));

let file = $(".requests-menu-file", target);
@@ -1621,6 +1622,27 @@ RequestsMenuView.prototype = Heritage.extend(WidgetMethods, {
let domain = $(".requests-menu-domain", target);
domain.setAttribute("value", hostPort);
domain.setAttribute("tooltiptext", hostPort);

// Mark local hosts specially, where "local" is as defined in the W3C
// spec for secure contexts.
// http://www.w3.org/TR/powerful-features/
//
// * If the name falls under 'localhost'
// * If the name is an IPv4 address within 127.0.0.0/8
// * If the name is an IPv6 address within ::1/128
//
// IPv6 parsing is a little sloppy; it assumes that the address has
// been validated before it gets here.
let icon = $(".requests-security-state-icon", target);
icon.classList.remove("security-state-local");
if (host.match(/(.+\.)?localhost$/) ||
host.match(/^127\.\d{1,3}\.\d{1,3}\.\d{1,3}/) ||
host.match(/\[[0:]+1\]/)) {
let tooltip = L10N.getStr("netmonitor.security.state.secure");
icon.classList.add("security-state-local");
icon.setAttribute("tooltiptext", tooltip);
}

break;
}
case "remoteAddress":
@@ -1630,12 +1652,17 @@ RequestsMenuView.prototype = Heritage.extend(WidgetMethods, {
domain.setAttribute("tooltiptext", tooltip);
break;
case "securityState": {
let tooltip = L10N.getStr("netmonitor.security.state." + aValue);
let icon = $(".requests-security-state-icon", target);
this.attachSecurityIconClickListener(aItem);

// Security icon for local hosts is set in the "url" branch
if (icon.classList.contains("security-state-local")) {
break;
}

let tooltip = L10N.getStr("netmonitor.security.state." + aValue);
icon.classList.add("security-state-" + aValue);
icon.setAttribute("tooltiptext", tooltip);

this.attachSecurityIconClickListener(aItem);
break;
}
case "status": {
@@ -2113,6 +2140,9 @@ RequestsMenuView.prototype = Heritage.extend(WidgetMethods, {
}
return NetworkHelper.convertToUnicode(unescape(aUrl.hostPort));
},
_getUriHost: function(aUrl) {
return this._getUriHostPort(aUrl).replace(/:\d+$/, "");
},

/**
* Helper for getting an abbreviated string for a mime type.
@@ -14,6 +14,7 @@ add_task(function* () {
"example.com": "security-state-secure",
"nocert.example.com": "security-state-broken",
"rc4.example.com": "security-state-weak",
"localhost": "security-state-local",
};

yield new promise(resolve => {
@@ -50,6 +51,8 @@ add_task(function* () {
* - https://nocert.example.com (broken)
* - https://example.com (secure)
* - http://test1.example.com (insecure)
* - https://rc4.example.com (partly secure)
* - http://localhost (local)
* and waits until NetworkMonitor has handled all packets sent by the server.
*/
function* performRequests() {
@@ -82,14 +85,19 @@ add_task(function* () {
debuggee.performRequests(1, "https://rc4.example.com" + CORS_SJS_PATH);
yield done;

is(RequestsMenu.itemCount, 4, "Four events logged.");
done = waitForSecurityBrokenNetworkEvent(true);
info("Requesting a resource over HTTP to localhost.");
debuggee.performRequests(1, "http://localhost" + CORS_SJS_PATH);
yield done;

is(RequestsMenu.itemCount, 5, "Five events logged.");
}

/**
* Returns a promise that's resolved once a request with security issues is
* completed.
*/
function waitForSecurityBrokenNetworkEvent() {
function waitForSecurityBrokenNetworkEvent(networkError) {
let awaitedEvents = [
"UPDATING_REQUEST_HEADERS",
"RECEIVED_REQUEST_HEADERS",
@@ -102,6 +110,12 @@ add_task(function* () {
"RECEIVED_EVENT_TIMINGS",
];

// If the reason for breakage is a network error, then the
// STARTED_RECEIVING_RESPONSE event does not fire.
if (networkError) {
awaitedEvents.splice(4, 1);
}

let promises = awaitedEvents.map((event) => {
return monitor.panelWin.once(EVENTS[event]);
});
@@ -76,6 +76,7 @@ skip-if = os == 'linux' # Bug 1172120
[browser_perf-overview-render-02.js]
[browser_perf-overview-render-03.js]
[browser_perf-overview-render-04.js]
skip-if = os == 'linux' # bug 1186322
[browser_perf-overview-selection-01.js]
[browser_perf-overview-selection-02.js]
[browser_perf-overview-selection-03.js]
@@ -142,25 +142,27 @@
width: 16px;
height: 16px;
-moz-margin-end: 4px;
cursor: pointer;
}

.security-state-insecure {
list-style-image: url(chrome://browser/skin/identity-not-secure.svg);
list-style-image: url(chrome://browser/skin/identity-mixed-active-loaded.svg);
}

.security-state-secure {
cursor: pointer;
list-style-image: url(chrome://browser/skin/identity-secure.svg);
}

.security-state-weak {
cursor: pointer;
list-style-image: url(chrome://browser/skin/identity-mixed-passive-loaded.svg);
}

.security-state-broken {
cursor: pointer;
list-style-image: url(chrome://browser/skin/identity-mixed-active-loaded.svg);
list-style-image: url(chrome://browser/skin/controlcenter/warning-gray.svg);
}

.security-state-local {
list-style-image: url(chrome://browser/skin/identity-not-secure.svg);
}

.requests-menu-type {
@@ -196,7 +198,8 @@ label.requests-menu-status-code {
}

box.requests-menu-status:not([code]) {
background-color: var(--theme-content-color2);
background-color: var(--theme-highlight-red);
border-radius: 0; /* squares */
}

box.requests-menu-status[code="cached"] {
@@ -767,8 +767,6 @@ See https://developer.mozilla.org/en/Windows_Build_Prerequisites.])
fi

AC_DEFINE_UNQUOTED(MOZ_WINSDK_TARGETVER,0x$MOZ_WINSDK_TARGETVER)
# Definitions matching sdkddkver.h
AC_DEFINE_UNQUOTED(MOZ_NTDDI_WIN7, 0x06010000)
AC_DEFINE_UNQUOTED(MOZ_WINSDK_MAXVER,$MOZ_WINSDK_MAXVER)
AC_SUBST(MOZ_WINSDK_MAXVER)
;;
@@ -850,12 +850,14 @@ DataTransfer::GetFilesAndDirectories(ErrorResult& aRv)
{
nsCOMPtr<nsINode> parentNode = do_QueryInterface(mParent);
if (!parentNode) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}

nsCOMPtr<nsIGlobalObject> global = parentNode->OwnerDoc()->GetScopeObject();
MOZ_ASSERT(global);
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}

@@ -865,43 +867,44 @@ DataTransfer::GetFilesAndDirectories(ErrorResult& aRv)
}

if (!mFiles) {
ErrorResult dummy;
GetFiles(dummy);
if (!mFiles) {
GetFiles(aRv);
if (NS_WARN_IF(aRv.Failed())) {
return nullptr;
}
}

Sequence<OwningFileOrDirectory> filesAndDirsSeq;

if (!filesAndDirsSeq.SetLength(mFiles->Length(), mozilla::fallible_t())) {
p->MaybeReject(NS_ERROR_OUT_OF_MEMORY);
return p.forget();
}
if (mFiles && mFiles->Length()) {
if (!filesAndDirsSeq.SetLength(mFiles->Length(), mozilla::fallible_t())) {
p->MaybeReject(NS_ERROR_OUT_OF_MEMORY);
return p.forget();
}

nsPIDOMWindow* window = parentNode->OwnerDoc()->GetInnerWindow();
nsPIDOMWindow* window = parentNode->OwnerDoc()->GetInnerWindow();

nsRefPtr<OSFileSystem> fs;
for (uint32_t i = 0; i < mFiles->Length(); ++i) {
if (mFiles->Item(i)->Impl()->IsDirectory()) {
nsRefPtr<OSFileSystem> fs;
for (uint32_t i = 0; i < mFiles->Length(); ++i) {
if (mFiles->Item(i)->Impl()->IsDirectory()) {
#if defined(ANDROID) || defined(MOZ_B2G)
MOZ_ASSERT(false,
"Directory picking should have been redirected to normal "
"file picking for platforms that don't have a directory "
"picker");
MOZ_ASSERT(false,
"Directory picking should have been redirected to normal "
"file picking for platforms that don't have a directory "
"picker");
#endif
nsAutoString path;
mFiles->Item(i)->GetMozFullPathInternal(path, aRv);
if (aRv.Failed()) {
return nullptr;
nsAutoString path;
mFiles->Item(i)->GetMozFullPathInternal(path, aRv);
if (aRv.Failed()) {
return nullptr;
}
int32_t leafSeparatorIndex = path.RFind(FILE_PATH_SEPARATOR);
nsDependentSubstring dirname = Substring(path, 0, leafSeparatorIndex);
nsDependentSubstring basename = Substring(path, leafSeparatorIndex);
fs = MakeOrReuseFileSystem(dirname, fs, window);
filesAndDirsSeq[i].SetAsDirectory() = new Directory(fs, basename);
} else {
filesAndDirsSeq[i].SetAsFile() = mFiles->Item(i);
}
int32_t leafSeparatorIndex = path.RFind(FILE_PATH_SEPARATOR);
nsDependentSubstring dirname = Substring(path, 0, leafSeparatorIndex);
nsDependentSubstring basename = Substring(path, leafSeparatorIndex);
fs = MakeOrReuseFileSystem(dirname, fs, window);
filesAndDirsSeq[i].SetAsDirectory() = new Directory(fs, basename);
} else {
filesAndDirsSeq[i].SetAsFile() = mFiles->Item(i);
}
}
dom/events/crashtests/1190036-1.html (new file, 15 lines)
@@ -0,0 +1,15 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<script>

function boom() {
var ev = new ClipboardEvent("p");
ev.clipboardData.getFilesAndDirectories();
}

</script>
</head>
<body onload="boom();"></body>
</html>
@@ -9,6 +9,7 @@ load 1033343.html
load 1035654-1.html
load 1035654-2.html
load 1143972-1.html
load 1190036-1.html
needs-focus load 1072137-1.html
load eventctor-nulldictionary.html
load eventctor-nullstorage.html
@@ -157,6 +157,16 @@ struct AudioChunk {
return static_cast<float*>(const_cast<void*>(mChannelData[aChannel]));
}

void ReleaseBufferIfShared()
{
if (mBuffer && mBuffer->IsShared()) {
// Remove pointers into the buffer, but keep the array allocation for
// chunk re-use.
mChannelData.ClearAndRetainStorage();
mBuffer = nullptr;
}
}

bool IsMuted() const { return mVolume == 0.0f; }

size_t SizeOfExcludingThisIfUnshared(MallocSizeOf aMallocSizeOf) const
@@ -105,7 +105,7 @@ UpdateStreamBlocking(MediaStream* aStream, bool aBlocking)
*/
class DecodedStreamData {
public:
DecodedStreamData(SourceMediaStream* aStream, bool aPlaying,
DecodedStreamData(SourceMediaStream* aStream,
MozPromiseHolder<GenericPromise>&& aPromise);
~DecodedStreamData();
bool IsFinished() const;
@@ -142,7 +142,7 @@ public:
bool mEOSVideoCompensation;
};

DecodedStreamData::DecodedStreamData(SourceMediaStream* aStream, bool aPlaying,
DecodedStreamData::DecodedStreamData(SourceMediaStream* aStream,
MozPromiseHolder<GenericPromise>&& aPromise)
: mAudioFramesWritten(0)
, mNextVideoTime(-1)
@@ -152,17 +152,15 @@ DecodedStreamData::DecodedStreamData(SourceMediaStream* aStream, bool aPlaying,
, mHaveSentFinishAudio(false)
, mHaveSentFinishVideo(false)
, mStream(aStream)
, mPlaying(aPlaying)
, mPlaying(true)
, mEOSVideoCompensation(false)
{
// DecodedStreamGraphListener will resolve this promise.
mListener = new DecodedStreamGraphListener(mStream, Move(aPromise));
mStream->AddListener(mListener);

// Block the stream if we are not playing.
if (!aPlaying) {
UpdateStreamBlocking(mStream, true);
}
// mPlaying is initially true because MDSM won't start playback until playing
// becomes true. This is consistent with the settings of AudioSink.
}

DecodedStreamData::~DecodedStreamData()
@@ -358,7 +356,7 @@ DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
MediaQueue<MediaData>& aAudioQueue,
MediaQueue<MediaData>& aVideoQueue)
: mOwnerThread(aOwnerThread)
, mMonitor("DecodedStream::mMonitor")
, mShuttingDown(false)
, mPlaying(false)
, mVolume(1.0)
, mAudioQueue(aAudioQueue)
@@ -371,11 +369,17 @@ DecodedStream::~DecodedStream()
MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
}

void
DecodedStream::Shutdown()
{
MOZ_ASSERT(NS_IsMainThread());
mShuttingDown = true;
}

nsRefPtr<GenericPromise>
DecodedStream::StartPlayback(int64_t aStartTime, const MediaInfo& aInfo)
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");

mStartTime.emplace(aStartTime);
@@ -413,7 +417,7 @@ DecodedStream::StartPlayback(int64_t aStartTime, const MediaInfo& aInfo)
void DecodedStream::StopPlayback()
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());

// Playback didn't even start at all.
if (mStartTime.isNothing()) {
return;
@@ -424,12 +428,19 @@ void DecodedStream::StopPlayback()

// Clear mData immediately when this playback session ends so we won't
// send data to the wrong stream in SendData() in next playback session.
DecodedStreamData* data = mData.release();
// mData is not yet created on the main thread.
if (!data) {
DestroyData(Move(mData));
}

void
DecodedStream::DestroyData(UniquePtr<DecodedStreamData> aData)
{
AssertOwnerThread();

if (!aData) {
return;
}

DecodedStreamData* data = aData.release();
nsRefPtr<DecodedStream> self = this;
nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
self->mOutputStreamManager.Disconnect();
@@ -442,35 +453,41 @@ void
DecodedStream::CreateData(MozPromiseHolder<GenericPromise>&& aPromise)
{
MOZ_ASSERT(NS_IsMainThread());
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
MOZ_ASSERT(!mData, "Already created.");

// No need to create a source stream when there are no output streams. This
// happens when RemoveOutput() is called immediately after StartPlayback().
// We also bail out when the playback session has ended. This happens when
// StopPlayback() is called immediately after StartPlayback().
if (!mOutputStreamManager.Graph() || mStartTime.isNothing()) {
// Also we don't create a source stream when MDSM has begun shutdown.
if (!mOutputStreamManager.Graph() || mShuttingDown) {
// Resolve the promise to indicate the end of playback.
aPromise.Resolve(true, __func__);
return;
}

auto source = mOutputStreamManager.Graph()->CreateSourceStream(nullptr);
mData.reset(new DecodedStreamData(source, mPlaying, Move(aPromise)));
mOutputStreamManager.Connect(mData->mStream);
auto data = new DecodedStreamData(source, Move(aPromise));
mOutputStreamManager.Connect(data->mStream);

// Start to send data to the stream immediately
nsRefPtr<DecodedStream> self = this;
nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
ReentrantMonitorAutoEnter mon(self->GetReentrantMonitor());
// Don't send data if playback has ended.
if (self->mStartTime.isSome()) {
self->SendData();
class R : public nsRunnable {
typedef void(DecodedStream::*Method)(UniquePtr<DecodedStreamData>);
public:
R(DecodedStream* aThis, Method aMethod, DecodedStreamData* aData)
: mThis(aThis), mMethod(aMethod), mData(aData) {}
NS_IMETHOD Run() override
{
(mThis->*mMethod)(Move(mData));
return NS_OK;
}
});
// Don't assert success because the owner thread might have begun shutdown
// while we are still dealing with jobs on the main thread.
mOwnerThread->Dispatch(r.forget(), AbstractThread::DontAssertDispatchSuccess);
private:
nsRefPtr<DecodedStream> mThis;
Method mMethod;
UniquePtr<DecodedStreamData> mData;
};

// Post a message to ensure |mData| is only updated on the worker thread.
// Note this must be done before MDSM's shutdown since dispatch could fail
// when the worker thread is shut down.
nsCOMPtr<nsIRunnable> r = new R(this, &DecodedStream::OnDataCreated, data);
mOwnerThread->Dispatch(r.forget());
}

bool
@@ -479,10 +496,22 @@ DecodedStream::HasConsumers() const
return !mOutputStreamManager.IsEmpty();
}

ReentrantMonitor&
DecodedStream::GetReentrantMonitor() const
void
DecodedStream::OnDataCreated(UniquePtr<DecodedStreamData> aData)
{
return mMonitor;
AssertOwnerThread();
MOZ_ASSERT(!mData, "Already created.");

// Start to send data to the stream immediately
if (mStartTime.isSome()) {
aData->SetPlaying(mPlaying);
mData = Move(aData);
SendData();
return;
}

// Playback has ended. Destroy aData which is not needed anymore.
DestroyData(Move(aData));
}

void
@@ -501,7 +530,6 @@ void
DecodedStream::SetPlaying(bool aPlaying)
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
mPlaying = aPlaying;
if (mData) {
mData->SetPlaying(aPlaying);
@@ -512,7 +540,6 @@ void
DecodedStream::SetVolume(double aVolume)
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
mVolume = aVolume;
}

@@ -520,7 +547,6 @@ void
DecodedStream::SetSameOrigin(bool aSameOrigin)
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
mSameOrigin = aSameOrigin;
}

@@ -528,7 +554,6 @@ void
DecodedStream::InitTracks()
{
AssertOwnerThread();
GetReentrantMonitor().AssertCurrentThreadIn();

if (mData->mStreamInitialized) {
return;
@@ -609,7 +634,6 @@ void
DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin)
{
AssertOwnerThread();
GetReentrantMonitor().AssertCurrentThreadIn();

if (!mInfo.HasAudio()) {
return;
@@ -675,7 +699,6 @@ void
DecodedStream::SendVideo(bool aIsSameOrigin)
{
AssertOwnerThread();
GetReentrantMonitor().AssertCurrentThreadIn();

if (!mInfo.HasVideo()) {
return;
@@ -754,7 +777,6 @@ void
DecodedStream::AdvanceTracks()
{
AssertOwnerThread();
GetReentrantMonitor().AssertCurrentThreadIn();

StreamTime endPosition = 0;

@@ -779,7 +801,6 @@ void
DecodedStream::SendData()
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
MOZ_ASSERT(mStartTime.isSome(), "Must be called after StartPlayback()");

// Not yet created on the main thread. MDSM will try again later.
@@ -810,7 +831,6 @@ int64_t
DecodedStream::AudioEndTime() const
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
if (mStartTime.isSome() && mInfo.HasAudio() && mData) {
CheckedInt64 t = mStartTime.ref() +
FramesToUsecs(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
@@ -825,7 +845,6 @@ int64_t
DecodedStream::GetPosition() const
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
// This is only called after MDSM starts playback. So mStartTime is
// guaranteed to be something.
MOZ_ASSERT(mStartTime.isSome());
@@ -836,7 +855,6 @@ bool
DecodedStream::IsFinished() const
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
return mData && mData->IsFinished();
}

@@ -844,7 +862,6 @@ void
DecodedStream::ConnectListener()
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());

mAudioPushListener = mAudioQueue.PushEvent().Connect(
mOwnerThread, this, &DecodedStream::SendData);
@@ -860,7 +877,6 @@ void
DecodedStream::DisconnectListener()
{
AssertOwnerThread();
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());

mAudioPushListener.Disconnect();
mVideoPushListener.Disconnect();
@@ -106,6 +106,8 @@ public:
MediaQueue<MediaData>& aAudioQueue,
MediaQueue<MediaData>& aVideoQueue);

void Shutdown();

// Mimic MDSM::StartAudioThread.
// Must be called before any calls to SendData().
//
@@ -132,8 +134,9 @@ protected:
virtual ~DecodedStream();

private:
ReentrantMonitor& GetReentrantMonitor() const;
void CreateData(MozPromiseHolder<GenericPromise>&& aPromise);
void DestroyData(UniquePtr<DecodedStreamData> aData);
void OnDataCreated(UniquePtr<DecodedStreamData> aData);
void InitTracks();
void AdvanceTracks();
void SendAudio(double aVolume, bool aIsSameOrigin);
@@ -149,18 +152,18 @@ private:

const nsRefPtr<AbstractThread> mOwnerThread;

UniquePtr<DecodedStreamData> mData;
/*
* Main thread only members.
*/
// Data about MediaStreams that are being fed by the decoder.
OutputStreamManager mOutputStreamManager;
// True if MDSM has begun shutdown.
bool mShuttingDown;

// TODO: This is a temp solution to get rid of decoder monitor on the main
// thread in MDSM::AddOutputStream and MDSM::RecreateDecodedStream as
// required by bug 1146482. DecodedStream needs to release monitor before
// calling back into MDSM functions in order to prevent deadlocks.
//
// Please move all capture-stream related code from MDSM into DecodedStream
// and apply "dispatch + mirroring" to get rid of this monitor in the future.
mutable ReentrantMonitor mMonitor;
/*
* Worker thread only members.
*/
UniquePtr<DecodedStreamData> mData;

bool mPlaying;
double mVolume;
@@ -171,6 +171,7 @@ public:

void DispatchShutdown()
{
mDecodedStream->Shutdown();
nsCOMPtr<nsIRunnable> runnable =
NS_NewRunnableMethod(this, &MediaDecoderStateMachine::Shutdown);
OwnerThread()->Dispatch(runnable.forget());
@@ -776,9 +776,10 @@ MediaRecorder::MediaRecorder(AudioNode& aSrcAudioNode,
if (aSrcAudioNode.NumberOfOutputs() > 0) {
AudioContext* ctx = aSrcAudioNode.Context();
AudioNodeEngine* engine = new AudioNodeEngine(nullptr);
mPipeStream = ctx->Graph()->CreateAudioNodeStream(engine,
MediaStreamGraph::EXTERNAL_STREAM,
ctx->SampleRate());
AudioNodeStream::Flags flags =
AudioNodeStream::EXTERNAL_OUTPUT |
AudioNodeStream::NEED_MAIN_THREAD_FINISHED;
mPipeStream = AudioNodeStream::Create(ctx->Graph(), engine, flags);
AudioNodeStream* ns = aSrcAudioNode.GetStream();
if (ns) {
mInputPort = mPipeStream->AllocateInputPort(aSrcAudioNode.GetStream(),
@@ -20,7 +20,6 @@
#include "ImageContainer.h"
#include "AudioCaptureStream.h"
#include "AudioChannelService.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioNodeExternalInputStream.h"
#include "mozilla/dom/AudioContextBinding.h"
@@ -86,7 +85,7 @@ MediaStreamGraphImpl::FinishStream(MediaStream* aStream)
static const GraphTime START_TIME_DELAYED = -1;

void
MediaStreamGraphImpl::AddStream(MediaStream* aStream)
MediaStreamGraphImpl::AddStreamGraphThread(MediaStream* aStream)
{
// Check if we're adding a stream to a suspended context, in which case, we
// add it to mSuspendedStreams, and delay setting mBufferStartTime
@@ -113,7 +112,7 @@ MediaStreamGraphImpl::AddStream(MediaStream* aStream)
}

void
MediaStreamGraphImpl::RemoveStream(MediaStream* aStream)
MediaStreamGraphImpl::RemoveStreamGraphThread(MediaStream* aStream)
{
// Remove references in mStreamUpdates before we allow aStream to die.
// Pending updates are not needed (since the main thread has already given
@@ -1641,7 +1640,7 @@ public:
explicit CreateMessage(MediaStream* aStream) : ControlMessage(aStream) {}
virtual void Run() override
{
mStream->GraphImpl()->AddStream(mStream);
mStream->GraphImpl()->AddStreamGraphThread(mStream);
}
virtual void RunDuringShutdown() override
{
@@ -2055,7 +2054,7 @@ MediaStream::Destroy()
mStream->RemoveAllListenersImpl();
auto graph = mStream->GraphImpl();
mStream->DestroyImpl();
graph->RemoveStream(mStream);
graph->RemoveStreamGraphThread(mStream);
}
virtual void RunDuringShutdown()
{ Run(); }
@@ -2383,9 +2382,8 @@ MediaStream::AddMainThreadListener(MainThreadMediaStreamListener* aListener)

mMainThreadListeners.AppendElement(aListener);

// If we have to send the notification or we have a runnable that will do it,
// let finish here.
if (!mFinishedNotificationSent || mNotificationMainThreadRunnable) {
// If it is not yet time to send the notification, then finish here.
if (!mFinishedNotificationSent) {
return;
}

@@ -2399,7 +2397,6 @@ MediaStream::AddMainThreadListener(MainThreadMediaStreamListener* aListener)
NS_IMETHOD Run() override
{
MOZ_ASSERT(NS_IsMainThread());
mStream->mNotificationMainThreadRunnable = nullptr;
mStream->NotifyMainThreadListeners();
return NS_OK;
}
@@ -2411,11 +2408,7 @@ MediaStream::AddMainThreadListener(MainThreadMediaStreamListener* aListener)
};

nsRefPtr<nsRunnable> runnable = new NotifyRunnable(this);
if (NS_WARN_IF(NS_FAILED(NS_DispatchToMainThread(runnable.forget())))) {
return;
}

mNotificationMainThreadRunnable = runnable;
NS_WARN_IF(NS_FAILED(NS_DispatchToMainThread(runnable.forget())));
}

void
@@ -2860,7 +2853,7 @@ ProcessedMediaStream::DestroyImpl()
MediaStream::DestroyImpl();
// The stream order is only important if there are connections, in which
// case MediaInputPort::Disconnect() called SetStreamOrderDirty().
// MediaStreamGraphImpl::RemoveStream() will also call
// MediaStreamGraphImpl::RemoveStreamGraphThread() will also call
// SetStreamOrderDirty(), for other reasons.
}

@@ -3106,10 +3099,7 @@ SourceMediaStream*
MediaStreamGraph::CreateSourceStream(DOMMediaStream* aWrapper)
{
SourceMediaStream* stream = new SourceMediaStream(aWrapper);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
graph->AppendMessage(new CreateMessage(stream));
AddStream(stream);
return stream;
}

@@ -3117,10 +3107,7 @@ ProcessedMediaStream*
MediaStreamGraph::CreateTrackUnionStream(DOMMediaStream* aWrapper)
{
TrackUnionStream* stream = new TrackUnionStream(aWrapper);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
graph->AppendMessage(new CreateMessage(stream));
AddStream(stream);
return stream;
}

@@ -3128,54 +3115,17 @@ ProcessedMediaStream*
MediaStreamGraph::CreateAudioCaptureStream(DOMMediaStream* aWrapper)
{
AudioCaptureStream* stream = new AudioCaptureStream(aWrapper);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
graph->AppendMessage(new CreateMessage(stream));
AddStream(stream);
return stream;
}

AudioNodeExternalInputStream*
MediaStreamGraph::CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate)
void
MediaStreamGraph::AddStream(MediaStream* aStream)
{
MOZ_ASSERT(NS_IsMainThread());
if (!aSampleRate) {
aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
}
AudioNodeExternalInputStream* stream = new AudioNodeExternalInputStream(
aEngine, aSampleRate, aEngine->NodeMainThread()->Context()->Id());
NS_ADDREF(stream);
NS_ADDREF(aStream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
graph->AppendMessage(new CreateMessage(stream));
return stream;
}

AudioNodeStream*
MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
AudioNodeStreamKind aKind,
TrackRate aSampleRate)
{
MOZ_ASSERT(NS_IsMainThread());
if (!aSampleRate) {
aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
}
// MediaRecorders use an AudioNodeStream, but no AudioNode
AudioNode* node = aEngine->NodeMainThread();
dom::AudioContext::AudioContextId contextIdForStream = node ? node->Context()->Id() :
NO_AUDIO_CONTEXT;
AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind, aSampleRate,
contextIdForStream);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
if (aEngine->HasNode()) {
stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
aEngine->NodeMainThread()->ChannelCountModeValue(),
aEngine->NodeMainThread()->ChannelInterpretationValue());
}
graph->AppendMessage(new CreateMessage(stream));
return stream;
aStream->SetGraphImpl(graph);
graph->AppendMessage(new CreateMessage(aStream));
}

class GraphStartedRunnable final : public nsRunnable
@@ -638,7 +638,6 @@ protected:
TimeVarying<GraphTime,uint32_t,0> mExplicitBlockerCount;
nsTArray<nsRefPtr<MediaStreamListener> > mListeners;
nsTArray<MainThreadMediaStreamListener*> mMainThreadListeners;
nsRefPtr<nsRunnable> mNotificationMainThreadRunnable;
nsTArray<TrackID> mDisabledTrackIDs;

// Precomputed blocking status (over GraphTime).
@@ -1241,23 +1240,11 @@ public:
* Create a stream that will mix all its audio input.
*/
ProcessedMediaStream* CreateAudioCaptureStream(DOMMediaStream* aWrapper);
// Internal AudioNodeStreams can only pass their output to another
// AudioNode, whereas external AudioNodeStreams can pass their output
// to an nsAudioStream for playback.
enum AudioNodeStreamKind { SOURCE_STREAM, INTERNAL_STREAM, EXTERNAL_STREAM };
/**
* Create a stream that will process audio for an AudioNode.
* Takes ownership of aEngine. aSampleRate is the sampling rate used
* for the stream. If 0 is passed, the sampling rate of the engine's
* node will get used.
*/
AudioNodeStream* CreateAudioNodeStream(AudioNodeEngine* aEngine,
AudioNodeStreamKind aKind,
TrackRate aSampleRate = 0);

AudioNodeExternalInputStream*
CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine,
TrackRate aSampleRate = 0);
/**
* Add a new stream to the graph. Main thread.
*/
void AddStream(MediaStream* aStream);

/* From the main thread, ask the MSG to send back an event when the graph
* thread is running, and audio is being processed. */
@@ -424,12 +424,12 @@ public:
/**
* Add aStream to the graph and initializes its graph-specific state.
*/
void AddStream(MediaStream* aStream);
void AddStreamGraphThread(MediaStream* aStream);
/**
* Remove aStream from the graph. Ensures that pending messages about the
* stream back to the main thread are flushed.
*/
void RemoveStream(MediaStream* aStream);
void RemoveStreamGraphThread(MediaStream* aStream);
/**
* Remove aPort from the graph and release it.
*/
@@ -11,7 +11,7 @@
<script class="testbody" type="text/javascript">
//longer timeout for slow platforms
if (isSlowPlatform()) {
SimpleTest.requestLongerTimeout(1.5);
SimpleTest.requestLongerTimeout(2);
}

var manager = new MediaTestManager;
@@ -38,8 +38,8 @@ function startTest(test, token) {

v.addEventListener("seeked", function (e) {
e.target.seekedCount += 1;
if (e.target.seekedCount == 3) {
ok(e.target.seekingCount == 3, "Expect matched pairs of seeking/seeked events.");
if (e.target.seekedCount == 2) {
ok(e.target.seekingCount == 2, "Expect matched pairs of seeking/seeked events.");
e.target.loop = false;
}
}, false);
@@ -85,8 +85,9 @@ AnalyserNode::AnalyserNode(AudioContext* aContext)
, mMaxDecibels(-30.)
, mSmoothingTimeConstant(.8)
{
mStream = aContext->Graph()->CreateAudioNodeStream(new AnalyserNodeEngine(this),
MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(),
new AnalyserNodeEngine(this),
AudioNodeStream::NO_STREAM_FLAGS);

// Enough chunks must be recorded to handle the case of fftSize being
// increased to maximum immediately before getFloatTimeDomainData() is
@@ -542,7 +542,8 @@ AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
, mStartCalled(false)
{
AudioBufferSourceNodeEngine* engine = new AudioBufferSourceNodeEngine(this, aContext->Destination());
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::SOURCE_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::NEED_MAIN_THREAD_FINISHED);
engine->SetSourceStream(mStream);
mStream->AddMainThreadListener(this);
}
@@ -346,7 +346,11 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
aLength, aSampleRate) :
static_cast<AudioNodeEngine*>(new DestinationNodeEngine(this));

mStream = graph->CreateAudioNodeStream(engine, MediaStreamGraph::EXTERNAL_STREAM);
AudioNodeStream::Flags flags =
AudioNodeStream::NEED_MAIN_THREAD_CURRENT_TIME |
AudioNodeStream::NEED_MAIN_THREAD_FINISHED |
AudioNodeStream::EXTERNAL_OUTPUT;
mStream = AudioNodeStream::Create(graph, engine, flags);
mStream->AddMainThreadListener(this);
mStream->AddAudioOutput(&gWebAudioOutputKey);
@@ -13,7 +13,7 @@ using namespace mozilla::dom;
namespace mozilla {

AudioNodeExternalInputStream::AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate, uint32_t aContextId)
: AudioNodeStream(aEngine, MediaStreamGraph::INTERNAL_STREAM, aSampleRate, aContextId)
: AudioNodeStream(aEngine, NO_STREAM_FLAGS, aSampleRate, aContextId)
{
MOZ_COUNT_CTOR(AudioNodeExternalInputStream);
}
@@ -23,6 +23,20 @@ AudioNodeExternalInputStream::~AudioNodeExternalInputStream()
MOZ_COUNT_DTOR(AudioNodeExternalInputStream);
}

/* static */ already_AddRefed<AudioNodeExternalInputStream>
AudioNodeExternalInputStream::Create(MediaStreamGraph* aGraph,
AudioNodeEngine* aEngine)
{
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(aGraph->GraphRate() == aEngine->NodeMainThread()->Context()->SampleRate());

nsRefPtr<AudioNodeExternalInputStream> stream =
new AudioNodeExternalInputStream(aEngine, aGraph->GraphRate(),
aEngine->NodeMainThread()->Context()->Id());
aGraph->AddStream(stream);
return stream.forget();
}

/**
* Copies the data in aInput to aOffsetInBlock within aBlock.
* aBlock must have been allocated with AllocateInputBlock and have a channel
@@ -21,9 +21,12 @@ namespace mozilla {
class AudioNodeExternalInputStream final : public AudioNodeStream
{
public:
static already_AddRefed<AudioNodeExternalInputStream>
Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine);

protected:
AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate,
uint32_t aContextId);
protected:
~AudioNodeExternalInputStream();

public:
@@ -26,14 +26,14 @@ namespace mozilla {
*/

AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine,
MediaStreamGraph::AudioNodeStreamKind aKind,
Flags aFlags,
TrackRate aSampleRate,
AudioContext::AudioContextId aContextId)
: ProcessedMediaStream(nullptr),
mEngine(aEngine),
mSampleRate(aSampleRate),
mAudioContextId(aContextId),
mKind(aKind),
mFlags(aFlags),
mNumberOfInputChannels(2),
mMarkAsFinishedAfterThisBlock(false),
mAudioParamStream(false),
@@ -53,6 +53,30 @@ AudioNodeStream::~AudioNodeStream()
MOZ_COUNT_DTOR(AudioNodeStream);
}

/* static */ already_AddRefed<AudioNodeStream>
AudioNodeStream::Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine,
Flags aFlags)
{
MOZ_ASSERT(NS_IsMainThread());

// MediaRecorders use an AudioNodeStream, but no AudioNode
AudioNode* node = aEngine->NodeMainThread();
MOZ_ASSERT(!node || aGraph->GraphRate() == node->Context()->SampleRate());

dom::AudioContext::AudioContextId contextIdForStream = node ? node->Context()->Id() :
NO_AUDIO_CONTEXT;
nsRefPtr<AudioNodeStream> stream =
new AudioNodeStream(aEngine, aFlags, aGraph->GraphRate(),
contextIdForStream);
if (aEngine->HasNode()) {
stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
aEngine->NodeMainThread()->ChannelCountModeValue(),
aEngine->NodeMainThread()->ChannelInterpretationValue());
}
aGraph->AddStream(stream);
return stream.forget();
}

size_t
AudioNodeStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
@@ -490,28 +514,34 @@ AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
bool blocked = mFinished || mBlocked.GetAt(aFrom);
// If the stream has finished at this time, it will be blocked.
if (blocked || InMutedCycle()) {
mInputChunks.Clear();
for (uint16_t i = 0; i < outputCount; ++i) {
mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
}
} else {
// We need to generate at least one input
uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
OutputChunks inputChunks;
inputChunks.SetLength(maxInputs);
mInputChunks.SetLength(maxInputs);
for (uint16_t i = 0; i < maxInputs; ++i) {
ObtainInputBlock(inputChunks[i], i);
ObtainInputBlock(mInputChunks[i], i);
}
bool finished = false;
if (mPassThrough) {
MOZ_ASSERT(outputCount == 1, "For now, we only support nodes that have one output port");
mLastChunks[0] = inputChunks[0];
mLastChunks[0] = mInputChunks[0];
} else {
if (maxInputs <= 1 && outputCount <= 1) {
mEngine->ProcessBlock(this, inputChunks[0], &mLastChunks[0], &finished);
mEngine->ProcessBlock(this, mInputChunks[0], &mLastChunks[0], &finished);
} else {
mEngine->ProcessBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
mEngine->ProcessBlocksOnPorts(this, mInputChunks, mLastChunks, &finished);
}
}
for (auto& chunk : mInputChunks) {
// If the buffer is shared then it won't be reused, so release the
// reference now. Keep the channel data array to save a free/alloc
// pair.
chunk.ReleaseBufferIfShared();
}
for (uint16_t i = 0; i < outputCount; ++i) {
NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
"Invalid WebAudio chunk size");
@@ -571,7 +601,7 @@ AudioNodeStream::AdvanceOutputSegment()
StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
AudioSegment* segment = track->Get<AudioSegment>();

if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
if (mFlags & EXTERNAL_OUTPUT) {
segment->AppendAndConsumeChunk(&mLastChunks[0]);
} else {
segment->AppendNullData(mLastChunks[0].GetDuration());
@@ -43,15 +43,33 @@ public:

typedef nsAutoTArray<AudioChunk, 1> OutputChunks;

// Flags re main thread updates and stream output.
typedef unsigned Flags;
enum : Flags {
NO_STREAM_FLAGS = 0U,
NEED_MAIN_THREAD_FINISHED = 1U << 0,
NEED_MAIN_THREAD_CURRENT_TIME = 1U << 1,
// Internal AudioNodeStreams can only pass their output to another
// AudioNode, whereas external AudioNodeStreams can pass their output
// to other ProcessedMediaStreams or hardware audio output.
EXTERNAL_OUTPUT = 1U << 2,
};
/**
* Create a stream that will process audio for an AudioNode.
* Takes ownership of aEngine.
*/
static already_AddRefed<AudioNodeStream>
Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine, Flags aKind);

protected:
/**
* Transfers ownership of aEngine to the new AudioNodeStream.
*/
AudioNodeStream(AudioNodeEngine* aEngine,
MediaStreamGraph::AudioNodeStreamKind aKind,
Flags aFlags,
TrackRate aSampleRate,
AudioContext::AudioContextId aContextId);

protected:
~AudioNodeStream();

public:
@@ -111,9 +129,8 @@ public:
}
virtual bool MainThreadNeedsUpdates() const override
{
// Only source and external streams need updates on the main thread.
return (mKind == MediaStreamGraph::SOURCE_STREAM && mFinished) ||
mKind == MediaStreamGraph::EXTERNAL_STREAM;
return ((mFlags & NEED_MAIN_THREAD_FINISHED) && mFinished) ||
(mFlags & NEED_MAIN_THREAD_CURRENT_TIME);
}
virtual bool IsIntrinsicallyConsumed() const override
{
@@ -166,6 +183,9 @@ protected:

// The engine that will generate output for this node.
nsAutoPtr<AudioNodeEngine> mEngine;
// The mixed input blocks are kept from iteration to iteration to avoid
// reallocating channel data arrays.
OutputChunks mInputChunks;
// The last block produced by this node.
OutputChunks mLastChunks;
// The stream's sampling rate
@@ -174,7 +194,7 @@ protected:
// AudioContext. It is set on the main thread, in the constructor.
const AudioContext::AudioContextId mAudioContextId;
// Whether this is an internal or external stream
const MediaStreamGraph::AudioNodeStreamKind mKind;
const Flags mFlags;
// The number of input channels that this stream requires. 0 means don't care.
uint32_t mNumberOfInputChannels;
// The mixing modes
@@ -100,9 +100,8 @@ AudioParam::Stream()

AudioNodeEngine* engine = new AudioNodeEngine(nullptr);
nsRefPtr<AudioNodeStream> stream =
mNode->Context()->Graph()->CreateAudioNodeStream(engine,
MediaStreamGraph::INTERNAL_STREAM,
Node()->Context()->SampleRate());
AudioNodeStream::Create(mNode->Context()->Graph(), engine,
AudioNodeStream::NO_STREAM_FLAGS);

// Force the input to have only one channel, and make it down-mix using
// the speaker rules if needed.
@@ -250,7 +250,8 @@ BiquadFilterNode::BiquadFilterNode(AudioContext* aContext)
, mGain(new AudioParam(this, SendGainToStream, 0.f, "gain"))
{
BiquadFilterNodeEngine* engine = new BiquadFilterNodeEngine(this, aContext->Destination());
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::NO_STREAM_FLAGS);
engine->SetSourceStream(mStream);
}

@@ -73,8 +73,9 @@ ChannelMergerNode::ChannelMergerNode(AudioContext* aContext,
ChannelInterpretation::Speakers)
, mInputCount(aInputCount)
{
mStream = aContext->Graph()->CreateAudioNodeStream(new ChannelMergerNodeEngine(this),
MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(),
new ChannelMergerNodeEngine(this),
AudioNodeStream::NO_STREAM_FLAGS);
}

ChannelMergerNode::~ChannelMergerNode()
@@ -60,8 +60,9 @@ ChannelSplitterNode::ChannelSplitterNode(AudioContext* aContext,
ChannelInterpretation::Speakers)
, mOutputCount(aOutputCount)
{
mStream = aContext->Graph()->CreateAudioNodeStream(new ChannelSplitterNodeEngine(this),
MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(),
new ChannelSplitterNodeEngine(this),
AudioNodeStream::NO_STREAM_FLAGS);
}

ChannelSplitterNode::~ChannelSplitterNode()
@@ -191,7 +191,8 @@ ConvolverNode::ConvolverNode(AudioContext* aContext)
, mNormalize(true)
{
ConvolverNodeEngine* engine = new ConvolverNodeEngine(this, mNormalize);
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::NO_STREAM_FLAGS);
}

ConvolverNode::~ConvolverNode()
@@ -198,7 +198,8 @@ DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
DelayNodeEngine* engine =
new DelayNodeEngine(this, aContext->Destination(),
aContext->SampleRate() * aMaxDelay);
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::NO_STREAM_FLAGS);
engine->SetSourceStream(mStream);
}

@@ -203,7 +203,8 @@ DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* aContext)
, mRelease(new AudioParam(this, SendReleaseToStream, 0.25f, "release"))
{
DynamicsCompressorNodeEngine* engine = new DynamicsCompressorNodeEngine(this, aContext->Destination());
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::NO_STREAM_FLAGS);
engine->SetSourceStream(mStream);
}

@@ -128,7 +128,8 @@ GainNode::GainNode(AudioContext* aContext)
, mGain(new AudioParam(this, SendGainToStream, 1.0f, "gain"))
{
GainNodeEngine* engine = new GainNodeEngine(this, aContext->Destination());
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::NO_STREAM_FLAGS);
engine->SetSourceStream(mStream);
}

@@ -39,7 +39,8 @@ MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext* a
ProcessedMediaStream* outputStream = mDOMStream->GetStream()->AsProcessedStream();
MOZ_ASSERT(!!outputStream);
AudioNodeEngine* engine = new AudioNodeEngine(this);
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::EXTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::EXTERNAL_OUTPUT);
mPort = outputStream->AllocateInputPort(mStream);

nsIDocument* doc = aContext->GetParentObject()->GetExtantDoc();
@@ -39,7 +39,7 @@ MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* aContext,
mInputStream(aMediaStream)
{
AudioNodeEngine* engine = new MediaStreamAudioSourceNodeEngine(this);
mStream = aContext->Graph()->CreateAudioNodeExternalInputStream(engine);
mStream = AudioNodeExternalInputStream::Create(aContext->Graph(), engine);
ProcessedMediaStream* outputStream = static_cast<ProcessedMediaStream*>(mStream.get());
mInputPort = outputStream->AllocateInputPort(aMediaStream->GetStream(),
MediaInputPort::FLAG_BLOCK_INPUT);
@@ -384,7 +384,8 @@ OscillatorNode::OscillatorNode(AudioContext* aContext)
, mStartCalled(false)
{
OscillatorNodeEngine* engine = new OscillatorNodeEngine(this, aContext->Destination());
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::SOURCE_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::NEED_MAIN_THREAD_FINISHED);
engine->SetSourceStream(mStream);
mStream->AddMainThreadListener(this);
}
@@ -240,8 +240,9 @@ PannerNode::PannerNode(AudioContext* aContext)
, mConeOuterAngle(360.)
, mConeOuterGain(0.)
{
mStream = aContext->Graph()->CreateAudioNodeStream(new PannerNodeEngine(this),
MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(),
new PannerNodeEngine(this),
AudioNodeStream::NO_STREAM_FLAGS);
// We should register once we have set up our stream and engine.
Context()->Listener()->RegisterPannerNode(this);
}
@@ -520,7 +520,8 @@ ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext,
aContext->Destination(),
BufferSize(),
aNumberOfInputChannels);
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::NO_STREAM_FLAGS);
engine->SetSourceStream(mStream);
}

@@ -181,8 +181,8 @@ StereoPannerNode::StereoPannerNode(AudioContext* aContext)
, mPan(new AudioParam(this, SendPanToStream, 0.f, "pan"))
{
StereoPannerNodeEngine* engine = new StereoPannerNodeEngine(this, aContext->Destination());
mStream = aContext->Graph()->CreateAudioNodeStream(engine,
MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::NO_STREAM_FLAGS);
engine->SetSourceStream(mStream);
}

@@ -288,7 +288,8 @@ WaveShaperNode::WaveShaperNode(AudioContext* aContext)
mozilla::HoldJSObjects(this);

WaveShaperNodeEngine* engine = new WaveShaperNodeEngine(this);
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
mStream = AudioNodeStream::Create(aContext->Graph(), engine,
AudioNodeStream::NO_STREAM_FLAGS);
}

WaveShaperNode::~WaveShaperNode()
@@ -1242,6 +1242,7 @@ _releaseobject(NPObject* npobj)
{
if (!NS_IsMainThread()) {
NPN_PLUGIN_LOG(PLUGIN_LOG_ALWAYS,("NPN_releaseobject called from the wrong thread\n"));
MOZ_CRASH("NPN_releaseobject called from the wrong thread");
}
if (!npobj)
return;
@@ -4,6 +4,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "TiledContentHost.h"
#include "gfxPrefs.h" // for gfxPrefs
#include "PaintedLayerComposite.h" // for PaintedLayerComposite
#include "mozilla/gfx/BaseSize.h" // for BaseSize
#include "mozilla/gfx/Matrix.h" // for Matrix4x4
@@ -2652,9 +2652,6 @@ gfxFontGroup::FindNonItalicFaceForChar(gfxFontFamily* aFamily, uint32_t aCh)
}

nsRefPtr<gfxFont> font = fe->FindOrMakeFont(&mStyle, needsBold);
if (!font->Valid()) {
return nullptr;
}
return font.forget();
}

@@ -658,7 +658,7 @@ gfxWindowsPlatform::VerifyD2DDevice(bool aAttemptForce)
mozilla::gfx::Factory::SetDirect3D10Device(mD3D10Device);
}

ScopedGfxFeatureReporter reporter1_1("D2D1.1");
ScopedGfxFeatureReporter reporter1_1("D2D1.1V");

if (Factory::SupportsD2D1()) {
reporter1_1.SetSuccessful();
@ -425,20 +425,6 @@ ProgressTracker::RemoveObserver(IProgressObserver* aObserver)
  return removed;
}

bool
ProgressTracker::FirstObserverIs(IProgressObserver* aObserver)
{
  MOZ_ASSERT(NS_IsMainThread(), "Use mObservers on main thread only");
  ObserverArray::ForwardIterator iter(mObservers);
  while (iter.HasMore()) {
    nsRefPtr<IProgressObserver> observer = iter.GetNext().get();
    if (observer) {
      return observer.get() == aObserver;
    }
  }
  return false;
}

void
ProgressTracker::OnUnlockedDraw()
{

@ -155,11 +155,6 @@ public:
    return mObservers.Length();
  }

  // This is intentionally non-general because its sole purpose is to support
  // some obscure network priority logic in imgRequest. That stuff could
  // probably be improved, but it's too scary to mess with at the moment.
  bool FirstObserverIs(IProgressObserver* aObserver);

  // Resets our weak reference to our image. Image subclasses should call this
  // in their destructor.
  void ResetImage();

@ -216,15 +216,6 @@ nsPNGDecoder::EndImageFrame()
    opacity = Opacity::OPAQUE;
  }

#ifdef PNG_APNG_SUPPORTED
  uint32_t numFrames = GetFrameCount();

  // We can't use mPNG->num_frames_read as it may be one ahead.
  if (numFrames > 1) {
    PostInvalidation(mFrameRect);
  }
#endif

  PostFrameStop(opacity, mAnimInfo.mDispose, mAnimInfo.mTimeout,
                mAnimInfo.mBlend);
}

@ -64,6 +64,7 @@ imgRequest::imgRequest(imgLoader* aLoader, const ImageCacheKey& aCacheKey)
  : mLoader(aLoader)
  , mCacheKey(aCacheKey)
  , mLoadId(nullptr)
  , mFirstProxy(nullptr)
  , mValidator(nullptr)
  , mInnerWindowId(0)
  , mCORSMode(imgIRequest::CORS_NONE)
@ -218,6 +219,12 @@ imgRequest::AddProxy(imgRequestProxy* proxy)
  NS_PRECONDITION(proxy, "null imgRequestProxy passed in");
  LOG_SCOPE_WITH_PARAM(GetImgLog(), "imgRequest::AddProxy", "proxy", proxy);

  if (!mFirstProxy) {
    // Save a raw pointer to the first proxy we see, for use in the network
    // priority logic.
    mFirstProxy = proxy;
  }

  // If we're empty before adding, we have to tell the loader we now have
  // proxies.
  nsRefPtr<ProgressTracker> progressTracker = GetProgressTracker();
@ -535,8 +542,7 @@ imgRequest::AdjustPriority(imgRequestProxy* proxy, int32_t delta)
  // concern though is that image loads remain lower priority than other pieces
  // of content such as link clicks, CSS, and JS.
  //
  nsRefPtr<ProgressTracker> progressTracker = GetProgressTracker();
  if (!progressTracker->FirstObserverIs(proxy)) {
  if (!mFirstProxy || proxy != mFirstProxy) {
    return;
  }

@ -256,6 +256,10 @@ private:

  void* mLoadId;

  /// Raw pointer to the first proxy that was added to this imgRequest. Use only
  /// pointer comparisons; there's no guarantee this will remain valid.
  void* mFirstProxy;

  imgCacheValidator* mValidator;
  nsCOMPtr<nsIAsyncVerifyRedirectCallback> mRedirectCallback;
  nsCOMPtr<nsIChannel> mNewRedirectChannel;
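The imgRequest hunks drop the main-thread-only ProgressTracker::FirstObserverIs() walk and instead remember a raw pointer to the first proxy, used purely for identity comparison and never dereferenced. A standalone sketch of that pattern; the types and names here are illustrative, not the imagelib ones:

#include <cassert>
#include <vector>

struct Proxy { int priority = 0; };

class Request {
 public:
  void AddProxy(Proxy* proxy) {
    // Remember the very first proxy; the pointer is only ever compared,
    // never dereferenced, so it is fine if the proxy later goes away.
    if (!first_proxy_)
      first_proxy_ = proxy;
    proxies_.push_back(proxy);
  }

  // Only the first proxy added may adjust the request's priority, mirroring
  // the FirstObserverIs() check this patch replaces with a pointer compare.
  void AdjustPriority(Proxy* proxy, int delta) {
    if (!first_proxy_ || proxy != first_proxy_)
      return;
    priority_ += delta;
  }

  int priority() const { return priority_; }

 private:
  std::vector<Proxy*> proxies_;
  const void* first_proxy_ = nullptr;
  int priority_ = 0;
};

int main() {
  Request req;
  Proxy a, b;
  req.AddProxy(&a);
  req.AddProxy(&b);
  req.AdjustPriority(&b, 5);  // ignored: b was not the first proxy
  req.AdjustPriority(&a, 5);  // applied
  assert(req.priority() == 5);
}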
@ -153,6 +153,164 @@ class HandleBase<TraceableHashMap<A,B,C,D,E,F>>
  : public TraceableHashMapOperations<JS::Handle<TraceableHashMap<A,B,C,D,E,F>>, A,B,C,D,E,F>
{};

// A TraceableHashSet is a HashSet with an additional trace method that knows
// how to visit all set elements. HashSets that contain GC pointers that must
// be traced to be kept alive will generally want to use this TraceableHashSet
// specialization in lieu of HashSet.
//
// Most types of GC pointers can be traced with no extra infrastructure. For
// structs and non-gc-pointer members, ensure that there is a specialization of
// DefaultTracer<T> with an appropriate trace method available to handle the
// custom type.
//
// Note that although this HashSet's trace will deal correctly with moved
// elements, it does not itself know when to barrier or trace elements. To
// function properly it must either be used with Rooted or barriered and traced
// manually.
template <typename T,
          typename HashPolicy = DefaultHasher<T>,
          typename AllocPolicy = TempAllocPolicy,
          typename ElemTraceFunc = DefaultTracer<T>>
class TraceableHashSet : public HashSet<T, HashPolicy, AllocPolicy>,
                         public JS::Traceable
{
    using Base = HashSet<T, HashPolicy, AllocPolicy>;

  public:
    explicit TraceableHashSet(AllocPolicy a = AllocPolicy()) : Base(a) {}

    static void trace(TraceableHashSet* set, JSTracer* trc) { set->trace(trc); }
    void trace(JSTracer* trc) {
        if (!this->initialized())
            return;
        for (typename Base::Enum e(*this); !e.empty(); e.popFront()) {
            T elem = e.front();
            ElemTraceFunc::trace(trc, &elem, "hashset element");
            if (elem != e.front())
                e.rekeyFront(elem);
        }
    }

    // TraceableHashSet is movable
    TraceableHashSet(TraceableHashSet&& rhs) : Base(mozilla::Forward<TraceableHashSet>(rhs)) {}
    void operator=(TraceableHashSet&& rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        Base::operator=(mozilla::Forward<TraceableHashSet>(rhs));
    }

  private:
    // TraceableHashSet is not copyable or assignable
    TraceableHashSet(const TraceableHashSet& hs) = delete;
    TraceableHashSet& operator=(const TraceableHashSet& hs) = delete;
};

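The Rooted/Handle glue that follows in this hunk is the usual operations-mixin trick: each wrapper exposes the underlying container through an extract() hook, and CRTP mixin classes layer read-only or mutating methods on top of whichever wrapper they are attached to. A generic, non-SpiderMonkey sketch of that shape, with std::unordered_set standing in for the traceable set and all names invented for the example:

#include <cassert>
#include <unordered_set>

// Read-only operations, shared by every wrapper that can hand out a const
// reference to the underlying set via extract().
template <typename Outer, typename T>
class SetOperations {
  const std::unordered_set<T>& set() const { return static_cast<const Outer*>(this)->extract(); }
 public:
  bool empty() const { return set().empty(); }
  size_t count() const { return set().size(); }
  bool has(const T& v) const { return set().find(v) != set().end(); }
};

// Mutating operations layered on top, for wrappers that expose a non-const set.
template <typename Outer, typename T>
class MutableSetOperations : public SetOperations<Outer, T> {
  std::unordered_set<T>& set() { return static_cast<Outer*>(this)->extract(); }
 public:
  bool put(const T& v) { return set().insert(v).second; }
  void remove(const T& v) { set().erase(v); }
};

// A "rooted"-style owner gets the mutable mixin; a "handle"-style view would
// derive only from SetOperations.
template <typename T>
class RootedSet : public MutableSetOperations<RootedSet<T>, T> {
  std::unordered_set<T> storage_;
  friend class SetOperations<RootedSet<T>, T>;
  friend class MutableSetOperations<RootedSet<T>, T>;
  const std::unordered_set<T>& extract() const { return storage_; }
  std::unordered_set<T>& extract() { return storage_; }
};

int main() {
  RootedSet<int> s;
  s.put(1);
  assert(s.has(1) && s.count() == 1);
  s.remove(1);
  assert(s.empty());
}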
template <typename Outer, typename... Args>
|
||||
class TraceableHashSetOperations
|
||||
{
|
||||
using Set = TraceableHashSet<Args...>;
|
||||
using Lookup = typename Set::Lookup;
|
||||
using Ptr = typename Set::Ptr;
|
||||
using AddPtr = typename Set::AddPtr;
|
||||
using Range = typename Set::Range;
|
||||
using Enum = typename Set::Enum;
|
||||
|
||||
const Set& set() const { return static_cast<const Outer*>(this)->extract(); }
|
||||
|
||||
public:
|
||||
bool initialized() const { return set().initialized(); }
|
||||
Ptr lookup(const Lookup& l) const { return set().lookup(l); }
|
||||
AddPtr lookupForAdd(const Lookup& l) const { return set().lookupForAdd(l); }
|
||||
Range all() const { return set().all(); }
|
||||
bool empty() const { return set().empty(); }
|
||||
uint32_t count() const { return set().count(); }
|
||||
size_t capacity() const { return set().capacity(); }
|
||||
uint32_t generation() const { return set().generation(); }
|
||||
bool has(const Lookup& l) const { return set().lookup(l).found(); }
|
||||
};
|
||||
|
||||
template <typename Outer, typename... Args>
|
||||
class MutableTraceableHashSetOperations
|
||||
: public TraceableHashSetOperations<Outer, Args...>
|
||||
{
|
||||
using Set = TraceableHashSet<Args...>;
|
||||
using Lookup = typename Set::Lookup;
|
||||
using Ptr = typename Set::Ptr;
|
||||
using AddPtr = typename Set::AddPtr;
|
||||
using Range = typename Set::Range;
|
||||
using Enum = typename Set::Enum;
|
||||
|
||||
Set& set() { return static_cast<Outer*>(this)->extract(); }
|
||||
|
||||
public:
|
||||
bool init(uint32_t len = 16) { return set().init(len); }
|
||||
void clear() { set().clear(); }
|
||||
void finish() { set().finish(); }
|
||||
void remove(const Lookup& l) { set().remove(l); }
|
||||
|
||||
template<typename TInput>
|
||||
bool add(AddPtr& p, TInput&& t) {
|
||||
return set().add(p, mozilla::Forward<TInput>(t));
|
||||
}
|
||||
|
||||
template<typename TInput>
|
||||
bool relookupOrAdd(AddPtr& p, const Lookup& l, TInput&& t) {
|
||||
return set().relookupOrAdd(p, l, mozilla::Forward<TInput>(t));
|
||||
}
|
||||
|
||||
template<typename TInput>
|
||||
bool put(TInput&& t) {
|
||||
return set().put(mozilla::Forward<TInput>(t));
|
||||
}
|
||||
|
||||
template<typename TInput>
|
||||
bool putNew(TInput&& t) {
|
||||
return set().putNew(mozilla::Forward<TInput>(t));
|
||||
}
|
||||
|
||||
template<typename TInput>
|
||||
bool putNew(const Lookup& l, TInput&& t) {
|
||||
return set().putNew(l, mozilla::Forward<TInput>(t));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T, typename HP, typename AP, typename TF>
|
||||
class RootedBase<TraceableHashSet<T, HP, AP, TF>>
|
||||
: public MutableTraceableHashSetOperations<JS::Rooted<TraceableHashSet<T, HP, AP, TF>>, T, HP, AP, TF>
|
||||
{
|
||||
using Set = TraceableHashSet<T, HP, AP, TF>;
|
||||
|
||||
friend class TraceableHashSetOperations<JS::Rooted<Set>, T, HP, AP, TF>;
|
||||
const Set& extract() const { return *static_cast<const JS::Rooted<Set>*>(this)->address(); }
|
||||
|
||||
friend class MutableTraceableHashSetOperations<JS::Rooted<Set>, T, HP, AP, TF>;
|
||||
Set& extract() { return *static_cast<JS::Rooted<Set>*>(this)->address(); }
|
||||
};
|
||||
|
||||
template <typename T, typename HP, typename AP, typename TF>
|
||||
class MutableHandleBase<TraceableHashSet<T, HP, AP, TF>>
|
||||
: public MutableTraceableHashSetOperations<JS::MutableHandle<TraceableHashSet<T, HP, AP, TF>>,
|
||||
T, HP, AP, TF>
|
||||
{
|
||||
using Set = TraceableHashSet<T, HP, AP, TF>;
|
||||
|
||||
friend class TraceableHashSetOperations<JS::MutableHandle<Set>, T, HP, AP, TF>;
|
||||
const Set& extract() const {
|
||||
return *static_cast<const JS::MutableHandle<Set>*>(this)->address();
|
||||
}
|
||||
|
||||
friend class MutableTraceableHashSetOperations<JS::MutableHandle<Set>, T, HP, AP, TF>;
|
||||
Set& extract() { return *static_cast<JS::MutableHandle<Set>*>(this)->address(); }
|
||||
};
|
||||
|
||||
template <typename T, typename HP, typename AP, typename TF>
|
||||
class HandleBase<TraceableHashSet<T, HP, AP, TF>>
|
||||
: public TraceableHashSetOperations<JS::Handle<TraceableHashSet<T, HP, AP, TF>>, T, HP, AP, TF>
|
||||
{
|
||||
using Set = TraceableHashSet<T, HP, AP, TF>;
|
||||
friend class TraceableHashSetOperations<JS::Handle<Set>, T, HP, AP, TF>;
|
||||
const Set& extract() const { return *static_cast<const JS::Handle<Set>*>(this)->address(); }
|
||||
};
|
||||
|
||||
} /* namespace js */
|
||||
|
||||
#endif /* gc_HashTable_h */
|
||||
|
@ -539,6 +539,12 @@ TryEnablingJit(JSContext* cx, AsmJSModule& module, HandleFunction fun, uint32_t
|
||||
return true;
|
||||
}
|
||||
|
||||
// Don't enable jit entry when we have a pending ion builder.
|
||||
// Take the interpreter path which will link it and enable
|
||||
// the fast path on the next call.
|
||||
if (script->baselineScript()->hasPendingIonBuilder())
|
||||
return true;
|
||||
|
||||
// Currently we can't rectify arguments. Therefore disabling if argc is too low.
|
||||
if (fun->nargs() > size_t(argc))
|
||||
return true;
|
||||
|
@ -489,8 +489,6 @@ case "$target" in
|
||||
fi
|
||||
|
||||
AC_DEFINE_UNQUOTED(MOZ_WINSDK_TARGETVER,0x$MOZ_WINSDK_TARGETVER)
|
||||
# Definitions matching sdkddkver.h
|
||||
AC_DEFINE_UNQUOTED(MOZ_NTDDI_WIN7, 0x06010000)
|
||||
;;
|
||||
esac
|
||||
|
||||
|
9
js/src/jit-test/tests/basic/bug1195298.js
Normal file
9
js/src/jit-test/tests/basic/bug1195298.js
Normal file
@ -0,0 +1,9 @@
|
||||
function t() {
|
||||
var o = {l: 0xfffffffff};
|
||||
var l = o.l - 0xffffffffe;
|
||||
var a = getSelfHostedValue('NewDenseArray');
|
||||
var arr = a(l);
|
||||
assertEq(arr.length, 1);
|
||||
}
|
||||
t();
|
||||
t();
|
18
js/src/jit-test/tests/ion/bug1196648.js
Normal file
18
js/src/jit-test/tests/ion/bug1196648.js
Normal file
@ -0,0 +1,18 @@
|
||||
h = function(m, foreign, n) {
|
||||
"use asm";
|
||||
var ff = foreign.ff;
|
||||
function f(x) {
|
||||
x = +x;
|
||||
ff();
|
||||
}
|
||||
return f;
|
||||
}(0, {
|
||||
ff: function() {
|
||||
return {
|
||||
e: String.prototype.substring
|
||||
};
|
||||
}
|
||||
}, 0);
|
||||
for (var k = 0; k < 999; k++) {
|
||||
h();
|
||||
}
|
@ -478,6 +478,21 @@ BaselineScript::Destroy(FreeOp* fop, BaselineScript* script)
|
||||
fop->delete_(script);
|
||||
}
|
||||
|
||||
void
|
||||
BaselineScript::clearDependentAsmJSModules()
|
||||
{
|
||||
// Remove any links from AsmJSModules that contain optimized FFI calls into
|
||||
// this BaselineScript.
|
||||
if (dependentAsmJSModules_) {
|
||||
for (size_t i = 0; i < dependentAsmJSModules_->length(); i++) {
|
||||
DependentAsmJSModuleExit exit = (*dependentAsmJSModules_)[i];
|
||||
exit.module->detachJitCompilation(exit.exitIndex);
|
||||
}
|
||||
|
||||
dependentAsmJSModules_->clear();
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
BaselineScript::unlinkDependentAsmJSModules(FreeOp* fop)
|
||||
{
|
||||
|
@ -401,6 +401,7 @@ struct BaselineScript
|
||||
|
||||
bool addDependentAsmJSModule(JSContext* cx, DependentAsmJSModuleExit exit);
|
||||
void unlinkDependentAsmJSModules(FreeOp* fop);
|
||||
void clearDependentAsmJSModules();
|
||||
void removeDependentAsmJSModule(DependentAsmJSModuleExit exit);
|
||||
|
||||
// Toggle debug traps (used for breakpoints and step mode) in the script.
|
||||
@ -477,6 +478,9 @@ struct BaselineScript
|
||||
|
||||
pendingBuilder_ = builder;
|
||||
|
||||
// lazy linking cannot happen during asmjs to ion.
|
||||
clearDependentAsmJSModules();
|
||||
|
||||
script->updateBaselineOrIonRaw(maybecx);
|
||||
}
|
||||
void removePendingIonBuilder(JSScript* script) {
|
||||
|
@ -229,6 +229,7 @@ JitRuntime::initialize(JSContext* cx)
|
||||
if (class_ == FrameSizeClass::ClassLimit())
|
||||
break;
|
||||
bailoutTables_.infallibleAppend((JitCode*)nullptr);
|
||||
JitSpew(JitSpew_Codegen, "# Bailout table");
|
||||
bailoutTables_[id] = generateBailoutTable(cx, id);
|
||||
if (!bailoutTables_[id])
|
||||
return false;
|
||||
@ -297,6 +298,7 @@ JitRuntime::initialize(JSContext* cx)
|
||||
|
||||
JitSpew(JitSpew_Codegen, "# Emitting VM function wrappers");
|
||||
for (VMFunction* fun = VMFunction::functions; fun; fun = fun->next) {
|
||||
JitSpew(JitSpew_Codegen, "# VM function wrapper");
|
||||
if (!generateVMWrapper(cx, *fun))
|
||||
return false;
|
||||
}
|
||||
|
@ -10,9 +10,13 @@
|
||||
#include "mozilla/MathAlgorithms.h"
|
||||
|
||||
#include "jscompartment.h"
|
||||
#ifdef JS_DISASM_ARM
|
||||
#include "jsprf.h"
|
||||
#endif
|
||||
#include "jsutil.h"
|
||||
|
||||
#include "gc/Marking.h"
|
||||
#include "jit/arm/disasm/Disasm-arm.h"
|
||||
#include "jit/arm/MacroAssembler-arm.h"
|
||||
#include "jit/ExecutableAllocator.h"
|
||||
#include "jit/JitCompartment.h"
|
||||
@ -1405,18 +1409,260 @@ Assembler::bytesNeeded() const
|
||||
preBarrierTableBytes();
|
||||
}
|
||||
|
||||
#ifdef JS_DISASM_ARM
|
||||
|
||||
// Labels are named as they are encountered by adding names to a
|
||||
// table, using the Label address as the key. This is made tricky by
|
||||
// the (memory for) Label objects being reused, but reused label
|
||||
// objects are recognizable from being marked as not used or not
|
||||
// bound. See spewResolve().
|
||||
//
|
||||
// In a number of cases there is no information about the target, and
|
||||
// we just end up printing "patchable constant load to PC". This is
|
||||
// true especially for jumps to bailout handlers (which have no
|
||||
// names). See spewData() and its callers. In some cases (loop back
|
||||
// edges) some information about the intended target may be propagated
|
||||
// from higher levels, and if so it's printed here.
|
||||
|
||||
void
|
||||
Assembler::spew(Instruction* i)
|
||||
{
|
||||
if (spewDisabled() || !i)
|
||||
return;
|
||||
disasm::NameConverter converter;
|
||||
disasm::Disassembler dasm(converter);
|
||||
disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
|
||||
uint8_t* loc = reinterpret_cast<uint8_t*>(const_cast<uint32_t*>(i->raw()));
|
||||
dasm.InstructionDecode(buffer, loc);
|
||||
spew(" %08x %s", reinterpret_cast<uint32_t>(loc), buffer.start());
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::spewTarget(Label* target)
|
||||
{
|
||||
if (spewDisabled())
|
||||
return;
|
||||
spew(" -> %d%s", spewResolve(target), !target->bound() ? "f" : "");
|
||||
}
|
||||
|
||||
// If a target label is known, always print that and do not attempt to
|
||||
// disassemble the branch operands, as they will often be encoding
|
||||
// metainformation (pointers for a chain of jump instructions), and
|
||||
// not actual branch targets.
|
||||
|
||||
void
|
||||
Assembler::spewBranch(Instruction* i, Label* target /* may be nullptr */)
|
||||
{
|
||||
if (spewDisabled() || !i)
|
||||
return;
|
||||
disasm::NameConverter converter;
|
||||
disasm::Disassembler dasm(converter);
|
||||
disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
|
||||
uint8_t* loc = reinterpret_cast<uint8_t*>(const_cast<uint32_t*>(i->raw()));
|
||||
dasm.InstructionDecode(buffer, loc);
|
||||
char labelBuf[128];
|
||||
labelBuf[0] = 0;
|
||||
if (!target)
|
||||
JS_snprintf(labelBuf, sizeof(labelBuf), " -> (link-time target)");
|
||||
if (InstBranchImm::IsTHIS(*i)) {
|
||||
InstBranchImm* bimm = InstBranchImm::AsTHIS(*i);
|
||||
BOffImm destOff;
|
||||
bimm->extractImm(&destOff);
|
||||
if (destOff.isInvalid() || target) {
|
||||
// The target information in the instruction is likely garbage, so remove it.
|
||||
// The target label will in any case be printed if we have it.
|
||||
//
|
||||
// The format of the instruction disassembly is [0-9a-f]{8}\s+\S+\s+.*,
|
||||
// where the \S+ string is the opcode. Strip everything after the opcode,
|
||||
// and attach the label if we have it.
|
||||
int i;
|
||||
for ( i=8 ; i < buffer.length() && buffer[i] == ' ' ; i++ )
|
||||
;
|
||||
for ( ; i < buffer.length() && buffer[i] != ' ' ; i++ )
|
||||
;
|
||||
buffer[i] = 0;
|
||||
if (target) {
|
||||
JS_snprintf(labelBuf, sizeof(labelBuf), " -> %d%s", spewResolve(target),
|
||||
!target->bound() ? "f" : "");
|
||||
target = nullptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
spew(" %08x %s%s", reinterpret_cast<uint32_t>(loc), buffer.start(), labelBuf);
|
||||
if (target)
|
||||
spewTarget(target);
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::spewLabel(Label* l)
|
||||
{
|
||||
if (spewDisabled())
|
||||
return;
|
||||
spew(" %d:", spewResolve(l));
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::spewRetarget(Label* label, Label* target)
|
||||
{
|
||||
if (spewDisabled())
|
||||
return;
|
||||
spew(" %d: .retarget -> %d%s",
|
||||
spewResolve(label), spewResolve(target), !target->bound() ? "f" : "");
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::spewData(BufferOffset addr, size_t numInstr, bool loadToPC)
|
||||
{
|
||||
if (spewDisabled())
|
||||
return;
|
||||
Instruction* inst = m_buffer.getInstOrNull(addr);
|
||||
if (!inst)
|
||||
return;
|
||||
uint32_t *instr = reinterpret_cast<uint32_t*>(inst);
|
||||
for ( size_t k=0 ; k < numInstr ; k++ ) {
|
||||
spew(" %08x %08x (patchable constant load%s)",
|
||||
reinterpret_cast<uint32_t>(instr+k), *(instr+k), loadToPC ? " to PC" : "");
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
Assembler::spewDisabled()
|
||||
{
|
||||
return !(JitSpewEnabled(JitSpew_Codegen) || printer_);
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::spew(const char* fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
va_start(args, fmt);
|
||||
spew(fmt, args);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::spew(const char* fmt, va_list va)
|
||||
{
|
||||
if (printer_) {
|
||||
printer_->vprintf(fmt, va);
|
||||
printer_->put("\n");
|
||||
}
|
||||
js::jit::JitSpewVA(js::jit::JitSpew_Codegen, fmt, va);
|
||||
}
|
||||
|
||||
uint32_t
|
||||
Assembler::spewResolve(Label* l)
|
||||
{
|
||||
// Note, spewResolve will sometimes return 0 when it is triggered
|
||||
// by the profiler and not by a full disassembly, since in that
|
||||
// case a label can be used or bound but not previously have been
|
||||
// defined.
|
||||
return l->used() || l->bound() ? spewProbe(l) : spewDefine(l);
|
||||
}
|
||||
|
||||
uint32_t
|
||||
Assembler::spewProbe(Label* l)
|
||||
{
|
||||
uint32_t key = reinterpret_cast<uint32_t>(l);
|
||||
uint32_t value = 0;
|
||||
spewNodes_.lookup(key, &value);
|
||||
return value;
|
||||
}
|
||||
|
||||
uint32_t
|
||||
Assembler::spewDefine(Label* l)
|
||||
{
|
||||
uint32_t key = reinterpret_cast<uint32_t>(l);
|
||||
spewNodes_.remove(key);
|
||||
uint32_t value = spewNext_++;
|
||||
if (!spewNodes_.add(key, value))
|
||||
return 0;
|
||||
return value;
|
||||
}
|
||||
|
||||
Assembler::SpewNodes::~SpewNodes()
|
||||
{
|
||||
Node* p = nodes;
|
||||
while (p) {
|
||||
Node* victim = p;
|
||||
p = p->next;
|
||||
js_free(victim);
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
Assembler::SpewNodes::lookup(uint32_t key, uint32_t* value)
|
||||
{
|
||||
for ( Node* p = nodes ; p ; p = p->next ) {
|
||||
if (p->key == key) {
|
||||
*value = p->value;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
Assembler::SpewNodes::add(uint32_t key, uint32_t value)
|
||||
{
|
||||
Node* node = (Node*)js_malloc(sizeof(Node));
|
||||
if (!node)
|
||||
return false;
|
||||
node->key = key;
|
||||
node->value = value;
|
||||
node->next = nodes;
|
||||
nodes = node;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
Assembler::SpewNodes::remove(uint32_t key)
|
||||
{
|
||||
for ( Node* p = nodes, *pp = nullptr ; p ; pp = p, p = p->next ) {
|
||||
if (p->key == key) {
|
||||
if (pp)
|
||||
pp->next = p->next;
|
||||
else
|
||||
nodes = p->next;
|
||||
js_free(p);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif // JS_DISASM_ARM
|
||||
|
||||
// Write a blob of binary into the instruction stream.
|
||||
BufferOffset
|
||||
Assembler::writeInst(uint32_t x)
|
||||
{
|
||||
return m_buffer.putInt(x);
|
||||
BufferOffset offs = m_buffer.putInt(x);
|
||||
#ifdef JS_DISASM_ARM
|
||||
spew(m_buffer.getInstOrNull(offs));
|
||||
#endif
|
||||
return offs;
|
||||
}
|
||||
|
||||
BufferOffset
|
||||
Assembler::writeBranchInst(uint32_t x)
|
||||
Assembler::writeBranchInst(uint32_t x, Label* documentation)
|
||||
{
|
||||
return m_buffer.putInt(x, /* markAsBranch = */ true);
|
||||
BufferOffset offs = m_buffer.putInt(x, /* markAsBranch = */ true);
|
||||
#ifdef JS_DISASM_ARM
|
||||
spewBranch(m_buffer.getInstOrNull(offs), documentation);
|
||||
#endif
|
||||
return offs;
|
||||
}
|
||||
|
||||
// Allocate memory for a branch instruction, it will be overwritten
|
||||
// subsequently and should not be disassembled.
|
||||
|
||||
BufferOffset
|
||||
Assembler::allocBranchInst()
|
||||
{
|
||||
return m_buffer.putInt(Always | InstNOP::NopInst, /* markAsBranch = */ true);
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::WriteInstStatic(uint32_t x, uint32_t* dest)
|
||||
{
|
||||
@ -1867,12 +2113,34 @@ Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
|
||||
return writeInst(0x08000000 | RN(rn) | ls | mode | mask | c | wb);
|
||||
}
|
||||
|
||||
// Note, it's possible for markAsBranch and loadToPC to disagree,
|
||||
// because some loads to the PC are not necessarily encoding
|
||||
// instructions that should be marked as branches: only patchable
|
||||
// near branch instructions should be marked.
|
||||
|
||||
BufferOffset
|
||||
Assembler::allocEntry(size_t numInst, unsigned numPoolEntries,
|
||||
uint8_t* inst, uint8_t* data, ARMBuffer::PoolEntry* pe,
|
||||
bool markAsBranch, bool loadToPC)
|
||||
{
|
||||
BufferOffset offs = m_buffer.allocEntry(numInst, numPoolEntries, inst, data, pe, markAsBranch);
|
||||
#ifdef JS_DISASM_ARM
|
||||
spewData(offs, numInst, loadToPC);
|
||||
#endif
|
||||
return offs;
|
||||
}
|
||||
|
||||
// This is also used for instructions that might be resolved into branches,
|
||||
// or might not. If dest==pc then it is effectively a branch.
|
||||
|
||||
BufferOffset
|
||||
Assembler::as_Imm32Pool(Register dest, uint32_t value, Condition c)
|
||||
{
|
||||
PoolHintPun php;
|
||||
php.phd.init(0, c, PoolHintData::PoolDTR, dest);
|
||||
return m_buffer.allocEntry(1, 1, (uint8_t*)&php.raw, (uint8_t*)&value);
|
||||
BufferOffset offs = allocEntry(1, 1, (uint8_t*)&php.raw, (uint8_t*)&value, nullptr, false,
|
||||
dest == pc);
|
||||
return offs;
|
||||
}
|
||||
|
||||
/* static */ void
|
||||
@ -1891,12 +2159,13 @@ Assembler::WritePoolEntry(Instruction* addr, Condition c, uint32_t data)
|
||||
}
|
||||
|
||||
BufferOffset
|
||||
Assembler::as_BranchPool(uint32_t value, RepatchLabel* label, ARMBuffer::PoolEntry* pe, Condition c)
|
||||
Assembler::as_BranchPool(uint32_t value, RepatchLabel* label, ARMBuffer::PoolEntry* pe, Condition c,
|
||||
Label* documentation)
|
||||
{
|
||||
PoolHintPun php;
|
||||
php.phd.init(0, c, PoolHintData::PoolBranch, pc);
|
||||
BufferOffset ret = m_buffer.allocEntry(1, 1, (uint8_t*)&php.raw, (uint8_t*)&value, pe,
|
||||
/* markAsBranch = */ true);
|
||||
BufferOffset ret = allocEntry(1, 1, (uint8_t*)&php.raw, (uint8_t*)&value, pe,
|
||||
/* markAsBranch = */ true, /* loadToPC = */ true);
|
||||
// If this label is already bound, then immediately replace the stub load
|
||||
// with a correct branch.
|
||||
if (label->bound()) {
|
||||
@ -1905,6 +2174,10 @@ Assembler::as_BranchPool(uint32_t value, RepatchLabel* label, ARMBuffer::PoolEnt
|
||||
} else {
|
||||
label->use(ret.getOffset());
|
||||
}
|
||||
#ifdef JS_DISASM_ARM
|
||||
if (documentation)
|
||||
spewTarget(documentation);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1914,7 +2187,7 @@ Assembler::as_FImm64Pool(VFPRegister dest, double value, Condition c)
|
||||
MOZ_ASSERT(dest.isDouble());
|
||||
PoolHintPun php;
|
||||
php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
|
||||
return m_buffer.allocEntry(1, 2, (uint8_t*)&php.raw, (uint8_t*)&value);
|
||||
return allocEntry(1, 2, (uint8_t*)&php.raw, (uint8_t*)&value);
|
||||
}
|
||||
|
||||
BufferOffset
|
||||
@ -1926,7 +2199,7 @@ Assembler::as_FImm32Pool(VFPRegister dest, float value, Condition c)
|
||||
MOZ_ASSERT(dest.isSingle());
|
||||
PoolHintPun php;
|
||||
php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
|
||||
return m_buffer.allocEntry(1, 1, (uint8_t*)&php.raw, (uint8_t*)&value);
|
||||
return allocEntry(1, 1, (uint8_t*)&php.raw, (uint8_t*)&value);
|
||||
}
|
||||
|
||||
// Pool callbacks stuff:
|
||||
@ -2086,9 +2359,9 @@ Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest, BufferOffset a
|
||||
// Branch can branch to an immediate *or* to a register.
|
||||
// Branches to immediates are pc relative, branches to registers are absolute.
|
||||
BufferOffset
|
||||
Assembler::as_b(BOffImm off, Condition c)
|
||||
Assembler::as_b(BOffImm off, Condition c, Label* documentation)
|
||||
{
|
||||
BufferOffset ret = writeBranchInst(((int)c) | OpB | off.encode());
|
||||
BufferOffset ret = writeBranchInst(((int)c) | OpB | off.encode(), documentation);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -2102,8 +2375,11 @@ Assembler::as_b(Label* l, Condition c)
|
||||
|
||||
if (l->bound()) {
|
||||
// Note only one instruction is emitted here, the NOP is overwritten.
|
||||
BufferOffset ret = writeBranchInst(Always | InstNOP::NopInst);
|
||||
BufferOffset ret = allocBranchInst();
|
||||
as_b(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
|
||||
#ifdef JS_DISASM_ARM
|
||||
spewBranch(m_buffer.getInstOrNull(ret), l);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -2117,11 +2393,11 @@ Assembler::as_b(Label* l, Condition c)
|
||||
m_buffer.fail_bail();
|
||||
return ret;
|
||||
}
|
||||
ret = as_b(BOffImm(old), c);
|
||||
ret = as_b(BOffImm(old), c, l);
|
||||
} else {
|
||||
old = LabelBase::INVALID_OFFSET;
|
||||
BOffImm inv;
|
||||
ret = as_b(inv, c);
|
||||
ret = as_b(inv, c, l);
|
||||
}
|
||||
DebugOnly<int32_t> check = l->use(ret.getOffset());
|
||||
MOZ_ASSERT(check == old);
|
||||
@ -2131,6 +2407,8 @@ Assembler::as_b(Label* l, Condition c)
|
||||
BufferOffset
|
||||
Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
|
||||
{
|
||||
// JS_DISASM_ARM NOTE: Can't disassemble here, because numerous callers use this to
|
||||
// patchup old code. Must disassemble in caller where it makes sense. Not many callers.
|
||||
*editSrc(inst) = InstBImm(off, c);
|
||||
return inst;
|
||||
}
|
||||
@ -2149,9 +2427,9 @@ Assembler::as_blx(Register r, Condition c)
|
||||
// bl can only branch to an pc-relative immediate offset
|
||||
// It cannot change the processor state.
|
||||
BufferOffset
|
||||
Assembler::as_bl(BOffImm off, Condition c)
|
||||
Assembler::as_bl(BOffImm off, Condition c, Label* documentation)
|
||||
{
|
||||
return writeBranchInst(((int)c) | OpBl | off.encode());
|
||||
return writeBranchInst(((int)c) | OpBl | off.encode(), documentation);
|
||||
}
|
||||
|
||||
BufferOffset
|
||||
@ -2164,8 +2442,11 @@ Assembler::as_bl(Label* l, Condition c)
|
||||
|
||||
if (l->bound()) {
|
||||
// Note only one instruction is emitted here, the NOP is overwritten.
|
||||
BufferOffset ret = writeBranchInst(Always | InstNOP::NopInst);
|
||||
BufferOffset ret = allocBranchInst();
|
||||
as_bl(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
|
||||
#ifdef JS_DISASM_ARM
|
||||
spewBranch(m_buffer.getInstOrNull(ret), l);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -2180,11 +2461,11 @@ Assembler::as_bl(Label* l, Condition c)
|
||||
m_buffer.fail_bail();
|
||||
return ret;
|
||||
}
|
||||
ret = as_bl(BOffImm(old), c);
|
||||
ret = as_bl(BOffImm(old), c, l);
|
||||
} else {
|
||||
old = LabelBase::INVALID_OFFSET;
|
||||
BOffImm inv;
|
||||
ret = as_bl(inv, c);
|
||||
ret = as_bl(inv, c, l);
|
||||
}
|
||||
DebugOnly<int32_t> check = l->use(ret.getOffset());
|
||||
MOZ_ASSERT(check == old);
|
||||
@ -2508,6 +2789,9 @@ Assembler::nextLink(BufferOffset b, BufferOffset* next)
|
||||
void
|
||||
Assembler::bind(Label* label, BufferOffset boff)
|
||||
{
|
||||
#ifdef JS_DISASM_ARM
|
||||
spewLabel(label);
|
||||
#endif
|
||||
if (label->used()) {
|
||||
bool more;
|
||||
// If our caller didn't give us an explicit target to bind to then we
|
||||
@ -2535,6 +2819,9 @@ Assembler::bind(Label* label, BufferOffset boff)
|
||||
void
|
||||
Assembler::bind(RepatchLabel* label)
|
||||
{
|
||||
// It does not seem to be useful to record this label for
|
||||
// disassembly, as the value that is bound to the label is often
|
||||
// effectively garbage and is replaced by something else later.
|
||||
BufferOffset dest = nextOffset();
|
||||
if (label->used()) {
|
||||
// If the label has a use, then change this use to refer to the bound
|
||||
@ -2558,6 +2845,9 @@ Assembler::bind(RepatchLabel* label)
|
||||
void
|
||||
Assembler::retarget(Label* label, Label* target)
|
||||
{
|
||||
#ifdef JS_DISASM_ARM
|
||||
spewRetarget(label, target);
|
||||
#endif
|
||||
if (label->used()) {
|
||||
if (target->bound()) {
|
||||
bind(label, BufferOffset(target));
|
||||
|
@ -1228,13 +1228,24 @@ class Assembler : public AssemblerShared
|
||||
}
|
||||
|
||||
protected:
|
||||
BufferOffset labelOffset (Label* l) {
|
||||
return BufferOffset(l->bound());
|
||||
}
|
||||
// Shim around AssemblerBufferWithConstantPools::allocEntry.
|
||||
BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
|
||||
uint8_t* inst, uint8_t* data, ARMBuffer::PoolEntry* pe = nullptr,
|
||||
bool markAsBranch = false, bool loadToPC = false);
|
||||
|
||||
Instruction* editSrc (BufferOffset bo) {
|
||||
return m_buffer.getInst(bo);
|
||||
}
|
||||
|
||||
#ifdef JS_DISASM_ARM
|
||||
void spew(Instruction* i);
|
||||
void spewBranch(Instruction* i, Label* target);
|
||||
void spewData(BufferOffset addr, size_t numInstr, bool loadToPC);
|
||||
void spewLabel(Label* label);
|
||||
void spewRetarget(Label* label, Label* target);
|
||||
void spewTarget(Label* l);
|
||||
#endif
|
||||
|
||||
public:
|
||||
void resetCounter();
|
||||
uint32_t actualOffset(uint32_t) const;
|
||||
@ -1272,11 +1283,47 @@ class Assembler : public AssemblerShared
|
||||
|
||||
ARMBuffer m_buffer;
|
||||
|
||||
#ifdef JS_DISASM_ARM
|
||||
private:
|
||||
class SpewNodes {
|
||||
struct Node {
|
||||
uint32_t key;
|
||||
uint32_t value;
|
||||
Node* next;
|
||||
};
|
||||
|
||||
Node* nodes;
|
||||
|
||||
public:
|
||||
SpewNodes() : nodes(nullptr) {}
|
||||
~SpewNodes();
|
||||
|
||||
bool lookup(uint32_t key, uint32_t* value);
|
||||
bool add(uint32_t key, uint32_t value);
|
||||
bool remove(uint32_t key);
|
||||
};
|
||||
|
||||
SpewNodes spewNodes_;
|
||||
uint32_t spewNext_;
|
||||
Sprinter* printer_;
|
||||
|
||||
bool spewDisabled();
|
||||
uint32_t spewResolve(Label* l);
|
||||
uint32_t spewProbe(Label* l);
|
||||
uint32_t spewDefine(Label* l);
|
||||
void spew(const char* fmt, ...);
|
||||
void spew(const char* fmt, va_list args);
|
||||
#endif
|
||||
|
||||
public:
|
||||
// For the alignment fill use NOP: 0x0320f000 or (Always | InstNOP::NopInst).
|
||||
// For the nopFill use a branch to the next instruction: 0xeaffffff.
|
||||
Assembler()
|
||||
: m_buffer(1, 1, 8, GetPoolMaxOffset(), 8, 0xe320f000, 0xeaffffff, GetNopFill()),
|
||||
#ifdef JS_DISASM_ARM
|
||||
spewNext_(1000),
|
||||
printer_(nullptr),
|
||||
#endif
|
||||
isFinished(false),
|
||||
dtmActive(false),
|
||||
dtmCond(Always)
|
||||
@ -1336,6 +1383,9 @@ class Assembler : public AssemblerShared
|
||||
bool oom() const;
|
||||
|
||||
void setPrinter(Sprinter* sp) {
|
||||
#ifdef JS_DISASM_ARM
|
||||
printer_ = sp;
|
||||
#endif
|
||||
}
|
||||
|
||||
static const Register getStackPointer() {
|
||||
@ -1374,7 +1424,12 @@ class Assembler : public AssemblerShared
|
||||
BufferOffset writeInst(uint32_t x);
|
||||
|
||||
// As above, but also mark the instruction as a branch.
|
||||
BufferOffset writeBranchInst(uint32_t x);
|
||||
BufferOffset writeBranchInst(uint32_t x, Label* documentation = nullptr);
|
||||
|
||||
// Write a placeholder NOP for a branch into the instruction stream
|
||||
// (in order to adjust assembler addresses and mark it as a branch), it will
|
||||
// be overwritten subsequently.
|
||||
BufferOffset allocBranchInst();
|
||||
|
||||
// A static variant for the cases where we don't want to have an assembler
|
||||
// object.
|
||||
@ -1486,7 +1541,8 @@ class Assembler : public AssemblerShared
|
||||
BufferOffset as_Imm32Pool(Register dest, uint32_t value, Condition c = Always);
|
||||
// Make a patchable jump that can target the entire 32 bit address space.
|
||||
BufferOffset as_BranchPool(uint32_t value, RepatchLabel* label,
|
||||
ARMBuffer::PoolEntry* pe = nullptr, Condition c = Always);
|
||||
ARMBuffer::PoolEntry* pe = nullptr, Condition c = Always,
|
||||
Label* documentation = nullptr);
|
||||
|
||||
// Load a 64 bit floating point immediate from a pool into a register.
|
||||
BufferOffset as_FImm64Pool(VFPRegister dest, double value, Condition c = Always);
|
||||
@ -1528,7 +1584,7 @@ class Assembler : public AssemblerShared
|
||||
|
||||
// Branch can branch to an immediate *or* to a register. Branches to
|
||||
// immediates are pc relative, branches to registers are absolute.
|
||||
BufferOffset as_b(BOffImm off, Condition c);
|
||||
BufferOffset as_b(BOffImm off, Condition c, Label* documentation = nullptr);
|
||||
|
||||
BufferOffset as_b(Label* l, Condition c = Always);
|
||||
BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);
|
||||
@ -1540,7 +1596,7 @@ class Assembler : public AssemblerShared
|
||||
BufferOffset as_blx(Label* l);
|
||||
|
||||
BufferOffset as_blx(Register r, Condition c = Always);
|
||||
BufferOffset as_bl(BOffImm off, Condition c);
|
||||
BufferOffset as_bl(BOffImm off, Condition c, Label* documentation = nullptr);
|
||||
// bl can only branch+link to an immediate, never to a register it never
|
||||
// changes processor state.
|
||||
BufferOffset as_bl();
|
||||
|
@ -4249,10 +4249,10 @@ MacroAssemblerARMCompat::roundf(FloatRegister input, Register output, Label* bai
|
||||
}
|
||||
|
||||
CodeOffsetJump
|
||||
MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel* label, Condition cond)
|
||||
MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel* label, Condition cond, Label* documentation)
|
||||
{
|
||||
ARMBuffer::PoolEntry pe;
|
||||
BufferOffset bo = as_BranchPool(0xdeadbeef, label, &pe, cond);
|
||||
BufferOffset bo = as_BranchPool(0xdeadbeef, label, &pe, cond, documentation);
|
||||
// Fill in a new CodeOffset with both the load and the pool entry that the
|
||||
// instruction loads from.
|
||||
CodeOffsetJump ret(bo.getOffset(), pe.index());
|
||||
|
@ -980,9 +980,10 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
|
||||
}
|
||||
void moveValue(const Value& val, Register type, Register data);
|
||||
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always);
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label) {
|
||||
return jumpWithPatch(label);
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always,
|
||||
Label* documentation = nullptr);
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation) {
|
||||
return jumpWithPatch(label, Always, documentation);
|
||||
}
|
||||
template <typename T>
|
||||
CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel* label) {
|
||||
|
@ -1639,7 +1639,9 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
|
||||
loadPtr(address, scratch);
|
||||
branchTest32(cond, scratch, imm, label);
|
||||
}
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always) {
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always,
|
||||
Label* documentation = nullptr)
|
||||
{
|
||||
ARMBuffer::PoolEntry pe;
|
||||
BufferOffset load_bo;
|
||||
BufferOffset branch_bo;
|
||||
@ -1664,8 +1666,8 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
|
||||
label->use(branch_bo.getOffset());
|
||||
return CodeOffsetJump(load_bo.getOffset(), pe.index());
|
||||
}
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label) {
|
||||
return jumpWithPatch(label);
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
|
||||
return jumpWithPatch(label, documentation);
|
||||
}
|
||||
template <typename T>
|
||||
CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel* label) {
|
||||
|
@ -2865,7 +2865,7 @@ MacroAssemblerMIPSCompat::backedgeJump(RepatchLabel* label)
|
||||
}
|
||||
|
||||
CodeOffsetJump
|
||||
MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label)
|
||||
MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label, Label* documentation)
|
||||
{
|
||||
// Only one branch per label.
|
||||
MOZ_ASSERT(!label->used());
|
||||
|
@ -733,8 +733,8 @@ protected:
|
||||
public:
|
||||
void moveValue(const Value& val, Register type, Register data);
|
||||
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label);
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label);
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr);
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr);
|
||||
|
||||
template <typename T>
|
||||
CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel* label) {
|
||||
|
@ -233,9 +233,9 @@ class MacroAssemblerNone : public Assembler
|
||||
template <typename T> void Pop(T) { MOZ_CRASH(); }
|
||||
template <typename T> CodeOffsetLabel pushWithPatch(T) { MOZ_CRASH(); }
|
||||
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel*) { MOZ_CRASH(); }
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel*, Condition) { MOZ_CRASH(); }
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label) { MOZ_CRASH(); }
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel*, Label* doc = nullptr) { MOZ_CRASH(); }
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel*, Condition, Label* doc = nullptr) { MOZ_CRASH(); }
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label, Label* doc = nullptr) { MOZ_CRASH(); }
|
||||
template <typename T, typename S>
|
||||
CodeOffsetJump branchPtrWithPatch(Condition, T, S, RepatchLabel*) { MOZ_CRASH(); }
|
||||
|
||||
|
@ -1575,7 +1575,7 @@ CodeGeneratorShared::jumpToBlock(MBasicBlock* mir)
|
||||
// Note: the backedge is initially a jump to the next instruction.
|
||||
// It will be patched to the target block's label during link().
|
||||
RepatchLabel rejoin;
|
||||
CodeOffsetJump backedge = masm.backedgeJump(&rejoin);
|
||||
CodeOffsetJump backedge = masm.backedgeJump(&rejoin, mir->lir()->label());
|
||||
masm.bind(&rejoin);
|
||||
|
||||
masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
|
||||
@ -1596,7 +1596,7 @@ CodeGeneratorShared::jumpToBlock(MBasicBlock* mir, Assembler::Condition cond)
|
||||
// Note: the backedge is initially a jump to the next instruction.
|
||||
// It will be patched to the target block's label during link().
|
||||
RepatchLabel rejoin;
|
||||
CodeOffsetJump backedge = masm.jumpWithPatch(&rejoin, cond);
|
||||
CodeOffsetJump backedge = masm.jumpWithPatch(&rejoin, cond, mir->lir()->label());
|
||||
masm.bind(&rejoin);
|
||||
|
||||
masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
|
||||
|
@ -279,6 +279,12 @@ class AssemblerBuffer
|
||||
}
|
||||
|
||||
public:
|
||||
Inst* getInstOrNull(BufferOffset off) {
|
||||
if (!off.assigned())
|
||||
return nullptr;
|
||||
return getInst(off);
|
||||
}
|
||||
|
||||
Inst* getInst(BufferOffset off) {
|
||||
const int offset = off.getOffset();
|
||||
|
||||
|
@ -649,17 +649,19 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
|
||||
j(cond, label);
|
||||
}
|
||||
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label) {
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr) {
|
||||
JmpSrc src = jmpSrc(label);
|
||||
return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
|
||||
}
|
||||
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond) {
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond,
|
||||
Label* documentation = nullptr)
|
||||
{
|
||||
JmpSrc src = jSrc(cond, label);
|
||||
return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
|
||||
}
|
||||
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label) {
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
|
||||
return jumpWithPatch(label);
|
||||
}
|
||||
|
||||
|
@ -667,17 +667,19 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
|
||||
j(cond, label);
|
||||
}
|
||||
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label) {
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr) {
|
||||
jump(label);
|
||||
return CodeOffsetJump(size());
|
||||
}
|
||||
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Assembler::Condition cond) {
|
||||
CodeOffsetJump jumpWithPatch(RepatchLabel* label, Assembler::Condition cond,
|
||||
Label* documentation = nullptr)
|
||||
{
|
||||
j(cond, label);
|
||||
return CodeOffsetJump(size());
|
||||
}
|
||||
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label) {
|
||||
CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
|
||||
return jumpWithPatch(label);
|
||||
}
|
||||
|
||||
|
@ -3108,10 +3108,7 @@ static const JSFunctionSpec array_methods[] = {
|
||||
JS_SELF_HOSTED_FN("keys", "ArrayKeys", 0,0),
|
||||
|
||||
/* ES7 additions */
|
||||
#ifdef NIGHTLY_BUILD
|
||||
JS_SELF_HOSTED_FN("includes", "ArrayIncludes", 2,0),
|
||||
#endif
|
||||
|
||||
JS_FS_END
|
||||
};
|
||||
|
||||
|
@ -135,13 +135,19 @@ class StringifyContext
      : sb(sb),
        gap(gap),
        replacer(cx, replacer),
        stack(cx, TraceableHashSet<JSObject*>(cx)),
        propertyList(propertyList),
        depth(0)
    {}

    bool init() {
        return stack.init(8);
    }

    StringBuffer& sb;
    const StringBuffer& gap;
    RootedObject replacer;
    Rooted<TraceableHashSet<JSObject*>> stack;
    const AutoIdVector& propertyList;
    uint32_t depth;
};
@ -290,6 +296,32 @@ IsFilteredValue(const Value& v)
    return v.isUndefined() || v.isSymbol() || IsCallable(v);
}

class CycleDetector
{
  public:
    CycleDetector(StringifyContext* scx, HandleObject obj)
      : stack(&scx->stack), obj_(obj) {
    }

    bool foundCycle(JSContext* cx) {
        auto addPtr = stack.lookupForAdd(obj_);
        if (addPtr) {
            JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_JSON_CYCLIC_VALUE,
                                 js_object_str);
            return false;
        }
        return stack.add(addPtr, obj_);
    }

    ~CycleDetector() {
        stack.remove(obj_);
    }

  private:
    MutableHandle<TraceableHashSet<JSObject*>> stack;
    HandleObject obj_;
};

/* ES5 15.12.3 JO. */
static bool
JO(JSContext* cx, HandleObject obj, StringifyContext* scx)
@ -305,14 +337,9 @@ JO(JSContext* cx, HandleObject obj, StringifyContext* scx)
     */

    /* Steps 1-2, 11. */
    AutoCycleDetector detect(cx, obj);
    if (!detect.init())
    CycleDetector detect(scx, obj);
    if (!detect.foundCycle(cx))
        return false;
    if (detect.foundCycle()) {
        JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_JSON_CYCLIC_VALUE,
                             js_object_str);
        return false;
    }

    if (!scx->sb.append('{'))
        return false;
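The new CycleDetector swaps the old context-wide AutoCycleDetector for a hash set owned by the StringifyContext itself, so nested or unrelated stringifications cannot see each other's stack (the point of the cyclic-stringify-unrelated.js test below). A small self-contained illustration of the same RAII shape, using std::unordered_set rather than the engine's traceable set; all names here are made up for the sketch:

#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

struct Node {
  std::string name;
  std::vector<Node*> children;
};

// Per-serialization state: the set of objects currently being written.
struct SerializeContext {
  std::unordered_set<const Node*> stack;
};

// RAII guard: mark the node on entry, unmark on exit. A node already present
// in the stack means we re-entered it, i.e. a cycle.
class CycleDetector {
 public:
  CycleDetector(SerializeContext& cx, const Node* node) : cx_(cx), node_(node) {}
  bool foundCycle() {
    inserted_ = cx_.stack.insert(node_).second;
    return !inserted_;
  }
  ~CycleDetector() {
    if (inserted_)
      cx_.stack.erase(node_);
  }
 private:
  SerializeContext& cx_;
  const Node* node_;
  bool inserted_ = false;
};

static bool Serialize(SerializeContext& cx, const Node* node, std::string& out) {
  CycleDetector detect(cx, node);
  if (detect.foundCycle())
    return false;  // report a "cyclic value" error here
  out += "{" + node->name;
  for (const Node* child : node->children) {
    if (!Serialize(cx, child, out))
      return false;
  }
  out += "}";
  return true;
}

int main() {
  Node a{"a", {}}, b{"b", {}};
  a.children.push_back(&b);
  b.children.push_back(&a);  // introduce a cycle
  SerializeContext cx;
  std::string out;
  std::printf("serialized ok? %s\n", Serialize(cx, &a, out) ? "yes" : "no");
}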
@ -321,7 +348,11 @@ JO(JSContext* cx, HandleObject obj, StringifyContext* scx)
|
||||
Maybe<AutoIdVector> ids;
|
||||
const AutoIdVector* props;
|
||||
if (scx->replacer && !scx->replacer->isCallable()) {
|
||||
MOZ_ASSERT(IsArray(scx->replacer, cx));
|
||||
// NOTE: We can't assert |IsArray(scx->replacer)| because the replacer
|
||||
// might have been a revocable proxy to an array. Such a proxy
|
||||
// satisfies |IsArray|, but any side effect of JSON.stringify
|
||||
// could revoke the proxy so that |!IsArray(scx->replacer)|. See
|
||||
// bug 1196497.
|
||||
props = &scx->propertyList;
|
||||
} else {
|
||||
MOZ_ASSERT_IF(scx->replacer, scx->propertyList.length() == 0);
|
||||
@ -396,14 +427,9 @@ JA(JSContext* cx, HandleObject obj, StringifyContext* scx)
|
||||
*/
|
||||
|
||||
/* Steps 1-2, 11. */
|
||||
AutoCycleDetector detect(cx, obj);
|
||||
if (!detect.init())
|
||||
CycleDetector detect(scx, obj);
|
||||
if (!detect.foundCycle(cx))
|
||||
return false;
|
||||
if (detect.foundCycle()) {
|
||||
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_JSON_CYCLIC_VALUE,
|
||||
js_object_str);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!scx->sb.append('['))
|
||||
return false;
|
||||
@ -670,6 +696,8 @@ js::Stringify(JSContext* cx, MutableHandleValue vp, JSObject* replacer_, Value s
|
||||
|
||||
/* Step 11. */
|
||||
StringifyContext scx(cx, sb, gap, replacer, propertyList);
|
||||
if (!scx.init())
|
||||
return false;
|
||||
if (!PreprocessValue(cx, wrapper, HandleId(emptyId), vp, &scx))
|
||||
return false;
|
||||
if (IsFilteredValue(vp))
|
||||
|
39
js/src/tests/ecma_5/JSON/cyclic-stringify-unrelated.js
Normal file
39
js/src/tests/ecma_5/JSON/cyclic-stringify-unrelated.js
Normal file
@ -0,0 +1,39 @@
|
||||
// Any copyright is dedicated to the Public Domain.
|
||||
// http://creativecommons.org/licenses/publicdomain/
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
var BUGNUMBER = 1197097;
|
||||
var summary = "JSON.stringify shouldn't use context-wide cycle detection";
|
||||
|
||||
print(BUGNUMBER + ": " + summary);
|
||||
|
||||
/**************
|
||||
* BEGIN TEST *
|
||||
**************/
|
||||
|
||||
var arr;
|
||||
|
||||
// Nested yet separate JSON.stringify is okay.
|
||||
arr = [{}];
|
||||
assertEq(JSON.stringify(arr, function(k, v) {
|
||||
assertEq(JSON.stringify(arr), "[{}]");
|
||||
return v;
|
||||
}), "[{}]");
|
||||
|
||||
// SpiderMonkey censors cycles in array-joining. This mechanism must not
|
||||
// interfere with the cycle detection in JSON.stringify.
|
||||
arr = [{
|
||||
toString: function() {
|
||||
var s = JSON.stringify(arr);
|
||||
assertEq(s, "[{}]");
|
||||
return s;
|
||||
}
|
||||
}];
|
||||
assertEq(arr.join(), "[{}]");
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
if (typeof reportCompare === "function")
|
||||
reportCompare(true, true);
|
||||
|
||||
print("Tests complete");
|
@ -0,0 +1,39 @@
|
||||
// Any copyright is dedicated to the Public Domain.
|
||||
// http://creativecommons.org/licenses/publicdomain/
|
||||
|
||||
var gTestfile = "json-stringify-replacer-array-revocable-proxy.js";
|
||||
//-----------------------------------------------------------------------------
|
||||
var BUGNUMBER = 1196497;
|
||||
var summary =
|
||||
"Don't assert when JSON.stringify is passed a revocable proxy to an array, " +
|
||||
"then that proxy is revoked midflight during stringification";
|
||||
|
||||
print(BUGNUMBER + ": " + summary);
|
||||
|
||||
/**************
|
||||
* BEGIN TEST *
|
||||
**************/
|
||||
|
||||
var arr = [];
|
||||
var { proxy, revoke } = Proxy.revocable(arr, {
|
||||
get(thisv, prop, receiver) {
|
||||
// First (and only) get will be for "length", to determine the length of the
|
||||
// list of properties to serialize. Returning 0 uses the empty list,
|
||||
// resulting in |a: 0| being ignored below.
|
||||
assertEq(thisv, arr);
|
||||
assertEq(prop, "length");
|
||||
assertEq(receiver, proxy);
|
||||
|
||||
revoke();
|
||||
return 0;
|
||||
}
|
||||
});
|
||||
|
||||
assertEq(JSON.stringify({a: 0}, proxy), "{}");
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
if (typeof reportCompare === "function")
|
||||
reportCompare(true, true);
|
||||
|
||||
print("Tests complete");
|
@ -11,9 +11,6 @@ const constructors = [
|
||||
];
|
||||
|
||||
for (var constructor of constructors) {
|
||||
if (!("includes" in constructor.prototype))
|
||||
break;
|
||||
|
||||
assertEq(constructor.prototype.includes.length, 1);
|
||||
|
||||
assertEq(new constructor([1, 2, 3]).includes(1), true);
|
||||
|
@ -8,42 +8,40 @@ var summary = "Implement Array.prototype.includes";
|
||||
|
||||
print(BUGNUMBER + ": " + summary);
|
||||
|
||||
if ('includes' in []) {
|
||||
assertEq(typeof [].includes, "function");
|
||||
assertEq([].includes.length, 1);
|
||||
assertEq(typeof [].includes, "function");
|
||||
assertEq([].includes.length, 1);
|
||||
|
||||
assertTrue([1, 2, 3].includes(2));
|
||||
assertTrue([1,,2].includes(2));
|
||||
assertTrue([1, 2, 3].includes(2, 1));
|
||||
assertTrue([1, 2, 3].includes(2, -2));
|
||||
assertTrue([1, 2, 3].includes(2, -100));
|
||||
assertTrue([Object, Function, Array].includes(Function));
|
||||
assertTrue([-0].includes(0));
|
||||
assertTrue([NaN].includes(NaN));
|
||||
assertTrue([,].includes());
|
||||
assertTrue(staticIncludes("123", "2"));
|
||||
assertTrue(staticIncludes({length: 3, 1: 2}, 2));
|
||||
assertTrue(staticIncludes({length: 3, 1: 2, get 3(){throw ""}}, 2));
|
||||
assertTrue(staticIncludes({length: 3, get 1() {return 2}}, 2));
|
||||
assertTrue(staticIncludes({__proto__: {1: 2}, length: 3}, 2));
|
||||
assertTrue(staticIncludes(new Proxy([1], {get(){return 2}}), 2));
|
||||
assertTrue([1, 2, 3].includes(2));
|
||||
assertTrue([1,,2].includes(2));
|
||||
assertTrue([1, 2, 3].includes(2, 1));
|
||||
assertTrue([1, 2, 3].includes(2, -2));
|
||||
assertTrue([1, 2, 3].includes(2, -100));
|
||||
assertTrue([Object, Function, Array].includes(Function));
|
||||
assertTrue([-0].includes(0));
|
||||
assertTrue([NaN].includes(NaN));
|
||||
assertTrue([,].includes());
|
||||
assertTrue(staticIncludes("123", "2"));
|
||||
assertTrue(staticIncludes({length: 3, 1: 2}, 2));
|
||||
assertTrue(staticIncludes({length: 3, 1: 2, get 3(){throw ""}}, 2));
|
||||
assertTrue(staticIncludes({length: 3, get 1() {return 2}}, 2));
|
||||
assertTrue(staticIncludes({__proto__: {1: 2}, length: 3}, 2));
|
||||
assertTrue(staticIncludes(new Proxy([1], {get(){return 2}}), 2));
|
||||
|
||||
assertFalse([1, 2, 3].includes("2"));
|
||||
assertFalse([1, 2, 3].includes(2, 2));
|
||||
assertFalse([1, 2, 3].includes(2, -1));
|
||||
assertFalse([undefined].includes(NaN));
|
||||
assertFalse([{}].includes({}));
|
||||
assertFalse(staticIncludes({length: 3, 1: 2}, 2, 2));
|
||||
assertFalse(staticIncludes({length: 3, get 0(){delete this[1]}, 1: 2}, 2));
|
||||
assertFalse(staticIncludes({length: -100, 0: 1}, 1));
|
||||
assertFalse([1, 2, 3].includes("2"));
|
||||
assertFalse([1, 2, 3].includes(2, 2));
|
||||
assertFalse([1, 2, 3].includes(2, -1));
|
||||
assertFalse([undefined].includes(NaN));
|
||||
assertFalse([{}].includes({}));
|
||||
assertFalse(staticIncludes({length: 3, 1: 2}, 2, 2));
|
||||
assertFalse(staticIncludes({length: 3, get 0(){delete this[1]}, 1: 2}, 2));
|
||||
assertFalse(staticIncludes({length: -100, 0: 1}, 1));
|
||||
|
||||
assertThrowsInstanceOf(() => staticIncludes(), TypeError);
|
||||
assertThrowsInstanceOf(() => staticIncludes(null), TypeError);
|
||||
assertThrowsInstanceOf(() => staticIncludes({get length(){throw TypeError()}}), TypeError);
|
||||
assertThrowsInstanceOf(() => staticIncludes({length: 3, get 1() {throw TypeError()}}, 2), TypeError);
|
||||
assertThrowsInstanceOf(() => staticIncludes({__proto__: {get 1() {throw TypeError()}}, length: 3}, 2), TypeError);
|
||||
assertThrowsInstanceOf(() => staticIncludes(new Proxy([1], {get(){throw TypeError()}})), TypeError);
|
||||
}
|
||||
assertThrowsInstanceOf(() => staticIncludes(), TypeError);
|
||||
assertThrowsInstanceOf(() => staticIncludes(null), TypeError);
|
||||
assertThrowsInstanceOf(() => staticIncludes({get length(){throw TypeError()}}), TypeError);
|
||||
assertThrowsInstanceOf(() => staticIncludes({length: 3, get 1() {throw TypeError()}}, 2), TypeError);
|
||||
assertThrowsInstanceOf(() => staticIncludes({__proto__: {get 1() {throw TypeError()}}, length: 3}, 2), TypeError);
|
||||
assertThrowsInstanceOf(() => staticIncludes(new Proxy([1], {get(){throw TypeError()}})), TypeError);
|
||||
|
||||
function assertTrue(v) {
|
||||
assertEq(v, true);
|
||||
|
@ -296,12 +296,12 @@ js::intrinsic_NewDenseArray(JSContext* cx, unsigned argc, Value* vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);

    // Check that index is an int32
    if (!args[0].isInt32()) {
        JS_ReportError(cx, "Expected int32 as second argument");
        return false;
    }
    uint32_t length = args[0].toInt32();
    double lengthDouble = args[0].toNumber();
    MOZ_ASSERT(lengthDouble >= 0);
    MOZ_ASSERT(lengthDouble < INT32_MAX);
    MOZ_ASSERT(uint32_t(lengthDouble) == lengthDouble);

    uint32_t length = uint32_t(lengthDouble);

    // Make a new buffer and initialize it up to length.
    RootedObject buffer(cx, NewFullyAllocatedArrayForCallingAllocationSite(cx, length));
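NewDenseArray used to insist on an int32-tagged argument and rejected a double-tagged value even when it was integral, which is exactly what the new bug1195298.js test produces; the hunk above reads the argument as a number and asserts it is a valid length. A standalone sketch of that kind of validation, with the helper name and limit invented for illustration:

#include <cstdint>
#include <cstdio>

// Accept a length that arrives as a double (e.g. the result of arithmetic on
// values larger than int32) as long as it is a non-negative integer below the
// limit used here for illustration.
static bool ToDenseLength(double lengthDouble, uint32_t* out) {
  if (!(lengthDouble >= 0) || lengthDouble >= INT32_MAX)
    return false;
  uint32_t length = static_cast<uint32_t>(lengthDouble);
  if (static_cast<double>(length) != lengthDouble)
    return false;  // not an integral value
  *out = length;
  return true;
}

int main() {
  uint32_t len = 0;
  // 0xfffffffff - 0xffffffffe: integral result, but it reaches the callee as a
  // double, mirroring the regression test added in this patch.
  double l = 68719476735.0 - 68719476734.0;
  if (ToDenseLength(l, &len))
    std::printf("dense length = %u\n", len);  // prints 1
  if (!ToDenseLength(1.5, &len))
    std::printf("1.5 rejected: not an integral length\n");
}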
@ -801,9 +801,7 @@ TypedArrayObject::protoFunctions[] = {
    // Both of these are actually defined to the same object in FinishTypedArrayInit.
    JS_SELF_HOSTED_FN("values", "TypedArrayValues", 0, JSPROP_DEFINE_LATE),
    JS_SELF_HOSTED_SYM_FN(iterator, "TypedArrayValues", 0, JSPROP_DEFINE_LATE),
#ifdef NIGHTLY_BUILD
    JS_SELF_HOSTED_FN("includes", "TypedArrayIncludes", 2, 0),
#endif
    JS_FS_END
};

@ -168,22 +168,16 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=933681
|
||||
gPrototypeProperties['Array'] =
|
||||
["length", "toSource", "toString", "toLocaleString", "join", "reverse", "sort", "push",
|
||||
"pop", "shift", "unshift", "splice", "concat", "slice", "lastIndexOf", "indexOf",
|
||||
"forEach", "map", "reduce", "reduceRight", "filter", "some", "every", "find",
|
||||
"includes", "forEach", "map", "reduce", "reduceRight", "filter", "some", "every", "find",
|
||||
"findIndex", "copyWithin", "fill", Symbol.iterator, "entries", "keys", "constructor"];
|
||||
if (isNightlyBuild) {
|
||||
gPrototypeProperties['Array'].push('includes');
|
||||
}
|
||||
for (var c of typedArrayClasses) {
|
||||
gPrototypeProperties[c] = ["constructor", "BYTES_PER_ELEMENT"];
|
||||
}
|
||||
gPrototypeProperties['TypedArray'] =
|
||||
["length", "buffer", "byteLength", "byteOffset", Symbol.iterator, "subarray",
|
||||
"set", "copyWithin", "find", "findIndex", "forEach","indexOf", "lastIndexOf", "reverse",
|
||||
"join", "every", "some", "reduce", "reduceRight", "entries", "keys", "values", "slice",
|
||||
"map", "filter"];
|
||||
if (isNightlyBuild) {
|
||||
gPrototypeProperties['TypedArray'].push('includes');
|
||||
}
|
||||
"set", "copyWithin", "find", "findIndex", "forEach","indexOf", "lastIndexOf", "includes",
|
||||
"reverse", "join", "every", "some", "reduce", "reduceRight", "entries", "keys", "values",
|
||||
"slice", "map", "filter"];
|
||||
for (var c of errorObjectClasses) {
|
||||
gPrototypeProperties[c] = ["constructor", "name",
|
||||
// We don't actually resolve these empty data properties
|
||||
@ -207,10 +201,6 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=933681
|
||||
"flags", "global", "ignoreCase", "multiline", "source", "sticky",
|
||||
"lastIndex"];
|
||||
|
||||
if (isNightlyBuild) {
|
||||
gPrototypeProperties['TypedArray'].push('includes');
|
||||
}
|
||||
|
||||
// Sort an array that may contain symbols as well as strings.
|
||||
function sortProperties(arr) {
|
||||
function sortKey(prop) {
|
||||
|
@ -17,6 +17,8 @@
|
||||
#include "AnnexB.h"
|
||||
#include "Endian.h"
|
||||
|
||||
#include <cstring>
|
||||
|
||||
using mozilla::BigEndian;
|
||||
|
||||
static const uint8_t kAnnexBDelimiter[] = { 0, 0, 0, 1 };
|
||||
|
@ -17,6 +17,8 @@
|
||||
#ifndef __VideoDecoder_h__
|
||||
#define __VideoDecoder_h__
|
||||
|
||||
#include <atomic>
|
||||
|
||||
#include "gmp-task-utils.h"
|
||||
#include "gmp-video-decode.h"
|
||||
#include "gmp-video-host.h"
|
||||
|
@ -8,7 +8,6 @@
|
||||
#if defined(HAVE_CONFIG_H)
|
||||
#include "config.h"
|
||||
#endif
|
||||
#include <algorithm>
|
||||
#include <windows.h>
|
||||
#include <mmdeviceapi.h>
|
||||
#include <windef.h>
|
||||
@ -259,6 +258,9 @@ struct cubeb_stream
|
||||
/* Buffer used to downmix or upmix to the number of channels the mixer has.
|
||||
* its size is |frames_to_bytes_before_mix(buffer_frame_count)|. */
|
||||
float * mix_buffer;
|
||||
/* Stream volume. Set via stream_set_volume and used to reset volume on
|
||||
* device changes. */
|
||||
float volume;
|
||||
/* True if the stream is draining. */
|
||||
bool draining;
|
||||
};
|
||||
@ -720,6 +722,37 @@ current_stream_delay(cubeb_stream * stm)
|
||||
|
||||
return delay;
|
||||
}
|
||||
|
||||
int
|
||||
stream_set_volume(cubeb_stream * stm, float volume)
|
||||
{
|
||||
stm->stream_reset_lock->assert_current_thread_owns();
|
||||
|
||||
uint32_t channels;
|
||||
HRESULT hr = stm->audio_stream_volume->GetChannelCount(&channels);
|
||||
if (hr != S_OK) {
|
||||
LOG("could not get the channel count: %x\n", hr);
|
||||
return CUBEB_ERROR;
|
||||
}
|
||||
|
||||
/* up to 9.1 for now */
|
||||
if (channels > 10) {
|
||||
return CUBEB_ERROR_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
float volumes[10];
|
||||
for (uint32_t i = 0; i < channels; i++) {
|
||||
volumes[i] = volume;
|
||||
}
|
||||
|
||||
hr = stm->audio_stream_volume->SetAllVolumes(channels, volumes);
|
||||
if (hr != S_OK) {
|
||||
LOG("could not set the channels volume: %x\n", hr);
|
||||
return CUBEB_ERROR;
|
||||
}
|
||||
|
||||
return CUBEB_OK;
|
||||
}
|
||||
} // namespace anonymous
|
||||
|
||||
extern "C" {
|
||||
@ -1109,6 +1142,11 @@ int setup_wasapi_stream(cubeb_stream * stm)
|
||||
return CUBEB_ERROR;
|
||||
}
|
||||
|
||||
/* Restore the stream volume over a device change. */
|
||||
if (stream_set_volume(stm, stm->volume) != CUBEB_OK) {
|
||||
return CUBEB_ERROR;
|
||||
}
|
||||
|
||||
/* If we are playing a mono stream, we only resample one channel,
|
||||
* and copy it over, so we are always resampling the number
|
||||
* of channels of the stream, not the number of channels
|
||||
@ -1153,6 +1191,7 @@ wasapi_stream_init(cubeb * context, cubeb_stream ** stream,
|
||||
stm->stream_params = stream_params;
|
||||
stm->draining = false;
|
||||
stm->latency = latency;
|
||||
stm->volume = 1.0;
|
||||
|
||||
stm->stream_reset_lock = new owned_critical_section();
|
||||
|
||||
@ -1363,30 +1402,14 @@ int wasapi_stream_get_latency(cubeb_stream * stm, uint32_t * latency)
|
||||
|
||||
int wasapi_stream_set_volume(cubeb_stream * stm, float volume)
|
||||
{
|
||||
HRESULT hr;
|
||||
uint32_t channels;
|
||||
/* up to 9.1 for now */
|
||||
float volumes[10];
|
||||
|
||||
auto_lock lock(stm->stream_reset_lock);
|
||||
|
||||
hr = stm->audio_stream_volume->GetChannelCount(&channels);
|
||||
if (hr != S_OK) {
|
||||
LOG("could not get the channel count: %x\n", hr);
|
||||
if (stream_set_volume(stm, volume) != CUBEB_OK) {
|
||||
return CUBEB_ERROR;
|
||||
}
|
||||
|
||||
XASSERT(channels <= 10 && "bump the array size");
|
||||
stm->volume = volume;
|
||||
|
||||
for (uint32_t i = 0; i < channels; i++) {
|
||||
volumes[i] = volume;
|
||||
}
|
||||
|
||||
hr = stm->audio_stream_volume->SetAllVolumes(channels, volumes);
|
||||
if (hr != S_OK) {
|
||||
LOG("could not set the channels volume: %x\n", hr);
|
||||
return CUBEB_ERROR;
|
||||
}
|
||||
return CUBEB_OK;
|
||||
}
|
||||
|
||||
|
@ -54,6 +54,7 @@
|
||||
# define MOZ_HAVE_NORETURN __declspec(noreturn)
|
||||
# if _MSC_VER >= 1900
|
||||
# define MOZ_HAVE_CXX11_CONSTEXPR
|
||||
# define MOZ_HAVE_CXX11_CONSTEXPR_IN_TEMPLATES
|
||||
# define MOZ_HAVE_EXPLICIT_CONVERSION
|
||||
# endif
|
||||
# ifdef __clang__
|
||||
|
@ -5,8 +5,7 @@
|
||||
|
||||
#include "nsISupports.idl"
|
||||
|
||||
interface nsIPrincipal;
|
||||
interface nsILoadContextInfo;
|
||||
interface nsIChannel;
|
||||
interface nsICacheEntryOpenCallback;
|
||||
|
||||
%{C++
|
||||
@ -16,16 +15,16 @@ interface nsICacheEntryOpenCallback;
|
||||
/**
|
||||
* nsIPackagedAppService
|
||||
*/
|
||||
[scriptable, builtinclass, uuid(f35e5229-d08a-46eb-a574-2db4e22aee98)]
|
||||
[scriptable, builtinclass, uuid(9c96c638-e80c-4dce-abec-c96fdb7a25d8)]
|
||||
interface nsIPackagedAppService : nsISupports
|
||||
{
|
||||
/**
|
||||
* @param aPrincipal
|
||||
* the principal associated to the URL of a packaged resource
|
||||
* URL format: package_url + PACKAGED_APP_TOKEN + resource_path
|
||||
* example: http://test.com/path/to/package!//resource.html
|
||||
* @param aFlags
|
||||
* the load flags used for downloading the package
|
||||
* @param aChannel
|
||||
* this param is passed to the packaged app service in order to provide
|
||||
* info about the requesting channel, which wants to access the contents
|
||||
* of a packaged app resource. Its URI has the following format:
|
||||
* http://domain.com/path/to/package.pak!//path/to/subresource.html
|
||||
*
|
||||
* @param aCallback
|
||||
* an object implementing nsICacheEntryOpenCallback
|
||||
* this is the target of the async result of the operation
|
||||
@ -34,17 +33,12 @@ interface nsIPackagedAppService : nsISupports
|
||||
* the cached entry, if one exists, or an error code otherwise
|
||||
* aCallback is kept alive using an nsCOMPtr until OnCacheEntryAvailable
|
||||
* is called
|
||||
* @param aInfo
|
||||
* an object used to determine the cache jar this resource goes in.
|
||||
* usually created by calling GetLoadContextInfo(requestingChannel)
|
||||
*
|
||||
* Calling this method will either download the package containing the given
|
||||
* resource URI, store it in the cache and pass the cache entry to aCallback,
|
||||
* or if that resource has already been downloaded it will be served from
|
||||
* the cache.
|
||||
*/
|
||||
void getResource(in nsIPrincipal aPrincipal,
|
||||
in uint32_t aFlags,
|
||||
in nsILoadContextInfo aInfo,
|
||||
void getResource(in nsIChannel aChannel,
|
||||
in nsICacheEntryOpenCallback aCallback);
|
||||
};
|
||||
|
@ -690,28 +690,55 @@ PackagedAppService::GetPackageURI(nsIURI *aURI, nsIURI **aPackageURI)
|
||||
}
|
||||
|
||||
NS_IMETHODIMP
|
||||
PackagedAppService::GetResource(nsIPrincipal *aPrincipal,
|
||||
uint32_t aLoadFlags,
|
||||
nsILoadContextInfo *aInfo,
|
||||
PackagedAppService::GetResource(nsIChannel *aChannel,
|
||||
nsICacheEntryOpenCallback *aCallback)
|
||||
{
|
||||
// Check arguments are not null
|
||||
if (!aPrincipal || !aCallback || !aInfo) {
|
||||
MOZ_RELEASE_ASSERT(NS_IsMainThread(), "mDownloadingPackages hashtable is not thread safe");
|
||||
LOG(("[%p] PackagedAppService::GetResource(aChannel: %p, aCallback: %p)\n",
|
||||
this, aChannel, aCallback));
|
||||
|
||||
if (!aChannel || !aCallback) {
|
||||
return NS_ERROR_INVALID_ARG;
|
||||
}
|
||||
|
||||
nsresult rv;
|
||||
nsIScriptSecurityManager *securityManager =
|
||||
nsContentUtils::GetSecurityManager();
|
||||
if (!securityManager) {
|
||||
LOG(("[%p] > No securityManager\n", this));
|
||||
return NS_ERROR_UNEXPECTED;
|
||||
}
|
||||
nsCOMPtr<nsIPrincipal> principal;
|
||||
rv = securityManager->GetChannelURIPrincipal(aChannel, getter_AddRefs(principal));
|
||||
if (NS_FAILED(rv) || !principal) {
|
||||
LOG(("[%p] > Error getting principal rv=%X principal=%p\n",
|
||||
this, rv, principal.get()));
|
||||
return NS_FAILED(rv) ? rv : NS_ERROR_NULL_POINTER;
|
||||
}
|
||||
|
||||
nsCOMPtr<nsIURI> uri;
|
||||
rv = aPrincipal->GetURI(getter_AddRefs(uri));
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
nsCOMPtr<nsILoadContextInfo> loadContextInfo = GetLoadContextInfo(aChannel);
|
||||
if (!loadContextInfo) {
|
||||
LOG(("[%p] > Channel has no loadContextInfo\n", this));
|
||||
return NS_ERROR_NULL_POINTER;
|
||||
}
|
||||
|
||||
nsLoadFlags loadFlags = 0;
|
||||
rv = aChannel->GetLoadFlags(&loadFlags);
|
||||
if (NS_FAILED(rv)) {
|
||||
LOG(("[%p] > Error calling GetLoadFlags rv=%X\n", this, rv));
|
||||
return rv;
|
||||
}
|
||||
|
||||
LogURI("PackagedAppService::GetResource", this, uri, aInfo);
|
||||
nsCOMPtr<nsILoadInfo> loadInfo = aChannel->GetLoadInfo();
|
||||
|
||||
MOZ_RELEASE_ASSERT(NS_IsMainThread(), "mDownloadingPackages hashtable is not thread safe");
|
||||
nsCOMPtr<nsIURI> uri;
|
||||
rv = principal->GetURI(getter_AddRefs(uri));
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
LOG(("[%p] > Error calling GetURI rv=%X\n", this, rv));
|
||||
return rv;
|
||||
}
|
||||
|
||||
LogURI("PackagedAppService::GetResource", this, uri, loadContextInfo);
|
||||
nsCOMPtr<nsIURI> packageURI;
|
||||
rv = GetPackageURI(uri, getter_AddRefs(packageURI));
|
||||
if (NS_FAILED(rv)) {
|
||||
@ -719,7 +746,7 @@ PackagedAppService::GetResource(nsIPrincipal *aPrincipal,
|
||||
}
|
||||
|
||||
nsAutoCString key;
|
||||
CacheFileUtils::AppendKeyPrefix(aInfo, key);
|
||||
CacheFileUtils::AppendKeyPrefix(loadContextInfo, key);
|
||||
|
||||
{
|
||||
nsAutoCString spec;
|
||||
@ -740,10 +767,10 @@ PackagedAppService::GetResource(nsIPrincipal *aPrincipal,
|
||||
}
|
||||
|
||||
nsCOMPtr<nsIChannel> channel;
|
||||
rv = NS_NewChannel(
|
||||
getter_AddRefs(channel), packageURI, aPrincipal,
|
||||
nsILoadInfo::SEC_NORMAL, nsIContentPolicy::TYPE_OTHER, nullptr, nullptr,
|
||||
aLoadFlags);
|
||||
rv = NS_NewChannelInternal(
|
||||
getter_AddRefs(channel), packageURI,
|
||||
loadInfo,
|
||||
nullptr, nullptr, loadFlags);
|
||||
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
return rv;
|
||||
@ -758,7 +785,7 @@ PackagedAppService::GetResource(nsIPrincipal *aPrincipal,
|
||||
}
|
||||
|
||||
downloader = new PackagedAppDownloader();
|
||||
rv = downloader->Init(aInfo, key);
|
||||
rv = downloader->Init(loadContextInfo, key);
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
return rv;
|
||||
}
|
||||
@ -784,6 +811,10 @@ PackagedAppService::GetResource(nsIPrincipal *aPrincipal,
|
||||
nsRefPtr<PackagedAppChannelListener> listener =
|
||||
new PackagedAppChannelListener(downloader, mimeConverter);
|
||||
|
||||
if (loadInfo && loadInfo->GetEnforceSecurity()) {
|
||||
return channel->AsyncOpen2(listener);
|
||||
}
|
||||
|
||||
return channel->AsyncOpen(listener, nullptr);
|
||||
}
|
||||
|
||||
|
@ -5206,12 +5206,6 @@ nsHttpChannel::BeginConnect()
|
||||
// by the packaged app service into the cache, and the cache entry will
|
||||
// be passed to OnCacheEntryAvailable.
|
||||
|
||||
// Pass the original load flags to the packaged app request.
|
||||
uint32_t loadFlags = mLoadFlags;
|
||||
|
||||
mLoadFlags |= LOAD_ONLY_FROM_CACHE;
|
||||
mLoadFlags |= LOAD_FROM_CACHE;
|
||||
mLoadFlags &= ~VALIDATE_ALWAYS;
|
||||
nsCOMPtr<nsIPackagedAppService> pas =
|
||||
do_GetService("@mozilla.org/network/packaged-app-service;1", &rv);
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
@ -5219,12 +5213,18 @@ nsHttpChannel::BeginConnect()
|
||||
return rv;
|
||||
}
|
||||
|
||||
nsCOMPtr<nsIPrincipal> principal = GetURIPrincipal();
|
||||
nsCOMPtr<nsILoadContextInfo> loadInfo = GetLoadContextInfo(this);
|
||||
rv = pas->GetResource(principal, loadFlags, loadInfo, this);
|
||||
rv = pas->GetResource(this, this);
|
||||
if (NS_FAILED(rv)) {
|
||||
AsyncAbort(rv);
|
||||
}
|
||||
|
||||
// We need to alter the flags so the cache entry returned by the
|
||||
// packaged app service is always accepted. Revalidation is handled
|
||||
// by the service.
|
||||
mLoadFlags |= LOAD_ONLY_FROM_CACHE;
|
||||
mLoadFlags |= LOAD_FROM_CACHE;
|
||||
mLoadFlags &= ~VALIDATE_ALWAYS;
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
||||
|
@ -34,6 +34,7 @@
|
||||
Cu.import('resource://gre/modules/LoadContextInfo.jsm');
|
||||
Cu.import("resource://testing-common/httpd.js");
|
||||
Cu.import("resource://gre/modules/Services.jsm");
|
||||
Cu.import("resource://gre/modules/NetUtil.jsm");
|
||||
|
||||
// The number of times this package has been requested
|
||||
// This number might be reset by tests that use it
|
||||
@ -59,11 +60,24 @@ function packagedAppContentHandler(metadata, response)
|
||||
response.bodyOutputStream.write(body, body.length);
|
||||
}
|
||||
|
||||
function getPrincipal(url) {
|
||||
function getChannelForURL(url) {
|
||||
let uri = createURI(url);
|
||||
let ssm = Cc["@mozilla.org/scriptsecuritymanager;1"]
|
||||
.getService(Ci.nsIScriptSecurityManager);
|
||||
let uri = createURI(url);
|
||||
return ssm.createCodebasePrincipal(uri, {});
|
||||
let principal = ssm.createCodebasePrincipal(uri, {});
|
||||
let tmpChannel =
|
||||
NetUtil.newChannel({
|
||||
uri: url,
|
||||
loadingPrincipal: principal,
|
||||
contentPolicyType: Ci.nsIContentPolicy.TYPE_OTHER
|
||||
});
|
||||
|
||||
tmpChannel.notificationCallbacks =
|
||||
new LoadContextCallback(principal.appId,
|
||||
principal.isInBrowserElement,
|
||||
false,
|
||||
false);
|
||||
return tmpChannel;
|
||||
}
|
||||
|
||||
// The package content
|
||||
@ -129,6 +143,15 @@ function run_test()
|
||||
add_test(test_bad_package);
|
||||
add_test(test_bad_package_404);
|
||||
|
||||
// Channels created by addons could have no load info.
|
||||
// In debug mode this triggers an assertion, but we still want to test that
|
||||
// it works in optimized mode. See bug 1196021 comment 17
|
||||
if (Components.classes["@mozilla.org/xpcom/debug;1"]
|
||||
.getService(Components.interfaces.nsIDebug2)
|
||||
.isDebugBuild == false) {
|
||||
add_test(test_channel_no_loadinfo);
|
||||
}
|
||||
|
||||
// run tests
|
||||
run_next_test();
|
||||
}
|
||||
@ -179,10 +202,10 @@ var cacheListener = new packagedResourceListener(testData.content[0].data);
|
||||
|
||||
// These calls should fail, since one of the arguments is invalid or null
|
||||
function test_bad_args() {
|
||||
Assert.throws(() => { paservice.getResource(getPrincipal("http://test.com"), 0, LoadContextInfo.default, cacheListener); }, "url's with no !// aren't allowed");
|
||||
Assert.throws(() => { paservice.getResource(getPrincipal("http://test.com/package!//test"), 0, LoadContextInfo.default, null); }, "should have a callback");
|
||||
Assert.throws(() => { paservice.getResource(null, 0, LoadContextInfo.default, cacheListener); }, "should have a URI");
|
||||
Assert.throws(() => { paservice.getResource(getPrincipal("http://test.com/package!//test"), null, cacheListener); }, "should have a LoadContextInfo");
|
||||
Assert.throws(() => { paservice.getResource(getChannelForURL("http://test.com"), cacheListener); }, "url's with no !// aren't allowed");
|
||||
Assert.throws(() => { paservice.getResource(getChannelForURL("http://test.com/package!//test"), null); }, "should have a callback");
|
||||
Assert.throws(() => { paservice.getResource(null, cacheListener); }, "should have a channel");
|
||||
|
||||
run_next_test();
|
||||
}
|
||||
|
||||
@ -191,13 +214,15 @@ function test_bad_args() {
|
||||
// This tests that the callback gets called, and the cacheListener gets the proper content.
|
||||
function test_callback_gets_called() {
|
||||
packagePath = "/package";
|
||||
paservice.getResource(getPrincipal(uri + packagePath + "!//index.html"), 0, LoadContextInfo.default, cacheListener);
|
||||
let url = uri + packagePath + "!//index.html";
|
||||
paservice.getResource(getChannelForURL(url), cacheListener);
|
||||
}
|
||||
|
||||
// Tests that requesting the same resource returns the same content
|
||||
function test_same_content() {
|
||||
packagePath = "/package";
|
||||
paservice.getResource(getPrincipal(uri + packagePath + "!//index.html"), 0, LoadContextInfo.default, cacheListener);
|
||||
let url = uri + packagePath + "!//index.html";
|
||||
paservice.getResource(getChannelForURL(url), cacheListener);
|
||||
}
|
||||
|
||||
// Check the content handler has been called the expected number of times.
|
||||
@ -209,8 +234,9 @@ function test_request_number() {
|
||||
// This tests that new content is returned if the package has been updated
|
||||
function test_updated_package() {
|
||||
packagePath = "/package";
|
||||
paservice.getResource(getPrincipal(uri + packagePath + "!//index.html"), 0, LoadContextInfo.default,
|
||||
new packagedResourceListener(testData.content[0].data.replace(/\.\.\./g, 'xxx')));
|
||||
let url = uri + packagePath + "!//index.html";
|
||||
paservice.getResource(getChannelForURL(url),
|
||||
new packagedResourceListener(testData.content[0].data.replace(/\.\.\./g, 'xxx')));
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
@ -233,13 +259,15 @@ var listener404 = {
|
||||
// Tests that an error is returned for a non existing package
|
||||
function test_package_does_not_exist() {
|
||||
packagePath = "/package_non_existent";
|
||||
paservice.getResource(getPrincipal(uri + packagePath + "!//index.html"), 0, LoadContextInfo.default, listener404);
|
||||
let url = uri + packagePath + "!//index.html";
|
||||
paservice.getResource(getChannelForURL(url), listener404);
|
||||
}
|
||||
|
||||
// Tests that an error is returned for a non existing resource in a package
|
||||
function test_file_does_not_exist() {
|
||||
packagePath = "/package"; // This package exists
|
||||
paservice.getResource(getPrincipal(uri + packagePath + "!//file_non_existent.html"), 0, LoadContextInfo.default, listener404);
|
||||
let url = uri + packagePath + "!//file_non_existent.html";
|
||||
paservice.getResource(getChannelForURL(url), listener404);
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
@ -280,13 +308,24 @@ function packagedAppBadContentHandler(metadata, response)
|
||||
// Checks that the resource with the proper headers inside the bad package is still returned
|
||||
function test_bad_package() {
|
||||
packagePath = "/badPackage";
|
||||
paservice.getResource(getPrincipal(uri + packagePath + "!//index.html"), 0, LoadContextInfo.default, cacheListener);
|
||||
let url = uri + packagePath + "!//index.html";
|
||||
paservice.getResource(getChannelForURL(url), cacheListener);
|
||||
}
|
||||
|
||||
// Checks that the request for a non-existent resource doesn't hang for a bad package
|
||||
function test_bad_package_404() {
|
||||
packagePath = "/badPackage";
|
||||
paservice.getResource(getPrincipal(uri + packagePath + "!//file_non_existent.html"), 0, LoadContextInfo.default, listener404);
|
||||
let url = uri + packagePath + "!//file_non_existent.html";
|
||||
paservice.getResource(getChannelForURL(url), listener404);
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// NOTE: This test only runs in NON-DEBUG mode.
|
||||
function test_channel_no_loadinfo() {
|
||||
packagePath = "/package";
|
||||
let url = uri + packagePath + "!//index.html";
|
||||
let channel = getChannelForURL(url);
|
||||
channel.loadInfo = null;
|
||||
paservice.getResource(channel, cacheListener);
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
Cu.import('resource://gre/modules/LoadContextInfo.jsm');
|
||||
Cu.import("resource://testing-common/httpd.js");
|
||||
Cu.import("resource://gre/modules/Services.jsm");
|
||||
Cu.import("resource://gre/modules/NetUtil.jsm");
|
||||
|
||||
var gRequestNo = 0;
|
||||
function packagedAppContentHandler(metadata, response)
|
||||
@ -11,11 +12,24 @@ function packagedAppContentHandler(metadata, response)
|
||||
gRequestNo++;
|
||||
}
|
||||
|
||||
function getPrincipal(url) {
|
||||
function getChannelForURL(url) {
|
||||
let uri = createURI(url);
|
||||
let ssm = Cc["@mozilla.org/scriptsecuritymanager;1"]
|
||||
.getService(Ci.nsIScriptSecurityManager);
|
||||
let uri = createURI(url);
|
||||
return ssm.createCodebasePrincipal(uri, {});
|
||||
let principal = ssm.createCodebasePrincipal(uri, {});
|
||||
let tmpChannel =
|
||||
NetUtil.newChannel({
|
||||
uri: url,
|
||||
loadingPrincipal: principal,
|
||||
contentPolicyType: Ci.nsIContentPolicy.TYPE_OTHER
|
||||
});
|
||||
|
||||
tmpChannel.notificationCallbacks =
|
||||
new LoadContextCallback(principal.appId,
|
||||
principal.isInBrowserElement,
|
||||
false,
|
||||
false);
|
||||
return tmpChannel;
|
||||
}
|
||||
|
||||
var subresourcePaths = [
|
||||
@ -119,8 +133,8 @@ function test_paths() {
|
||||
for (var i in subresourcePaths) {
|
||||
packagePath = "/package/" + i;
|
||||
dump("Iteration " + i + "\n");
|
||||
paservice.getResource(getPrincipal(uri + packagePath + "!//" + subresourcePaths[i][1]), 0,
|
||||
LoadContextInfo.default,
|
||||
let url = uri + packagePath + "!//" + subresourcePaths[i][1];
|
||||
paservice.getResource(getChannelForURL(url),
|
||||
new packagedResourceListener(subresourcePaths[i][1], content));
|
||||
yield undefined;
|
||||
}
|
||||
|
@ -109,6 +109,25 @@ def ensureParentDir(path):
|
||||
raise
|
||||
|
||||
|
||||
def readFileContent(name, mode):
|
||||
"""Read the content of file, returns tuple (file existed, file content)"""
|
||||
existed = False
|
||||
old_content = None
|
||||
try:
|
||||
existing = open(name, mode)
|
||||
existed = True
|
||||
except IOError:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
old_content = existing.read()
|
||||
except IOError:
|
||||
pass
|
||||
finally:
|
||||
existing.close()
|
||||
return existed, old_content
|
||||
|
||||
|
||||
class FileAvoidWrite(BytesIO):
|
||||
"""File-like object that buffers output and only writes if content changed.
|
||||
|
||||
@ -127,6 +146,7 @@ class FileAvoidWrite(BytesIO):
|
||||
self._capture_diff = capture_diff
|
||||
self.diff = None
|
||||
self.mode = mode
|
||||
self.force_update = False
|
||||
|
||||
def write(self, buf):
|
||||
if isinstance(buf, unicode):
|
||||
@ -146,23 +166,11 @@ class FileAvoidWrite(BytesIO):
|
||||
"""
|
||||
buf = self.getvalue()
|
||||
BytesIO.close(self)
|
||||
existed = False
|
||||
old_content = None
|
||||
|
||||
try:
|
||||
existing = open(self.name, self.mode)
|
||||
existed = True
|
||||
except IOError:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
old_content = existing.read()
|
||||
if old_content == buf:
|
||||
return True, False
|
||||
except IOError:
|
||||
pass
|
||||
finally:
|
||||
existing.close()
|
||||
existed, old_content = readFileContent(self.name, self.mode)
|
||||
if not self.force_update and old_content == buf:
|
||||
assert existed
|
||||
return existed, False
|
||||
|
||||
ensureParentDir(self.name)
|
||||
with open(self.name, 'w') as file:
|
||||
|
@ -589,6 +589,9 @@ class Certificate:
|
||||
def main(output, inputPath):
|
||||
with open(inputPath) as configStream:
|
||||
output.write(Certificate(configStream).toPEM())
|
||||
# Force updating the output file even if the content does not change
|
||||
# so that we won't be called again simply because of the mtime.
|
||||
output.force_update = True
|
||||
|
||||
# When run as a standalone program, this will read a specification from
|
||||
# stdin and output the certificate as PEM to stdout.
|
||||
|
@ -700,6 +700,9 @@ def keyFromSpecification(specification):
|
||||
def main(output, inputPath):
|
||||
with open(inputPath) as configStream:
|
||||
output.write(keyFromSpecification(configStream.read().strip()).toPEM())
|
||||
# Force updating the output file even if the content does not change
|
||||
# so that we won't be called again simply because of the mtime.
|
||||
output.force_update = True
|
||||
|
||||
# When run as a standalone program, this will read a specification from
|
||||
# stdin and output the certificate as PEM to stdout.
|
||||
|
@ -17,6 +17,10 @@ const BYTE kRetNp = 0xC3;
|
||||
const ULONG64 kMov1 = 0x54894808244C8948;
|
||||
const ULONG64 kMov2 = 0x4C182444894C1024;
|
||||
const ULONG kMov3 = 0x20244C89;
|
||||
const USHORT kTestByte = 0x04F6;
|
||||
const BYTE kPtr = 0x25;
|
||||
const BYTE kRet = 0xC3;
|
||||
const USHORT kJne = 0x0375;
|
||||
|
||||
// Service code for 64 bit systems.
|
||||
struct ServiceEntry {
|
||||
@ -60,11 +64,37 @@ struct ServiceEntryW8 {
|
||||
BYTE nop; // = 90
|
||||
};
|
||||
|
||||
// Service code for 64 bit systems with int 2e fallback.
|
||||
struct ServiceEntryWithInt2E {
|
||||
// This struct contains roughly the following code:
|
||||
// 00 4c8bd1 mov r10,rcx
|
||||
// 03 b855000000 mov eax,52h
|
||||
// 08 f604250803fe7f01 test byte ptr SharedUserData!308, 1
|
||||
// 10 7503 jne [over syscall]
|
||||
// 12 0f05 syscall
|
||||
// 14 c3 ret
|
||||
// 15 cd2e int 2e
|
||||
// 17 c3 ret
|
||||
|
||||
ULONG mov_r10_rcx_mov_eax; // = 4C 8B D1 B8
|
||||
ULONG service_id;
|
||||
USHORT test_byte; // = F6 04
|
||||
BYTE ptr; // = 25
|
||||
ULONG user_shared_data_ptr;
|
||||
BYTE one; // = 01
|
||||
USHORT jne_over_syscall; // = 75 03
|
||||
USHORT syscall; // = 0F 05
|
||||
BYTE ret; // = C3
|
||||
USHORT int2e; // = CD 2E
|
||||
BYTE ret2; // = C3
|
||||
};
|
||||
|
||||
// We don't have an internal thunk for x64.
|
||||
struct ServiceFullThunk {
|
||||
union {
|
||||
ServiceEntry original;
|
||||
ServiceEntryW8 original_w8;
|
||||
ServiceEntryWithInt2E original_int2e_fallback;
|
||||
};
|
||||
};
|
||||
|
||||
@ -78,6 +108,25 @@ bool IsService(const void* source) {
|
||||
kSyscall == service->syscall && kRetNp == service->ret);
|
||||
}
|
||||
|
||||
bool IsServiceW8(const void* source) {
|
||||
const ServiceEntryW8* service =
|
||||
reinterpret_cast<const ServiceEntryW8*>(source);
|
||||
|
||||
return (kMmovR10EcxMovEax == service->mov_r10_rcx_mov_eax &&
|
||||
kMov1 == service->mov_1 && kMov2 == service->mov_2 &&
|
||||
kMov3 == service->mov_3);
|
||||
}
|
||||
|
||||
bool IsServiceWithInt2E(const void* source) {
|
||||
const ServiceEntryWithInt2E* service =
|
||||
reinterpret_cast<const ServiceEntryWithInt2E*>(source);
|
||||
|
||||
return (kMmovR10EcxMovEax == service->mov_r10_rcx_mov_eax &&
|
||||
kTestByte == service->test_byte && kPtr == service->ptr &&
|
||||
kJne == service->jne_over_syscall && kSyscall == service->syscall &&
|
||||
kRet == service->ret && kRet == service->ret2);
|
||||
}
|
||||
|
||||
}; // namespace
|
||||
|
||||
namespace sandbox {
|
||||
@ -150,15 +199,9 @@ bool ServiceResolverThunk::IsFunctionAService(void* local_thunk) const {
|
||||
if (sizeof(function_code) != read)
|
||||
return false;
|
||||
|
||||
if (!IsService(&function_code)) {
|
||||
// See if it's the Win8 signature.
|
||||
ServiceEntryW8* w8_service = &function_code.original_w8;
|
||||
if (!IsService(&w8_service->mov_r10_rcx_mov_eax) ||
|
||||
w8_service->mov_1 != kMov1 || w8_service->mov_1 != kMov1 ||
|
||||
w8_service->mov_1 != kMov1) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (!IsService(&function_code) && !IsServiceW8(&function_code) &&
|
||||
!IsServiceWithInt2E(&function_code))
|
||||
return false;
|
||||
|
||||
// Save the verified code.
|
||||
memcpy(local_thunk, &function_code, sizeof(function_code));
|
||||
|
@ -2,3 +2,4 @@ Chromium Commit Directory / File (relative to securit
|
||||
---------------------------------------- ------------------------------------------------
|
||||
df7cc6c04725630dd4460f29d858a77507343b24 chromium
|
||||
b533d6533585377edd63ec6500469f6c4fba602a chromium/sandbox/win/src/sharedmem_ipc_server.cc
|
||||
034bd64db1806d85b2ceacc736074ac07722af4a chromium/sandbox/win/src/service_resolver_64.cc
|
||||
|
@ -86,3 +86,7 @@ class TestCapabilities(MarionetteTestCase):
|
||||
# Start a new session just to make sure we leave the browser in the
|
||||
# same state it was before it started the test
|
||||
self.marionette.start_session()
|
||||
|
||||
def test_we_get_valid_uuid_4_when_creating_a_session(self):
|
||||
self.assertNotIn("{", self.marionette.session_id, 'Session ID has {} in it. %s ' \
|
||||
% self.marionette.session_id)
|
||||
|
@ -501,9 +501,10 @@ GeckoDriver.prototype.listeningPromise = function() {
|
||||
|
||||
/** Create a new session. */
|
||||
GeckoDriver.prototype.newSession = function(cmd, resp) {
|
||||
let uuid = uuidGen.generateUUID().toString();
|
||||
this.sessionId = cmd.parameters.sessionId ||
|
||||
cmd.parameters.session_id ||
|
||||
uuidGen.generateUUID().toString();
|
||||
uuid.substring(1, uuid.length - 1);
|
||||
|
||||
this.newSessionCommandId = cmd.id;
|
||||
this.setSessionCapabilities(cmd.parameters.capabilities);
|
||||
|
@ -1,10 +1,12 @@
|
||||
import os
|
||||
import re
|
||||
import urlparse
|
||||
from collections import namedtuple
|
||||
|
||||
from mozharness.base.script import ScriptMixin
|
||||
from mozharness.base.log import LogMixin, OutputParser, WARNING
|
||||
from mozharness.base.errors import HgErrorList, VCSException
|
||||
from mozharness.base.transfer import TransferMixin
|
||||
|
||||
HgtoolErrorList = [{
|
||||
'substr': 'abort: HTTP Error 404: Not Found',
|
||||
@ -28,7 +30,7 @@ class HgtoolParser(OutputParser):
|
||||
super(HgtoolParser, self).parse_single_line(line)
|
||||
|
||||
|
||||
class HgtoolVCS(ScriptMixin, LogMixin):
|
||||
class HgtoolVCS(ScriptMixin, LogMixin, TransferMixin):
|
||||
def __init__(self, log_obj=None, config=None, vcs_config=None,
|
||||
script_obj=None):
|
||||
super(HgtoolVCS, self).__init__()
|
||||
@ -113,3 +115,38 @@ class HgtoolVCS(ScriptMixin, LogMixin):
|
||||
raise VCSException("Unable to checkout")
|
||||
|
||||
return parser.got_revision
|
||||
|
||||
def query_pushinfo(self, repository, revision):
|
||||
"""Query the pushdate and pushid of a repository/revision.
|
||||
This is intended to be used on hg.mozilla.org/mozilla-central and
|
||||
similar. It may or may not work for other hg repositories.
|
||||
"""
|
||||
PushInfo = namedtuple('PushInfo', ['pushid', 'pushdate'])
|
||||
|
||||
try:
|
||||
url = '%s/json-pushes?changeset=%s' % (repository, revision)
|
||||
self.info('Pushdate URL is: %s' % url)
|
||||
contents = self.retry(self.load_json_from_url, args=(url,))
|
||||
|
||||
# The contents should be something like:
|
||||
# {
|
||||
# "28537": {
|
||||
# "changesets": [
|
||||
# "1d0a914ae676cc5ed203cdc05c16d8e0c22af7e5",
|
||||
# ],
|
||||
# "date": 1428072488,
|
||||
# "user": "user@mozilla.com"
|
||||
# }
|
||||
# }
|
||||
#
|
||||
# So we grab the first element ("28537" in this case) and then pull
|
||||
# out the 'date' field.
|
||||
pushid = contents.iterkeys().next()
|
||||
self.info('Pushid is: %s' % pushid)
|
||||
pushdate = contents[pushid]['date']
|
||||
self.info('Pushdate is: %s' % pushdate)
|
||||
return PushInfo(pushid, pushdate)
|
||||
|
||||
except Exception:
|
||||
self.exception("Failed to get push info from hg.mozilla.org")
|
||||
raise
|
||||
|
@ -53,19 +53,16 @@ class VCSMixin(object):
|
||||
self.rmtree(dest)
|
||||
raise
|
||||
|
||||
def _get_vcs_class(self, vcs):
|
||||
vcs = vcs or self.config.get('default_vcs', getattr(self, 'default_vcs', None))
|
||||
vcs_class = VCS_DICT.get(vcs)
|
||||
return vcs_class
|
||||
|
||||
def vcs_checkout(self, vcs=None, error_level=FATAL, **kwargs):
|
||||
""" Check out a single repo.
|
||||
"""
|
||||
c = self.config
|
||||
if not vcs:
|
||||
if c.get('default_vcs'):
|
||||
vcs = c['default_vcs']
|
||||
else:
|
||||
try:
|
||||
vcs = self.default_vcs
|
||||
except AttributeError:
|
||||
pass
|
||||
vcs_class = VCS_DICT.get(vcs)
|
||||
vcs_class = self._get_vcs_class(vcs)
|
||||
if not vcs_class:
|
||||
self.error("Running vcs_checkout with kwargs %s" % str(kwargs))
|
||||
raise VCSException("No VCS set!")
|
||||
@ -110,6 +107,20 @@ class VCSMixin(object):
|
||||
self.chdir(orig_dir)
|
||||
return revision_dict
|
||||
|
||||
def vcs_query_pushinfo(self, repository, revision, vcs=None):
|
||||
"""Query the pushid/pushdate of a repository/revision
|
||||
Returns a namedtuple with "pushid" and "pushdate" elements
|
||||
"""
|
||||
vcs_class = self._get_vcs_class(vcs)
|
||||
if not vcs_class:
|
||||
raise VCSException("No VCS set in vcs_query_pushinfo!")
|
||||
vcs_obj = vcs_class(
|
||||
log_obj=self.log_obj,
|
||||
config=self.config,
|
||||
script_obj=self,
|
||||
)
|
||||
return vcs_obj.query_pushinfo(repository, revision)
|
||||
|
||||
|
||||
class VCSScript(VCSMixin, BaseScript):
|
||||
def __init__(self, **kwargs):
|
||||
|
@ -20,6 +20,7 @@ import uuid
|
||||
import copy
|
||||
import glob
|
||||
import logging
|
||||
import shlex
|
||||
from itertools import chain
|
||||
|
||||
# import the power of mozharness ;)
|
||||
@ -29,7 +30,6 @@ import re
|
||||
from mozharness.base.config import BaseConfig, parse_config_file
|
||||
from mozharness.base.log import ERROR, OutputParser, FATAL
|
||||
from mozharness.base.script import PostScriptRun
|
||||
from mozharness.base.transfer import TransferMixin
|
||||
from mozharness.base.vcs.vcsbase import MercurialScript
|
||||
from mozharness.mozilla.buildbot import BuildbotMixin, TBPL_STATUS_DICT, \
|
||||
TBPL_EXCEPTION, TBPL_RETRY, EXIT_STATUS_DICT, TBPL_WARNING, TBPL_SUCCESS, \
|
||||
@ -534,7 +534,7 @@ def generate_build_UID():
|
||||
|
||||
class BuildScript(BuildbotMixin, PurgeMixin, MockMixin, BalrogMixin,
|
||||
SigningMixin, VirtualenvMixin, MercurialScript,
|
||||
TransferMixin, InfluxRecordingMixin):
|
||||
InfluxRecordingMixin):
|
||||
def __init__(self, **kwargs):
|
||||
# objdir is referenced in _query_abs_dirs() so let's make sure we
|
||||
# have that attribute before calling BaseScript.__init__
|
||||
@ -559,10 +559,11 @@ class BuildScript(BuildbotMixin, PurgeMixin, MockMixin, BalrogMixin,
|
||||
self.repo_path = None
|
||||
self.buildid = None
|
||||
self.builduid = None
|
||||
self.pushdate = None
|
||||
self.query_buildid() # sets self.buildid
|
||||
self.query_builduid() # sets self.builduid
|
||||
self.generated_build_props = False
|
||||
self.client_id = None
|
||||
self.access_token = None
|
||||
|
||||
# Call this before creating the virtualenv so that we have things like
|
||||
# symbol_server_host in the config
|
||||
@ -735,39 +736,6 @@ or run without that action (ie: --no-{action})"
|
||||
self.buildid = buildid
|
||||
return self.buildid
|
||||
|
||||
def query_pushdate(self):
|
||||
if self.pushdate:
|
||||
return self.pushdate
|
||||
|
||||
try:
|
||||
url = '%s/json-pushes?changeset=%s' % (
|
||||
self._query_repo(),
|
||||
self.query_revision(),
|
||||
)
|
||||
self.info('Pushdate URL is: %s' % url)
|
||||
contents = self.retry(self.load_json_from_url, args=(url,))
|
||||
|
||||
# The contents should be something like:
|
||||
# {
|
||||
# "28537": {
|
||||
# "changesets": [
|
||||
# "1d0a914ae676cc5ed203cdc05c16d8e0c22af7e5",
|
||||
# ],
|
||||
# "date": 1428072488,
|
||||
# "user": "user@mozilla.com"
|
||||
# }
|
||||
# }
|
||||
#
|
||||
# So we grab the first element ("28537" in this case) and then pull
|
||||
# out the 'date' field.
|
||||
self.pushdate = contents.itervalues().next()['date']
|
||||
self.info('Pushdate is: %s' % self.pushdate)
|
||||
except Exception:
|
||||
self.exception("Failed to get pushdate from hg.mozilla.org")
|
||||
raise
|
||||
|
||||
return self.pushdate
|
||||
|
||||
def _query_objdir(self):
|
||||
if self.objdir:
|
||||
return self.objdir
|
||||
@ -1369,16 +1337,17 @@ or run without that action (ie: --no-{action})"
|
||||
|
||||
self.generated_build_props = True
|
||||
|
||||
def upload_files(self):
|
||||
def _initialize_taskcluster(self):
|
||||
if self.client_id and self.access_token:
|
||||
# Already initialized
|
||||
return
|
||||
|
||||
dirs = self.query_abs_dirs()
|
||||
auth = os.path.join(os.getcwd(), self.config['taskcluster_credentials_file'])
|
||||
credentials = {}
|
||||
execfile(auth, credentials)
|
||||
client_id = credentials.get('taskcluster_clientId')
|
||||
access_token = credentials.get('taskcluster_accessToken')
|
||||
if not client_id or not access_token:
|
||||
self.warning('Skipping S3 file upload: No taskcluster credentials.')
|
||||
return
|
||||
self.client_id = credentials.get('taskcluster_clientId')
|
||||
self.access_token = credentials.get('taskcluster_accessToken')
|
||||
|
||||
# We need to create & activate the virtualenv so that we can import
|
||||
# taskcluster (and its dependent modules, like requests and hawk).
|
||||
@ -1392,50 +1361,92 @@ or run without that action (ie: --no-{action})"
|
||||
# messages while we are testing uploads.
|
||||
logging.getLogger('taskcluster').setLevel(logging.DEBUG)
|
||||
|
||||
routes_json = os.path.join(dirs['abs_src_dir'],
|
||||
routes_file = os.path.join(dirs['abs_src_dir'],
|
||||
'testing/taskcluster/routes.json')
|
||||
with open(routes_json) as f:
|
||||
contents = json.load(f)
|
||||
if self.query_is_nightly():
|
||||
templates = contents['nightly']
|
||||
with open(routes_file) as f:
|
||||
self.routes_json = json.load(f)
|
||||
|
||||
def _taskcluster_upload(self, files, templates, locale='en-US',
|
||||
property_conditions=[]):
|
||||
if not self.client_id or not self.access_token:
|
||||
self.warning('Skipping S3 file upload: No taskcluster credentials.')
|
||||
return
|
||||
|
||||
repo = self._query_repo()
|
||||
revision = self.query_revision()
|
||||
pushinfo = self.vcs_query_pushinfo(repo, revision)
|
||||
|
||||
# Nightly builds with l10n counterparts also publish to the
|
||||
# 'en-US' locale.
|
||||
if self.config.get('publish_nightly_en_US_routes'):
|
||||
templates.extend(contents['l10n'])
|
||||
else:
|
||||
templates = contents['routes']
|
||||
index = self.config.get('taskcluster_index', 'index.garbage.staging')
|
||||
fmt = {
|
||||
'index': index,
|
||||
'project': self.buildbot_config['properties']['branch'],
|
||||
'head_rev': revision,
|
||||
'build_product': self.config['stage_product'],
|
||||
'build_name': self.query_build_name(),
|
||||
'build_type': self.query_build_type(),
|
||||
'locale': locale,
|
||||
}
|
||||
fmt.update(self.buildid_to_dict(self.query_buildid()))
|
||||
routes = []
|
||||
for template in templates:
|
||||
fmt = {
|
||||
'index': index,
|
||||
'project': self.buildbot_config['properties']['branch'],
|
||||
'head_rev': self.query_revision(),
|
||||
'build_product': self.config['stage_product'],
|
||||
'build_name': self.query_build_name(),
|
||||
'build_type': self.query_build_type(),
|
||||
'locale': 'en-US',
|
||||
}
|
||||
fmt.update(self.buildid_to_dict(self.query_buildid()))
|
||||
routes.append(template.format(**fmt))
|
||||
self.info("Using routes: %s" % routes)
|
||||
|
||||
tc = Taskcluster(self.branch,
|
||||
self.query_pushdate(), # Use pushdate as the rank
|
||||
client_id,
|
||||
access_token,
|
||||
pushinfo.pushdate, # Use pushdate as the rank
|
||||
self.client_id,
|
||||
self.access_token,
|
||||
self.log_obj,
|
||||
)
|
||||
|
||||
# TODO: Bug 1165980 - these should be in tree
|
||||
routes.extend([
|
||||
"%s.buildbot.branches.%s.%s" % (index, self.branch, self.stage_platform),
|
||||
"%s.buildbot.revisions.%s.%s.%s" % (index, self.query_revision(), self.branch, self.stage_platform),
|
||||
"%s.buildbot.revisions.%s.%s.%s" % (index, revision, self.branch, self.stage_platform),
|
||||
])
|
||||
task = tc.create_task(routes)
|
||||
tc.claim_task(task)
|
||||
|
||||
# Only those files uploaded with valid extensions are processed.
|
||||
# This ensures that we get the correct packageUrl from the list.
|
||||
valid_extensions = (
|
||||
'.apk',
|
||||
'.dmg',
|
||||
'.mar',
|
||||
'.rpm',
|
||||
'.tar.bz2',
|
||||
'.tar.gz',
|
||||
'.zip',
|
||||
'.json',
|
||||
)
|
||||
|
||||
for upload_file in files:
|
||||
# Create an S3 artifact for each file that gets uploaded. We also
|
||||
# check the uploaded file against the property conditions so that we
|
||||
# can set the buildbot config with the correct URLs for package
|
||||
# locations.
|
||||
tc.create_artifact(task, upload_file)
|
||||
if upload_file.endswith(valid_extensions):
|
||||
for prop, condition in property_conditions:
|
||||
if condition(upload_file):
|
||||
self.set_buildbot_property(prop, tc.get_taskcluster_url(upload_file))
|
||||
break
|
||||
tc.report_completed(task)
|
||||
|
||||
def upload_files(self):
|
||||
self._initialize_taskcluster()
|
||||
dirs = self.query_abs_dirs()
|
||||
|
||||
if self.query_is_nightly():
|
||||
templates = self.routes_json['nightly']
|
||||
|
||||
# Nightly builds with l10n counterparts also publish to the
|
||||
# 'en-US' locale.
|
||||
if self.config.get('publish_nightly_en_US_routes'):
|
||||
templates.extend(self.routes_json['l10n'])
|
||||
else:
|
||||
templates = self.routes_json['routes']
|
||||
|
||||
# Some trees may not be setting uploadFiles, so default to []. Normally
|
||||
# we'd only expect to get here if the build completes successfully,
|
||||
# which means we should have uploadFiles.
|
||||
@ -1485,37 +1496,14 @@ or run without that action (ie: --no-{action})"
|
||||
('packageUrl', lambda m: m.endswith(packageName)),
|
||||
]
|
||||
|
||||
# Only those files uploaded with valid extensions are processed.
|
||||
# This ensures that we get the correct packageUrl from the list.
|
||||
valid_extensions = (
|
||||
'.apk',
|
||||
'.dmg',
|
||||
'.mar',
|
||||
'.rpm',
|
||||
'.tar.bz2',
|
||||
'.tar.gz',
|
||||
'.zip',
|
||||
'.json',
|
||||
)
|
||||
|
||||
# Also upload our mozharness log files
|
||||
files.extend([os.path.join(self.log_obj.abs_log_dir, x) for x in self.log_obj.log_files.values()])
|
||||
|
||||
# Also upload our buildprops.json file.
|
||||
files.extend([os.path.join(dirs['base_work_dir'], 'buildprops.json')])
|
||||
|
||||
for upload_file in files:
|
||||
# Create an S3 artifact for each file that gets uploaded. We also
|
||||
# check the uploaded file against the property conditions so that we
|
||||
# can set the buildbot config with the correct URLs for package
|
||||
# locations.
|
||||
tc.create_artifact(task, upload_file)
|
||||
if upload_file.endswith(valid_extensions):
|
||||
for prop, condition in property_conditions:
|
||||
if condition(upload_file):
|
||||
self.set_buildbot_property(prop, tc.get_taskcluster_url(upload_file))
|
||||
break
|
||||
tc.report_completed(task)
|
||||
self._taskcluster_upload(files, templates,
|
||||
property_conditions=property_conditions)
|
||||
|
||||
# Report some important file sizes for display in treeherder
|
||||
dirs = self.query_abs_dirs()
|
||||
@ -1684,6 +1672,7 @@ or run without that action (ie: --no-{action})"
|
||||
if not self.query_is_nightly():
|
||||
self.info("Not a nightly build, skipping multi l10n.")
|
||||
return
|
||||
self._initialize_taskcluster()
|
||||
|
||||
self._checkout_compare_locales()
|
||||
dirs = self.query_abs_dirs()
|
||||
@ -1719,10 +1708,9 @@ or run without that action (ie: --no-{action})"
|
||||
'echo-variable-PACKAGE',
|
||||
'AB_CD=multi',
|
||||
]
|
||||
package_filename = self.get_output_from_command(
|
||||
package_filename = self.get_output_from_command_m(
|
||||
package_cmd,
|
||||
cwd=objdir,
|
||||
ignore_errors=True,
|
||||
)
|
||||
if not package_filename:
|
||||
self.fatal("Unable to determine the package filename for the multi-l10n build. Was trying to run: %s" % package_cmd)
|
||||
@ -1743,6 +1731,19 @@ or run without that action (ie: --no-{action})"
|
||||
self.set_buildbot_property(prop,
|
||||
parser.matches[prop],
|
||||
write_to_file=True)
|
||||
upload_files_cmd = [
|
||||
'make',
|
||||
'echo-variable-UPLOAD_FILES',
|
||||
'AB_CD=multi',
|
||||
]
|
||||
output = self.get_output_from_command_m(
|
||||
upload_files_cmd,
|
||||
cwd=objdir,
|
||||
)
|
||||
files = shlex.split(output)
|
||||
abs_files = [os.path.abspath(os.path.join(objdir, f)) for f in files]
|
||||
self._taskcluster_upload(abs_files, self.routes_json['l10n'],
|
||||
locale='multi')
|
||||
|
||||
def postflight_build(self, console_output=True):
|
||||
"""grabs properties from post build and calls ccache -s"""
|
||||
|
@ -989,6 +989,16 @@ class DesktopSingleLocale(LocalesMixin, ReleaseMixin, MockMixin, BuildbotMixin,
|
||||
self.set_buildbot_property('funsize_info', json.dumps(funsize_info),
|
||||
write_to_file=True)
|
||||
|
||||
def query_repo(self):
|
||||
# Find the name of our repository
|
||||
mozilla_dir = self.config['mozilla_dir']
|
||||
repo = None
|
||||
for repository in self.config['repos']:
|
||||
if repository.get('dest') == mozilla_dir:
|
||||
repo = repository['repo']
|
||||
break
|
||||
return repo
|
||||
|
||||
def taskcluster_upload(self):
|
||||
auth = os.path.join(os.getcwd(), self.config['taskcluster_credentials_file'])
|
||||
credentials = {}
|
||||
@ -1016,6 +1026,10 @@ class DesktopSingleLocale(LocalesMixin, ReleaseMixin, MockMixin, BuildbotMixin,
|
||||
branch = self.config['branch']
|
||||
platform = self.config['platform']
|
||||
revision = self._query_revision()
|
||||
repo = self.query_repo()
|
||||
if not repo:
|
||||
self.fatal("Unable to determine repository for querying the push info.")
|
||||
pushinfo = self.vcs_query_pushinfo(repo, revision, vcs='hgtool')
|
||||
|
||||
routes_json = os.path.join(self.query_abs_dirs()['abs_mozilla_dir'],
|
||||
'testing/taskcluster/routes.json')
|
||||
@ -1038,10 +1052,10 @@ class DesktopSingleLocale(LocalesMixin, ReleaseMixin, MockMixin, BuildbotMixin,
|
||||
}
|
||||
fmt.update(self.buildid_to_dict(self._query_buildid()))
|
||||
routes.append(template.format(**fmt))
|
||||
self.info('Using routes: %s' % routes)
|
||||
|
||||
self.info('Using routes: %s' % routes)
|
||||
tc = Taskcluster(branch,
|
||||
self.query_pushdate(),
|
||||
pushinfo.pushdate, # Use pushdate as the rank
|
||||
client_id,
|
||||
access_token,
|
||||
self.log_obj,
|
||||
@ -1057,48 +1071,6 @@ class DesktopSingleLocale(LocalesMixin, ReleaseMixin, MockMixin, BuildbotMixin,
|
||||
tc.create_artifact(task, upload_file)
|
||||
tc.report_completed(task)
|
||||
|
||||
def query_pushdate(self):
|
||||
if self.pushdate:
|
||||
return self.pushdate
|
||||
|
||||
mozilla_dir = self.config['mozilla_dir']
|
||||
repo = None
|
||||
for repository in self.config['repos']:
|
||||
if repository.get('dest') == mozilla_dir:
|
||||
repo = repository['repo']
|
||||
break
|
||||
|
||||
if not repo:
|
||||
self.fatal("Unable to determine repository for querying the pushdate.")
|
||||
try:
|
||||
url = '%s/json-pushes?changeset=%s' % (
|
||||
repo,
|
||||
self._query_revision(),
|
||||
)
|
||||
self.info('Pushdate URL is: %s' % url)
|
||||
contents = self.retry(self.load_json_from_url, args=(url,))
|
||||
|
||||
# The contents should be something like:
|
||||
# {
|
||||
# "28537": {
|
||||
# "changesets": [
|
||||
# "1d0a914ae676cc5ed203cdc05c16d8e0c22af7e5",
|
||||
# ],
|
||||
# "date": 1428072488,
|
||||
# "user": "user@mozilla.com"
|
||||
# }
|
||||
# }
|
||||
#
|
||||
# So we grab the first element ("28537" in this case) and then pull
|
||||
# out the 'date' field.
|
||||
self.pushdate = contents.itervalues().next()['date']
|
||||
self.info('Pushdate is: %s' % self.pushdate)
|
||||
except Exception:
|
||||
self.exception("Failed to get pushdate from hg.mozilla.org")
|
||||
raise
|
||||
|
||||
return self.pushdate
|
||||
|
||||
# main {{{
|
||||
if __name__ == '__main__':
|
||||
single_locale = DesktopSingleLocale()
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user