Merge mozilla-central to mozilla-inbound

Carsten "Tomcat" Book 2017-03-28 13:05:57 +02:00
commit bc21c019f3
92 changed files with 1773 additions and 1089 deletions

View File

@ -441,7 +441,14 @@ Function .onInit
StrCpy $CheckboxShortcuts "1"
StrCpy $CheckboxSendPing "1"
!ifdef MOZ_MAINTENANCE_SERVICE
StrCpy $CheckboxInstallMaintSvc "1"
; We can only install the maintenance service if the user is an admin.
Call IsUserAdmin
Pop $0
${If} "$0" == "true"
StrCpy $CheckboxInstallMaintSvc "1"
${Else}
StrCpy $CheckboxInstallMaintSvc "0"
${EndIf}
!else
StrCpy $CheckboxInstallMaintSvc "0"
!endif

View File

@ -517,7 +517,6 @@ webNotifications.allow.accesskey=A
webNotifications.notNow=Not Now
webNotifications.notNow.accesskey=n
webNotifications.never=Never Allow
webNotifications.neverForSession=Never For This Session
webNotifications.never.accesskey=v
webNotifications.receiveFromSite2=Will you allow %S to send notifications?
# LOCALIZATION NOTE (webNotifications.upgradeTitle): When using native notifications on OS X, the title may be truncated around 32 characters.

View File

@ -568,7 +568,7 @@ DesktopNotificationPermissionPrompt.prototype = {
},
get promptActions() {
return [
let actions = [
{
label: gBrowserBundle.GetStringFromName("webNotifications.allow"),
accessKey:
@ -582,16 +582,17 @@ DesktopNotificationPermissionPrompt.prototype = {
gBrowserBundle.GetStringFromName("webNotifications.notNow.accesskey"),
action: SitePermissions.BLOCK,
},
{
label: PrivateBrowsingUtils.isBrowserPrivate(this.browser) ?
gBrowserBundle.GetStringFromName("webNotifications.neverForSession") :
gBrowserBundle.GetStringFromName("webNotifications.never"),
];
if (!PrivateBrowsingUtils.isBrowserPrivate(this.browser)) {
actions.push({
label: gBrowserBundle.GetStringFromName("webNotifications.never"),
accessKey:
gBrowserBundle.GetStringFromName("webNotifications.never.accesskey"),
action: SitePermissions.BLOCK,
scope: SitePermissions.SCOPE_PERSISTENT,
},
];
});
}
return actions;
},
};

View File

@ -4,6 +4,7 @@
"use strict";
const {Cu} = require("chrome");
const Services = require("Services");
const promise = require("promise");
const defer = require("devtools/shared/defer");

View File

@ -74,7 +74,7 @@ var data = [
},
{
wrong: 'user:@example.com:8080/this/is/a/test.html',
fixed: 'http://user:@example.com:8080/this/is/a/test.html',
fixed: 'http://user@example.com:8080/this/is/a/test.html',
},
{
wrong: '//user:pass@example.com:8080/this/is/a/test.html',

View File

@ -867,6 +867,127 @@ waitForAllPaints(function() {
yield ensureElementRemoval(div);
});
add_task(function *no_throttling_animations_in_view_svg() {
/*
On Android throttled animations are left behind on the main thread in some
frames; we will fix this in bug 1247800.
*/
if (isAndroid) {
return;
}
var div = addDiv(null, { style: 'overflow: scroll;' +
'height: 100px; width: 100px;' });
var svg = addSVGElement(div, 'svg', { viewBox: '-10 -10 0.1 0.1',
width: '50px',
height: '50px' });
var rect = addSVGElement(svg, 'rect', { x: '-10',
y: '-10',
width: '10',
height: '10',
fill: 'red' });
var animation = rect.animate({ fill: ['blue', 'lime'] }, 100 * MS_PER_SEC);
yield animation.ready;
var markers = yield observeStyling(5);
is(markers.length, 5,
'CSS animations on an in-view svg element with post-transform should ' +
'not be throttled.');
yield ensureElementRemoval(div);
});
add_task(function *throttling_animations_out_of_view_svg() {
if (!SpecialPowers.getBoolPref('dom.animations.offscreen-throttling')) {
return;
}
/*
On Android throttled animations are left behind on the main thread in some
frames; we will fix this in bug 1247800.
*/
if (isAndroid) {
return;
}
var div = addDiv(null, { style: 'overflow: scroll;' +
'height: 100px; width: 100px;' });
var svg = addSVGElement(div, 'svg', { viewBox: '-10 -10 0.1 0.1',
width: '50px',
height: '50px' });
var rect = addSVGElement(svg, 'rect', { width: '10',
height: '10',
fill: 'red' });
var animation = rect.animate({ fill: ['blue', 'lime'] }, 100 * MS_PER_SEC);
yield animation.ready;
var markers = yield observeStyling(5);
is(markers.length, 0,
'CSS animations on an out-of-view svg element with post-transform ' +
'should be throttled.');
yield ensureElementRemoval(div);
});
add_task(function *no_throttling_animations_in_view_css_transform() {
/*
On Android throttled animations are left behind on the main thread in some
frames; we will fix this in bug 1247800.
*/
if (isAndroid) {
return;
}
var scrollDiv = addDiv(null, { style: 'overflow: scroll; ' +
'height: 100px; width: 100px;' });
var targetDiv = addDiv(null,
{ style: 'animation: background-color 100s;' +
'transform: translate(-50px, -50px);' });
scrollDiv.appendChild(targetDiv);
var animation = targetDiv.getAnimations()[0];
yield animation.ready;
var markers = yield observeStyling(5);
is(markers.length, 5,
'CSS animation on an in-view element with pre-transform should not ' +
'be throttled.');
yield ensureElementRemoval(scrollDiv);
});
add_task(function *throttling_animations_out_of_view_css_transform() {
if (!SpecialPowers.getBoolPref('dom.animations.offscreen-throttling')) {
return;
}
/*
On Android throttled animations are left behind on the main thread in some
frames; we will fix this in bug 1247800.
*/
if (isAndroid) {
return;
}
var scrollDiv = addDiv(null, { style: 'overflow: scroll;' +
'height: 100px; width: 100px;' });
var targetDiv = addDiv(null,
{ style: 'animation: background-color 100s;' +
'transform: translate(100px, 100px);' });
scrollDiv.appendChild(targetDiv);
var animation = targetDiv.getAnimations()[0];
yield animation.ready;
var markers = yield observeStyling(5);
is(markers.length, 0,
'CSS animation on an out-of-view element with pre-transform should be ' +
'throttled.');
yield ensureElementRemoval(scrollDiv);
});
});
</script>

View File

@ -332,3 +332,25 @@ function isOMTAEnabled() {
return SpecialPowers.DOMWindowUtils.layerManagerRemote &&
SpecialPowers.getBoolPref(OMTAPrefKey);
}
/**
* Append an SVG element to the target element.
*
* @param target The element to which the new SVG element is appended.
* @param tag The tag name of the SVG element to create.
* @param attrs An object whose property names and values are set as
*              attributes on the SVG element.
* @return The newly created SVG element.
*/
function addSVGElement(target, tag, attrs) {
if (!target) {
return null;
}
var element = document.createElementNS('http://www.w3.org/2000/svg', tag);
if (attrs) {
for (var attrName in attrs) {
element.setAttributeNS(null, attrName, attrs[attrName]);
}
}
target.appendChild(element);
return element;
}

View File

@ -2700,7 +2700,8 @@ MediaDecoderStateMachine::CreateAudioSink()
auto audioSinkCreator = [self] () {
MOZ_ASSERT(self->OnTaskQueue());
AudioSink* audioSink = new AudioSink(
self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
self->mTaskQueue, self->mAudioQueue,
TimeUnit::FromMicroseconds(self->GetMediaTime()),
self->Info().mAudio, self->mAudioChannel);
self->mAudibleListener = audioSink->AudibleEvent().Connect(
@ -3492,7 +3493,7 @@ MediaDecoderStateMachine::UpdatePlaybackPositionPeriodically()
// Cap the current time to the larger of the audio and video end time.
// This ensures that if we're running off the system clock, we don't
// advance the clock to after the media end time.
if (VideoEndTime() != -1 || AudioEndTime() != -1) {
if (VideoEndTime() > 0 || AudioEndTime() > 0) {
const int64_t clockTime = GetClock();
// Skip frames up to the frame at the playback position, and figure out
@ -3651,7 +3652,7 @@ MediaDecoderStateMachine::AudioEndTime() const
if (mMediaSink->IsStarted()) {
return mMediaSink->GetEndTime(TrackInfo::kAudioTrack);
}
return -1;
return 0;
}
int64_t
@ -3661,7 +3662,7 @@ MediaDecoderStateMachine::VideoEndTime() const
if (mMediaSink->IsStarted()) {
return mMediaSink->GetEndTime(TrackInfo::kVideoTrack);
}
return -1;
return 0;
}
void

View File

@ -170,7 +170,11 @@ private:
DECL_MEDIA_PREF("media.rust.test_mode", RustTestMode, bool, false);
#endif
#if defined(OS_LINUX) && defined(DEBUG)
DECL_MEDIA_PREF("media.rust.mp4parser", EnableRustMP4Parser, bool, true);
#else
DECL_MEDIA_PREF("media.rust.mp4parser", EnableRustMP4Parser, bool, false);
#endif
public:
// Manage the singleton:

View File

@ -11,6 +11,7 @@
#include "nsDeque.h"
#include "MediaEventSource.h"
#include "TimeUnits.h"
namespace mozilla {
@ -131,6 +132,11 @@ public:
}
}
void GetElementsAfter(const media::TimeUnit& aTime,
nsTArray<RefPtr<T>>* aResult) {
GetElementsAfter(aTime.ToMicroseconds(), aResult);
}
void GetFirstElements(uint32_t aMaxElements, nsTArray<RefPtr<T>>* aResult) {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
for (size_t i = 0; i < aMaxElements && i < GetSize(); ++i) {

View File

@ -13,6 +13,7 @@
#include "mozilla/Unused.h"
#include "nsPrintfCString.h"
#include "base/time.h"
#include "GMPUtils.h"
namespace mozilla {
namespace gmp {
@ -154,15 +155,11 @@ ToString(const cdm::KeyInformation* aKeysInfo, uint32_t aKeysInfoCount)
{
nsCString str;
for (uint32_t i = 0; i < aKeysInfoCount; i++) {
nsCString keyId;
const cdm::KeyInformation& key = aKeysInfo[i];
for (size_t k = 0; k < key.key_id_size; k++) {
keyId.Append(nsPrintfCString("%hhX", key.key_id[k]));
}
if (!str.IsEmpty()) {
str.AppendLiteral(",");
}
str.Append(keyId);
const cdm::KeyInformation& key = aKeysInfo[i];
str.Append(ToHexString(key.key_id, key.key_id_size));
str.AppendLiteral("=");
str.AppendInt(key.status);
}
@ -509,7 +506,8 @@ mozilla::ipc::IPCResult
ChromiumCDMChild::RecvDecryptAndDecodeFrame(const CDMInputBuffer& aBuffer)
{
MOZ_ASSERT(IsOnMessageLoopThread());
GMP_LOG("ChromiumCDMChild::RecvDecryptAndDecodeFrame()");
GMP_LOG("ChromiumCDMChild::RecvDecryptAndDecodeFrame() t=%" PRId64 ")",
aBuffer.mTimestamp());
MOZ_ASSERT(mDecoderInitialized);
// The output frame may not have the same timestamp as the frame we put in.
@ -525,8 +523,9 @@ ChromiumCDMChild::RecvDecryptAndDecodeFrame(const CDMInputBuffer& aBuffer)
WidevineVideoFrame frame;
cdm::Status rv = mCDM->DecryptAndDecodeFrame(input, &frame);
GMP_LOG("WidevineVideoDecoder::Decode(timestamp=%" PRId64 ") rv=%d",
input.timestamp,
GMP_LOG("ChromiumCDMChild::RecvDecryptAndDecodeFrame() t=%" PRId64
" CDM decoder rv=%d",
aBuffer.mTimestamp(),
rv);
switch (rv) {
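
The hunk above swaps the hand-rolled per-byte "%hhX" loop for a single call to a shared ToHexString() helper (hence the new GMPUtils.h include). As a rough illustration only, a standalone C++ sketch of what such a hex formatter does might look like the following; the name ToHexStringSketch and the use of std::string are assumptions for the example, not the Gecko API:

#include <cstdint>
#include <string>

// Illustrative only: format a byte buffer as upper-case hex, the job the
// removed "%hhX" loop performed for each key id.
std::string ToHexStringSketch(const uint8_t* aBytes, uint32_t aLength)
{
  static const char kHex[] = "0123456789ABCDEF";
  std::string out;
  out.reserve(aLength * 2);
  for (uint32_t i = 0; i < aLength; ++i) {
    out.push_back(kHex[aBytes[i] >> 4]);    // High nibble first.
    out.push_back(kHex[aBytes[i] & 0x0F]);  // Then the low nibble.
  }
  return out;
}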

View File

@ -33,11 +33,10 @@ static const int32_t LOW_AUDIO_USECS = 300000;
AudioSink::AudioSink(AbstractThread* aThread,
MediaQueue<AudioData>& aAudioQueue,
int64_t aStartTime,
TimeUnit aStartTime,
const AudioInfo& aInfo,
dom::AudioChannel aChannel)
: mStartTime(aStartTime)
, mLastGoodPosition(0)
, mInfo(aInfo)
, mChannel(aChannel)
, mPlaying(true)
@ -48,7 +47,6 @@ AudioSink::AudioSink(AbstractThread* aThread,
, mOwnerThread(aThread)
, mProcessedQueueLength(0)
, mFramesParsed(0)
, mLastEndTime(0)
, mIsAudioDataAudible(false)
, mAudioQueue(aAudioQueue)
{
@ -103,12 +101,13 @@ AudioSink::Init(const PlaybackParams& aParams)
return p;
}
int64_t
TimeUnit
AudioSink::GetPosition()
{
int64_t pos;
int64_t tmp;
if (mAudioStream &&
(pos = mAudioStream->GetPosition()) >= 0) {
(tmp = mAudioStream->GetPosition()) >= 0) {
TimeUnit pos = TimeUnit::FromMicroseconds(tmp);
NS_ASSERTION(pos >= mLastGoodPosition,
"AudioStream position shouldn't go backward");
// Update the last good position when we got a good one.
@ -221,7 +220,7 @@ AudioSink::InitializeAudioStream(const PlaybackParams& aParams)
return NS_OK;
}
int64_t
TimeUnit
AudioSink::GetEndTime() const
{
int64_t written;
@ -229,14 +228,14 @@ AudioSink::GetEndTime() const
MonitorAutoLock mon(mMonitor);
written = mWritten;
}
CheckedInt64 playedUsecs = FramesToUsecs(written, mOutputRate) + mStartTime;
if (!playedUsecs.isValid()) {
TimeUnit played = FramesToTimeUnit(written, mOutputRate) + mStartTime;
if (!played.IsValid()) {
NS_WARNING("Int overflow calculating audio end time");
return -1;
return TimeUnit::Zero();
}
// As we may be resampling, rounding errors may occur. Ensure we never get
// past the original end time.
return std::min<int64_t>(mLastEndTime, playedUsecs.value());
return std::min(mLastEndTime, played);
}
UniquePtr<AudioStream::Chunk>
@ -407,8 +406,8 @@ AudioSink::NotifyAudioNeeded()
// audio hardware, so we can play across the gap.
// Calculate the timestamp of the next chunk of audio in numbers of
// samples.
CheckedInt64 sampleTime = UsecsToFrames(data->mTime - mStartTime,
data->mRate);
CheckedInt64 sampleTime = TimeUnitToFrames(
TimeUnit::FromMicroseconds(data->mTime) - mStartTime, data->mRate);
// Calculate the number of frames that have been pushed onto the audio hardware.
CheckedInt64 missingFrames = sampleTime - mFramesParsed;
@ -450,7 +449,7 @@ AudioSink::NotifyAudioNeeded()
}
}
mLastEndTime = data->GetEndTime();
mLastEndTime = TimeUnit::FromMicroseconds(data->GetEndTime());
mFramesParsed += data->mFrames;
if (mConverter->InputConfig() != mConverter->OutputConfig()) {

View File

@ -32,7 +32,7 @@ class AudioSink : private AudioStream::DataSource {
public:
AudioSink(AbstractThread* aThread,
MediaQueue<AudioData>& aAudioQueue,
int64_t aStartTime,
TimeUnit aStartTime,
const AudioInfo& aInfo,
dom::AudioChannel aChannel);
@ -46,8 +46,8 @@ public:
* All public functions are not thread-safe.
* Called on the task queue of MDSM only.
*/
int64_t GetPosition();
int64_t GetEndTime() const;
TimeUnit GetPosition();
TimeUnit GetEndTime() const;
// Check whether we've pushed more frames to the audio hardware than it has
// played.
@ -80,15 +80,15 @@ private:
// The audio stream resource. Used on the task queue of MDSM only.
RefPtr<AudioStream> mAudioStream;
// The presentation time of the first audio frame that was played in
// microseconds. We can add this to the audio stream position to determine
// The presentation time of the first audio frame that was played.
// We can add this to the audio stream position to determine
// the current audio time.
const int64_t mStartTime;
const TimeUnit mStartTime;
// Keep the last good position returned from the audio stream. Used to ensure
// position returned by GetPosition() is mono-increasing in spite of audio
// stream error. Used on the task queue of MDSM only.
int64_t mLastGoodPosition;
TimeUnit mLastGoodPosition;
const AudioInfo mInfo;
@ -149,7 +149,7 @@ private:
// at the current input framerate.
int64_t mFramesParsed;
Maybe<RefPtr<AudioData>> mLastProcessedPacket;
int64_t mLastEndTime;
TimeUnit mLastEndTime;
// Never modifed after construction.
uint32_t mOutputRate;
uint32_t mOutputChannels;
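
The AudioSink hunks above replace raw int64_t microsecond members (mStartTime, mLastGoodPosition, mLastEndTime) and the int64_t return values of GetPosition()/GetEndTime() with media::TimeUnit. A toy sketch of the idea behind such a wrapper is shown below; it is illustrative only, and the real TimeUnit additionally carries checked, overflow-aware arithmetic, which is why the patch can ask played.IsValid():

#include <cstdint>

// Toy stand-in for media::TimeUnit: a small value type around a microsecond
// count so callers cannot silently mix frame counts, milliseconds and
// microseconds the way bare int64_t fields allow.
class ToyTimeUnit
{
public:
  static ToyTimeUnit FromMicroseconds(int64_t aUs) { return ToyTimeUnit(aUs); }
  static ToyTimeUnit Zero() { return ToyTimeUnit(0); }

  int64_t ToMicroseconds() const { return mUs; }

  ToyTimeUnit operator+(const ToyTimeUnit& aOther) const
  {
    return ToyTimeUnit(mUs + aOther.mUs);
  }
  bool operator<(const ToyTimeUnit& aOther) const { return mUs < aOther.mUs; }
  bool operator>=(const ToyTimeUnit& aOther) const { return mUs >= aOther.mUs; }

private:
  explicit ToyTimeUnit(int64_t aUs) : mUs(aUs) {}
  int64_t mUs;
};

With a typed unit like this, call sites can write std::min(mLastEndTime, played) and compare positions directly, instead of threading std::min<int64_t> and "in microseconds" comments through the code.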

View File

@ -58,9 +58,9 @@ AudioSinkWrapper::GetEndTime(TrackType aType) const
AssertOwnerThread();
MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
if (aType == TrackInfo::kAudioTrack && mAudioSink) {
return mAudioSink->GetEndTime();
return mAudioSink->GetEndTime().ToMicroseconds();
}
return -1;
return 0;
}
int64_t
@ -85,7 +85,7 @@ AudioSinkWrapper::GetPosition(TimeStamp* aTimeStamp) const
if (!mAudioEnded) {
// Rely on the audio sink to report playback position when it is not ended.
pos = mAudioSink->GetPosition();
pos = mAudioSink->GetPosition().ToMicroseconds();
} else if (!mPlayStartTime.IsNull()) {
// Calculate playback position using system clock if we are still playing.
pos = GetVideoPosition(t);

View File

@ -27,7 +27,7 @@ namespace mozilla {
* way to DecodedStreamGraphListener from DecodedStream.
*/
struct PlaybackInfoInit {
int64_t mStartTime;
media::TimeUnit mStartTime;
MediaInfo mInfo;
};
@ -144,8 +144,8 @@ public:
// mNextVideoTime is the end timestamp for the last packet sent to the stream.
// Therefore video packets starting at or after this time need to be copied
// to the output stream.
int64_t mNextVideoTime; // microseconds
int64_t mNextAudioTime; // microseconds
media::TimeUnit mNextVideoTime;
media::TimeUnit mNextAudioTime;
// The last video image sent to the stream. Useful if we need to replicate
// the image.
RefPtr<layers::Image> mLastVideoImage;
@ -234,8 +234,9 @@ DecodedStreamData::GetDebugInfo()
"DecodedStreamData=%p mPlaying=%d mAudioFramesWritten=%" PRId64
" mNextAudioTime=%" PRId64 " mNextVideoTime=%" PRId64 " mHaveSentFinish=%d "
"mHaveSentFinishAudio=%d mHaveSentFinishVideo=%d",
this, mPlaying, mAudioFramesWritten, mNextAudioTime, mNextVideoTime,
mHaveSentFinish, mHaveSentFinishAudio, mHaveSentFinishVideo);
this, mPlaying, mAudioFramesWritten, mNextAudioTime.ToMicroseconds(),
mNextVideoTime.ToMicroseconds(), mHaveSentFinish, mHaveSentFinishAudio,
mHaveSentFinishVideo);
}
DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
@ -298,8 +299,8 @@ DecodedStream::Start(int64_t aStartTime, const MediaInfo& aInfo)
AssertOwnerThread();
MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
mStartTime.emplace(aStartTime);
mLastOutputTime = 0;
mStartTime.emplace(FromMicroseconds(aStartTime));
mLastOutputTime = media::TimeUnit::Zero();
mInfo = aInfo;
mPlaying = true;
ConnectListener();
@ -345,7 +346,7 @@ DecodedStream::Start(int64_t aStartTime, const MediaInfo& aInfo)
MozPromiseHolder<GenericPromise> promise;
mFinishPromise = promise.Ensure(__func__);
PlaybackInfoInit init {
aStartTime, aInfo
FromMicroseconds(aStartTime), aInfo
};
nsCOMPtr<nsIRunnable> r =
new R(Move(init), Move(promise), mOutputStreamManager, mAbstractMainThread);
@ -447,7 +448,7 @@ DecodedStream::SetPreservesPitch(bool aPreservesPitch)
}
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
SendStreamAudio(DecodedStreamData* aStream, const media::TimeUnit& aStartTime,
AudioData* aData, AudioSegment* aOutput, uint32_t aRate,
const PrincipalHandle& aPrincipalHandle)
{
@ -458,14 +459,14 @@ SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
AudioData* audio = aData;
// This logic has to mimic AudioSink closely to make sure we write
// the exact same silences
CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
UsecsToFrames(aStartTime, aRate);
CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten
+ TimeUnitToFrames(aStartTime, aRate);
CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);
if (!audioWrittenOffset.isValid() ||
!frameOffset.isValid() ||
// ignore packet that we've already processed
audio->GetEndTime() <= aStream->mNextAudioTime) {
audio->GetEndTime() <= aStream->mNextAudioTime.ToMicroseconds()) {
return;
}
@ -491,7 +492,7 @@ SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, aPrincipalHandle);
aStream->mAudioFramesWritten += audio->mFrames;
aStream->mNextAudioTime = audio->GetEndTime();
aStream->mNextAudioTime = media::TimeUnit::FromMicroseconds(audio->GetEndTime());
}
void
@ -540,17 +541,17 @@ DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin,
static void
WriteVideoToMediaStream(MediaStream* aStream,
layers::Image* aImage,
int64_t aEndMicroseconds,
int64_t aStartMicroseconds,
const media::TimeUnit& aEnd,
const media::TimeUnit& aStart,
const mozilla::gfx::IntSize& aIntrinsicSize,
const TimeStamp& aTimeStamp,
VideoSegment* aOutput,
const PrincipalHandle& aPrincipalHandle)
{
RefPtr<layers::Image> image = aImage;
StreamTime duration =
aStream->MicrosecondsToStreamTimeRoundDown(aEndMicroseconds) -
aStream->MicrosecondsToStreamTimeRoundDown(aStartMicroseconds);
auto end = aStream->MicrosecondsToStreamTimeRoundDown(aEnd.ToMicroseconds());
auto start = aStream->MicrosecondsToStreamTimeRoundDown(aStart.ToMicroseconds());
StreamTime duration = end - start;
aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize,
aPrincipalHandle, false, aTimeStamp);
}
@ -594,7 +595,7 @@ DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHa
for (uint32_t i = 0; i < video.Length(); ++i) {
VideoData* v = video[i];
if (mData->mNextVideoTime < v->mTime) {
if (mData->mNextVideoTime.ToMicroseconds() < v->mTime) {
// Write last video frame to catch up. mLastVideoImage can be null here
// which is fine, it just means there's no video.
@ -604,19 +605,21 @@ DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHa
// video frame). E.g. if we have a video frame that is 30 sec long
// and capture happens at 15 sec, we'll have to append a black frame
// that is 15 sec long.
WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->mTime),
&output, aPrincipalHandle);
mData->mNextVideoTime = v->mTime;
WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
FromMicroseconds(v->mTime),
mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->mTime),
&output, aPrincipalHandle);
mData->mNextVideoTime = FromMicroseconds(v->mTime);
}
if (mData->mNextVideoTime < v->GetEndTime()) {
WriteVideoToMediaStream(sourceStream, v->mImage, v->GetEndTime(),
mData->mNextVideoTime, v->mDisplay,
tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->GetEndTime()),
&output, aPrincipalHandle);
mData->mNextVideoTime = v->GetEndTime();
if (mData->mNextVideoTime.ToMicroseconds() < v->GetEndTime()) {
WriteVideoToMediaStream(sourceStream, v->mImage,
FromMicroseconds(v->GetEndTime()),
mData->mNextVideoTime, v->mDisplay,
tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->GetEndTime()),
&output, aPrincipalHandle);
mData->mNextVideoTime = FromMicroseconds(v->GetEndTime());
mData->mLastVideoImage = v->mImage;
mData->mLastVideoImageDisplaySize = v->mDisplay;
}
@ -639,13 +642,13 @@ DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHa
if (mData->mEOSVideoCompensation) {
VideoSegment endSegment;
// Calculate the deviation clock time from DecodedStream.
int64_t deviation_usec = sourceStream->StreamTimeToMicroseconds(1);
auto deviation = FromMicroseconds(sourceStream->StreamTimeToMicroseconds(1));
WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
mData->mNextVideoTime + deviation_usec, mData->mNextVideoTime,
mData->mLastVideoImageDisplaySize,
tracksStartTimeStamp + TimeDuration::FromMicroseconds(mData->mNextVideoTime + deviation_usec),
&endSegment, aPrincipalHandle);
mData->mNextVideoTime += deviation_usec;
mData->mNextVideoTime + deviation, mData->mNextVideoTime,
mData->mLastVideoImageDisplaySize,
tracksStartTimeStamp + (mData->mNextVideoTime + deviation).ToTimeDuration(),
&endSegment, aPrincipalHandle);
mData->mNextVideoTime += deviation;
MOZ_ASSERT(endSegment.GetDuration() > 0);
if (!aIsSameOrigin) {
endSegment.ReplaceWithDisabled();
@ -672,7 +675,7 @@ DecodedStream::AdvanceTracks()
if (mInfo.HasVideo()) {
StreamTime videoEnd = mData->mStream->MicrosecondsToStreamTimeRoundDown(
mData->mNextVideoTime - mStartTime.ref());
(mData->mNextVideoTime - mStartTime.ref()).ToMicroseconds());
endPosition = std::max(endPosition, videoEnd);
}
@ -715,15 +718,15 @@ DecodedStream::GetEndTime(TrackType aType) const
{
AssertOwnerThread();
if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
CheckedInt64 t = mStartTime.ref() +
FramesToUsecs(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
if (t.isValid()) {
return t.value();
auto t = mStartTime.ref() + FramesToTimeUnit(
mData->mAudioFramesWritten, mInfo.mAudio.mRate);
if (t.IsValid()) {
return t.ToMicroseconds();
}
} else if (aType == TrackInfo::kVideoTrack && mData) {
return mData->mNextVideoTime;
return mData->mNextVideoTime.ToMicroseconds();
}
return -1;
return 0;
}
int64_t
@ -736,14 +739,14 @@ DecodedStream::GetPosition(TimeStamp* aTimeStamp) const
if (aTimeStamp) {
*aTimeStamp = TimeStamp::Now();
}
return mStartTime.ref() + mLastOutputTime;
return (mStartTime.ref() + mLastOutputTime).ToMicroseconds();
}
void
DecodedStream::NotifyOutput(int64_t aTime)
{
AssertOwnerThread();
mLastOutputTime = aTime;
mLastOutputTime = FromMicroseconds(aTime);
int64_t currentTime = GetPosition();
// Remove audio samples that have been played by MSG from the queue.
@ -784,9 +787,10 @@ nsCString
DecodedStream::GetDebugInfo()
{
AssertOwnerThread();
int64_t startTime = mStartTime.isSome() ? mStartTime->ToMicroseconds() : -1;
return nsPrintfCString(
"DecodedStream=%p mStartTime=%" PRId64 " mLastOutputTime=%" PRId64 " mPlaying=%d mData=%p",
this, mStartTime.valueOr(-1), mLastOutputTime, mPlaying, mData.get())
this, startTime, mLastOutputTime.ToMicroseconds(), mPlaying, mData.get())
+ (mData ? nsCString("\n") + mData->GetDebugInfo() : nsCString());
}

View File

@ -71,6 +71,10 @@ protected:
virtual ~DecodedStream();
private:
media::TimeUnit FromMicroseconds(int64_t aTime)
{
return media::TimeUnit::FromMicroseconds(aTime);
}
void DestroyData(UniquePtr<DecodedStreamData> aData);
void AdvanceTracks();
void SendAudio(double aVolume, bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle);
@ -107,8 +111,8 @@ private:
PlaybackParams mParams;
Maybe<int64_t> mStartTime;
int64_t mLastOutputTime = 0; // microseconds
media::NullableTimeUnit mStartTime;
media::TimeUnit mLastOutputTime;
MediaInfo mInfo;
MediaQueue<AudioData>& mAudioQueue;

View File

@ -60,7 +60,7 @@ public:
virtual RefPtr<GenericPromise> OnEnded(TrackType aType) = 0;
// Return the end time of the audio/video data that has been consumed
// or -1 if no such track.
// or 0 if no such track.
// Must be called after playback starts.
virtual int64_t GetEndTime(TrackType aType) const = 0;

View File

@ -41,7 +41,7 @@ VideoSink::VideoSink(AbstractThread* aThread,
, mContainer(aContainer)
, mProducerID(ImageContainer::AllocateProducerID())
, mFrameStats(aFrameStats)
, mVideoFrameEndTime(-1)
, mVideoFrameEndTime(0)
, mHasVideo(false)
, mUpdateScheduler(aThread)
, mVideoQueueSendToCompositorSize(aVQueueSentToCompositerSize)
@ -95,7 +95,7 @@ VideoSink::GetEndTime(TrackType aType) const
} else if (aType == TrackInfo::kAudioTrack) {
return mAudioSink->GetEndTime(aType);
}
return -1;
return 0;
}
int64_t
@ -225,7 +225,7 @@ VideoSink::Stop()
mEndPromiseHolder.ResolveIfExists(true, __func__);
mEndPromise = nullptr;
}
mVideoFrameEndTime = -1;
mVideoFrameEndTime = 0;
}
bool

View File

@ -1073,6 +1073,10 @@ WebMTrackDemuxer::Seek(const media::TimeUnit& aTime)
mParent->SeekInternal(mType, aTime);
nsresult rv = mParent->GetNextPacket(mType, &mSamples);
if (NS_FAILED(rv)) {
if (rv == NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
// Ignore the error for now; the next GetSample will be rejected with EOS.
return SeekPromise::CreateAndResolve(media::TimeUnit(), __func__);
}
return SeekPromise::CreateAndReject(rv, __func__);
}
mNeedKeyframe = true;

View File

@ -16,14 +16,17 @@
#include "dirent.h"
#include "poll.h"
#include "sys/stat.h"
#if defined(ANDROID)
#if defined(XP_LINUX)
#include <sys/vfs.h>
#define statvfs statfs
#define f_frsize f_bsize
#else
#include "sys/statvfs.h"
#endif // defined(XP_LINUX)
#if !defined(ANDROID)
#include "sys/wait.h"
#include <spawn.h>
#endif // defined(ANDROID)
#endif // !defined(ANDROID)
#endif // defined(XP_UNIX)
#if defined(XP_LINUX)
@ -699,7 +702,7 @@ static const dom::ConstantSpec gLibcProperties[] =
{ "OSFILE_SIZEOF_STATVFS", JS::Int32Value(sizeof (struct statvfs)) },
{ "OSFILE_OFFSETOF_STATVFS_F_BSIZE", JS::Int32Value(offsetof (struct statvfs, f_bsize)) },
{ "OSFILE_OFFSETOF_STATVFS_F_FRSIZE", JS::Int32Value(offsetof (struct statvfs, f_frsize)) },
{ "OSFILE_OFFSETOF_STATVFS_F_BAVAIL", JS::Int32Value(offsetof (struct statvfs, f_bavail)) },
#endif // defined(XP_UNIX)

View File

@ -6,9 +6,15 @@ var success = 0;
try {
parent[name].success = 1;
parent.postMessage(success ? "success" : "failure", "http://mochi.test:8888");
parent.postMessage({
from: name,
result: success ? "success" : "failure"
}, "http://mochi.test:8888");
} catch (e) {
parent.postMessage(e.toString(), "http://mochi.test:8888");
parent.postMessage({
from: name,
result: e.toString()
}, "http://mochi.test:8888");
}
</script>

View File

@ -15,22 +15,22 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=440572
/** Test for Bug 440572 **/
var messages = [];
var messages = new Map();
function receiveMessage(e)
{
is(e.origin, "http://example.org", "wrong sender!");
messages.push(e.data);
messages.set(e.data.from, e.data.result);
}
window.addEventListener("message", receiveMessage);
function runtests()
{
is(messages.length, 3, "received the right number of messages.");
is(messages[0], "success", "test in frame failed.");
isnot(messages[1], "success", "parent[\"content\"] should be the WebIDL property of Window.");
isnot(messages[2], "success", "parent[\"dump\"] should be the WebIDL property of Window.");
is(messages.size, 3, "received the right number of messages.");
is(messages.get("test"), "success", "test in frame failed.");
isnot(messages.get("content"), "success", "parent[\"content\"] should be the WebIDL property of Window.");
isnot(messages.get("dump"), "success", "parent[\"dump\"] should be the WebIDL property of Window.");
SimpleTest.finish();
}

View File

@ -6,5 +6,6 @@ support-files =
WebVRHelpers.js
[test_vrDisplay_getFrameData.html]
[test_vrDisplay_exitPresent.html]
[test_vrDisplay_requestPresent.html]
skip-if = true

View File

@ -0,0 +1,45 @@
<!DOCTYPE html>
<html>
<head>
<title>VRDisplay ExitPresent</title>
<meta name="timeout" content="long"/>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="runVRTest.js"></script>
</head>
<body>
<script>
function testExitPresentOnOtherIframe(content) {
return content.navigator.getVRDisplays().then((displays) => {
content.vrDisplay = displays[0];
return content.vrDisplay.exitPresent();
});
}
var initVRPresentation = function(content) {
return content.navigator.getVRDisplays().then((displays) => {
console.log("GetVRDisplay!!");
content.vrDisplay = displays[0];
content.canvas = content.document.createElement("canvas");
content.canvas.id = "vrCanvas";
return content.vrDisplay.requestPresent([{source:content.canvas}]);
});
}
function startTest() {
var ifr1 = document.getElementById("iframe1");
var ifr2 = document.getElementById("iframe2");
var frame1 = ifr1.contentWindow;
var frame2 = ifr2.contentWindow;
initVRPresentation(frame1).then(() => {
promise_test((test) => {
return promise_rejects(test, null, testExitPresentOnOtherIframe(frame2));
}, "We cannot exist VR presentation established by another content, this promise is expected to be rejected.")
});
}
runVRTest(startTest);
</script>
<iframe id="iframe1"></iframe>
<iframe id="iframe2"></iframe>
</body>
</html>

View File

@ -10397,7 +10397,7 @@ IsFrameScrolledOutOfView(nsIFrame *aFrame)
}
nsIFrame *scrollableParent = do_QueryFrame(scrollableFrame);
nsRect rect = aFrame->GetVisualOverflowRect();
nsRect rect = aFrame->GetVisualOverflowRectRelativeToSelf();
nsRect transformedRect =
nsLayoutUtils::TransformFrameRectToAncestor(aFrame,

View File

@ -6,7 +6,7 @@
== animate-opacity.html animate-opacity.html
== animate-preserves3d.html animate-preserves3d.html
== in-visibility-hidden-animation.html in-visibility-hidden-animation.html
skip-if(stylo) == in-visibility-hidden-animation-pseudo-element.html in-visibility-hidden-animation-pseudo-element.html # Bug 1331047
== in-visibility-hidden-animation-pseudo-element.html in-visibility-hidden-animation-pseudo-element.html
== partially-out-of-view-animation.html partially-out-of-view-animation.html
== animate-display-table-opacity.html animate-display-table-opacity.html
# We need to run 100% opacity test case when OMTA is disabled to check that the animation creates a stacking context even if the animation is not running on the compositor

View File

@ -185,8 +185,8 @@ CSSStyleSheet::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
// double-counting the inner. We use last instead of first since the first
// sheet may be held in the nsXULPrototypeCache and not used in a window at
// all.
if (mInner->mSheets.LastElement() == s) {
n += Inner()->SizeOfIncludingThis(aMallocSizeOf);
if (s->Inner()->mSheets.LastElement() == s) {
n += s->Inner()->SizeOfIncludingThis(aMallocSizeOf);
}
// Measurement of the following members may be added later if DMD finds it

View File

@ -48,8 +48,11 @@ namespace stagefright {
static const int64_t OVERFLOW_ERROR = -INT64_MAX;
// Calculate units*1,000,000/hz, trying to avoid overflow.
// Return OVERFLOW_ERROR in case of unavoidable overflow.
// Return OVERFLOW_ERROR in case of unavoidable overflow, or when hz == 0.
int64_t unitsToUs(int64_t units, int64_t hz) {
if (hz == 0) {
return OVERFLOW_ERROR;
}
const int64_t MAX_S = INT64_MAX / 1000000;
if (std::abs(units) <= MAX_S) {
return units * 1000000 / hz;
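
The diff context ends before the rest of unitsToUs, so only the new hz == 0 guard is visible above. As a hedged sketch of the overall pattern the comment describes (scale directly when units is small enough, otherwise avoid the overflow), something along these lines would work; it is an assumption about the general technique, not a copy of the remaining Gecko code:

#include <cstdint>
#include <cstdlib>
#include <limits>

const int64_t kOverflowError = -std::numeric_limits<int64_t>::max();

// Illustrative only: convert "units" ticks at "hz" ticks per second into
// microseconds without overflowing int64_t.
int64_t UnitsToUsSketch(int64_t units, int64_t hz)
{
  if (hz == 0) {
    return kOverflowError;  // Division by zero is treated like overflow.
  }
  const int64_t maxScalable = std::numeric_limits<int64_t>::max() / 1000000;
  if (std::abs(units) <= maxScalable) {
    return units * 1000000 / hz;  // Small enough to scale first.
  }
  // Otherwise divide first, then scale the quotient and remainder separately.
  const int64_t seconds = units / hz;
  const int64_t rest = units % hz;
  if (std::abs(seconds) >= maxScalable || std::abs(rest) > maxScalable) {
    return kOverflowError;  // Still not representable; give up.
  }
  return seconds * 1000000 + rest * 1000000 / hz;
}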

View File

@ -7,6 +7,7 @@
#include "MediaData.h"
#include "MediaPrefs.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/Preferences.h"
#include "mp4_demuxer/BufferStream.h"
#include "mp4_demuxer/MP4Metadata.h"
#include "mp4_demuxer/MoofParser.h"
@ -204,6 +205,7 @@ static const TestFileData testFiles[] = {
{ "test_case_1301065-u64max.mp4", 0, -1, 0, 0, 1, 0, false, 0, false, false, 2 },
{ "test_case_1329061.mov", 0, -1, 0, 0, 1, 234567981,
false, 0, false, false, 2 },
{ "test_case_1351094.mp4", 0, -1, 0, 0, 0, -1, false, 0, true, true, 0 },
};
static const TestFileData rustTestFiles[] = {
@ -248,88 +250,95 @@ static const TestFileData rustTestFiles[] = {
{ "test_case_1301065-u64max.mp4", 0, -1, 0, 0, 1, 0, false, 0, false, false, 2 },
{ "test_case_1329061.mov", 0, -1, 0, 0, 1, 234567981,
false, 0, false, false, 2 },
{ "test_case_1351094.mp4", 0, -1, 0, 0, 0, -1, false, 0, true, true, 0 },
};
TEST(stagefright_MPEG4Metadata, test_case_mp4)
{
const TestFileData* tests = nullptr;
size_t length = 0;
for (bool rust : { !MediaPrefs::EnableRustMP4Parser(),
MediaPrefs::EnableRustMP4Parser() }) {
mozilla::Preferences::SetBool("media.rust.mp4parser", rust);
ASSERT_EQ(rust, MediaPrefs::EnableRustMP4Parser());
if (MediaPrefs::EnableRustMP4Parser()) {
tests = rustTestFiles;
length = ArrayLength(rustTestFiles);
} else {
tests = testFiles;
length = ArrayLength(testFiles);
}
const TestFileData* tests = nullptr;
size_t length = 0;
for (size_t test = 0; test < length; ++test) {
nsTArray<uint8_t> buffer = ReadTestFile(tests[test].mFilename);
ASSERT_FALSE(buffer.IsEmpty());
RefPtr<Stream> stream = new TestStream(buffer.Elements(), buffer.Length());
RefPtr<MediaByteBuffer> metadataBuffer = MP4Metadata::Metadata(stream);
EXPECT_TRUE(metadataBuffer);
MP4Metadata metadata(stream);
EXPECT_EQ(0u, metadata.GetNumberTracks(TrackInfo::kUndefinedTrack));
EXPECT_EQ(tests[test].mNumberAudioTracks,
metadata.GetNumberTracks(TrackInfo::kAudioTrack));
EXPECT_EQ(tests[test].mNumberVideoTracks,
metadata.GetNumberTracks(TrackInfo::kVideoTrack));
EXPECT_EQ(0u, metadata.GetNumberTracks(TrackInfo::kTextTrack));
EXPECT_EQ(0u, metadata.GetNumberTracks(static_cast<TrackInfo::TrackType>(-1)));
EXPECT_FALSE(metadata.GetTrackInfo(TrackInfo::kUndefinedTrack, 0));
UniquePtr<TrackInfo> trackInfo = metadata.GetTrackInfo(TrackInfo::kVideoTrack, 0);
if (tests[test].mNumberVideoTracks == 0) {
EXPECT_TRUE(!trackInfo);
if (rust) {
tests = rustTestFiles;
length = ArrayLength(rustTestFiles);
} else {
ASSERT_TRUE(!!trackInfo);
const VideoInfo* videoInfo = trackInfo->GetAsVideoInfo();
ASSERT_TRUE(!!videoInfo);
EXPECT_TRUE(videoInfo->IsValid());
EXPECT_TRUE(videoInfo->IsVideo());
EXPECT_EQ(tests[test].mVideoDuration, videoInfo->mDuration);
EXPECT_EQ(tests[test].mWidth, videoInfo->mDisplay.width);
EXPECT_EQ(tests[test].mHeight, videoInfo->mDisplay.height);
UniquePtr<IndiceWrapper> indices = metadata.GetTrackIndice(videoInfo->mTrackId);
EXPECT_TRUE(!!indices);
for (size_t i = 0; i < indices->Length(); i++) {
Index::Indice data;
EXPECT_TRUE(indices->GetIndice(i, data));
EXPECT_TRUE(data.start_offset <= data.end_offset);
EXPECT_TRUE(data.start_composition <= data.end_composition);
}
tests = testFiles;
length = ArrayLength(testFiles);
}
trackInfo = metadata.GetTrackInfo(TrackInfo::kAudioTrack, 0);
if (tests[test].mNumberAudioTracks == 0) {
EXPECT_TRUE(!trackInfo);
} else {
ASSERT_TRUE(!!trackInfo);
const AudioInfo* audioInfo = trackInfo->GetAsAudioInfo();
ASSERT_TRUE(!!audioInfo);
EXPECT_TRUE(audioInfo->IsValid());
EXPECT_TRUE(audioInfo->IsAudio());
EXPECT_EQ(tests[test].mAudioDuration, audioInfo->mDuration);
EXPECT_EQ(tests[test].mAudioProfile, audioInfo->mProfile);
if (tests[test].mAudioDuration != audioInfo->mDuration) {
MOZ_RELEASE_ASSERT(false);
}
UniquePtr<IndiceWrapper> indices = metadata.GetTrackIndice(audioInfo->mTrackId);
EXPECT_TRUE(!!indices);
for (size_t i = 0; i < indices->Length(); i++) {
Index::Indice data;
EXPECT_TRUE(indices->GetIndice(i, data));
EXPECT_TRUE(data.start_offset <= data.end_offset);
EXPECT_TRUE(int64_t(data.start_composition) <= int64_t(data.end_composition));
for (size_t test = 0; test < length; ++test) {
nsTArray<uint8_t> buffer = ReadTestFile(tests[test].mFilename);
ASSERT_FALSE(buffer.IsEmpty());
RefPtr<Stream> stream = new TestStream(buffer.Elements(), buffer.Length());
RefPtr<MediaByteBuffer> metadataBuffer = MP4Metadata::Metadata(stream);
EXPECT_TRUE(metadataBuffer);
MP4Metadata metadata(stream);
EXPECT_EQ(0u, metadata.GetNumberTracks(TrackInfo::kUndefinedTrack));
EXPECT_EQ(tests[test].mNumberAudioTracks,
metadata.GetNumberTracks(TrackInfo::kAudioTrack));
EXPECT_EQ(tests[test].mNumberVideoTracks,
metadata.GetNumberTracks(TrackInfo::kVideoTrack));
EXPECT_EQ(0u, metadata.GetNumberTracks(TrackInfo::kTextTrack));
EXPECT_EQ(0u, metadata.GetNumberTracks(static_cast<TrackInfo::TrackType>(-1)));
EXPECT_FALSE(metadata.GetTrackInfo(TrackInfo::kUndefinedTrack, 0));
UniquePtr<TrackInfo> trackInfo = metadata.GetTrackInfo(TrackInfo::kVideoTrack, 0);
if (tests[test].mNumberVideoTracks == 0) {
EXPECT_TRUE(!trackInfo);
} else {
ASSERT_TRUE(!!trackInfo);
const VideoInfo* videoInfo = trackInfo->GetAsVideoInfo();
ASSERT_TRUE(!!videoInfo);
EXPECT_TRUE(videoInfo->IsValid());
EXPECT_TRUE(videoInfo->IsVideo());
EXPECT_EQ(tests[test].mVideoDuration, videoInfo->mDuration);
EXPECT_EQ(tests[test].mWidth, videoInfo->mDisplay.width);
EXPECT_EQ(tests[test].mHeight, videoInfo->mDisplay.height);
UniquePtr<IndiceWrapper> indices = metadata.GetTrackIndice(videoInfo->mTrackId);
EXPECT_TRUE(!!indices);
for (size_t i = 0; i < indices->Length(); i++) {
Index::Indice data;
EXPECT_TRUE(indices->GetIndice(i, data));
EXPECT_TRUE(data.start_offset <= data.end_offset);
EXPECT_TRUE(data.start_composition <= data.end_composition);
}
}
trackInfo = metadata.GetTrackInfo(TrackInfo::kAudioTrack, 0);
if (tests[test].mNumberAudioTracks == 0) {
EXPECT_TRUE(!trackInfo);
} else {
ASSERT_TRUE(!!trackInfo);
const AudioInfo* audioInfo = trackInfo->GetAsAudioInfo();
ASSERT_TRUE(!!audioInfo);
EXPECT_TRUE(audioInfo->IsValid());
EXPECT_TRUE(audioInfo->IsAudio());
EXPECT_EQ(tests[test].mAudioDuration, audioInfo->mDuration);
EXPECT_EQ(tests[test].mAudioProfile, audioInfo->mProfile);
if (tests[test].mAudioDuration != audioInfo->mDuration) {
MOZ_RELEASE_ASSERT(false);
}
UniquePtr<IndiceWrapper> indices = metadata.GetTrackIndice(audioInfo->mTrackId);
EXPECT_TRUE(!!indices);
for (size_t i = 0; i < indices->Length(); i++) {
Index::Indice data;
EXPECT_TRUE(indices->GetIndice(i, data));
EXPECT_TRUE(data.start_offset <= data.end_offset);
EXPECT_TRUE(int64_t(data.start_composition) <= int64_t(data.end_composition));
}
}
EXPECT_FALSE(metadata.GetTrackInfo(TrackInfo::kTextTrack, 0));
EXPECT_FALSE(metadata.GetTrackInfo(static_cast<TrackInfo::TrackType>(-1), 0));
// We can seek anywhere in any MPEG4.
EXPECT_TRUE(metadata.CanSeek());
EXPECT_EQ(tests[test].mHasCrypto, metadata.Crypto().valid);
}
EXPECT_FALSE(metadata.GetTrackInfo(TrackInfo::kTextTrack, 0));
EXPECT_FALSE(metadata.GetTrackInfo(static_cast<TrackInfo::TrackType>(-1), 0));
// We can seek anywhere in any MPEG4.
EXPECT_TRUE(metadata.CanSeek());
EXPECT_EQ(tests[test].mHasCrypto, metadata.Crypto().valid);
}
}
@ -373,49 +382,55 @@ TEST(stagefright_MPEG4Metadata, test_case_mp4_subsets)
TEST(stagefright_MoofParser, test_case_mp4)
{
const TestFileData* tests = nullptr;
size_t length = 0;
for (bool rust : { !MediaPrefs::EnableRustMP4Parser(),
MediaPrefs::EnableRustMP4Parser() }) {
mozilla::Preferences::SetBool("media.rust.mp4parser", rust);
ASSERT_EQ(rust, MediaPrefs::EnableRustMP4Parser());
if (MediaPrefs::EnableRustMP4Parser()) {
tests = rustTestFiles;
length = ArrayLength(rustTestFiles);
} else {
tests = testFiles;
length = ArrayLength(testFiles);
}
const TestFileData* tests = nullptr;
size_t length = 0;
for (size_t test = 0; test < length; ++test) {
nsTArray<uint8_t> buffer = ReadTestFile(tests[test].mFilename);
ASSERT_FALSE(buffer.IsEmpty());
RefPtr<Stream> stream = new TestStream(buffer.Elements(), buffer.Length());
MoofParser parser(stream, 0, false);
EXPECT_EQ(0u, parser.mOffset);
EXPECT_FALSE(parser.ReachedEnd());
EXPECT_TRUE(parser.mInitRange.IsEmpty());
EXPECT_TRUE(parser.HasMetadata());
RefPtr<MediaByteBuffer> metadataBuffer = parser.Metadata();
EXPECT_TRUE(metadataBuffer);
EXPECT_FALSE(parser.mInitRange.IsEmpty());
const MediaByteRangeSet byteRanges(
MediaByteRange(0, int64_t(buffer.Length())));
EXPECT_EQ(tests[test].mValidMoof,
parser.RebuildFragmentedIndex(byteRanges));
if (tests[test].mMoofReachedOffset == 0) {
EXPECT_EQ(buffer.Length(), parser.mOffset);
EXPECT_TRUE(parser.ReachedEnd());
if (rust) {
tests = rustTestFiles;
length = ArrayLength(rustTestFiles);
} else {
EXPECT_EQ(tests[test].mMoofReachedOffset, parser.mOffset);
EXPECT_FALSE(parser.ReachedEnd());
tests = testFiles;
length = ArrayLength(testFiles);
}
EXPECT_FALSE(parser.mInitRange.IsEmpty());
EXPECT_TRUE(parser.GetCompositionRange(byteRanges).IsNull());
EXPECT_TRUE(parser.FirstCompleteMediaSegment().IsEmpty());
EXPECT_EQ(tests[test].mHeader,
!parser.FirstCompleteMediaHeader().IsEmpty());
for (size_t test = 0; test < length; ++test) {
nsTArray<uint8_t> buffer = ReadTestFile(tests[test].mFilename);
ASSERT_FALSE(buffer.IsEmpty());
RefPtr<Stream> stream = new TestStream(buffer.Elements(), buffer.Length());
MoofParser parser(stream, 0, false);
EXPECT_EQ(0u, parser.mOffset);
EXPECT_FALSE(parser.ReachedEnd());
EXPECT_TRUE(parser.mInitRange.IsEmpty());
EXPECT_TRUE(parser.HasMetadata());
RefPtr<MediaByteBuffer> metadataBuffer = parser.Metadata();
EXPECT_TRUE(metadataBuffer);
EXPECT_FALSE(parser.mInitRange.IsEmpty());
const MediaByteRangeSet byteRanges(
MediaByteRange(0, int64_t(buffer.Length())));
EXPECT_EQ(tests[test].mValidMoof,
parser.RebuildFragmentedIndex(byteRanges));
if (tests[test].mMoofReachedOffset == 0) {
EXPECT_EQ(buffer.Length(), parser.mOffset);
EXPECT_TRUE(parser.ReachedEnd());
} else {
EXPECT_EQ(tests[test].mMoofReachedOffset, parser.mOffset);
EXPECT_FALSE(parser.ReachedEnd());
}
EXPECT_FALSE(parser.mInitRange.IsEmpty());
EXPECT_TRUE(parser.GetCompositionRange(byteRanges).IsNull());
EXPECT_TRUE(parser.FirstCompleteMediaSegment().IsEmpty());
EXPECT_EQ(tests[test].mHeader,
!parser.FirstCompleteMediaHeader().IsEmpty());
}
}
}

View File

@ -35,6 +35,7 @@ TEST_HARNESS_FILES.gtest += [
'test_case_1301065-u64max.mp4',
'test_case_1301065.mp4',
'test_case_1329061.mov',
'test_case_1351094.mp4',
]
if CONFIG['MOZ_RUST']:

Binary file not shown.

View File

@ -829,11 +829,13 @@ nsStandardURL::BuildNormalizedSpec(const char *spec)
i = AppendSegmentToBuf(buf, i, spec, username, mUsername,
&encUsername, useEncUsername, &diff);
ShiftFromPassword(diff);
if (password.mLen >= 0) {
if (password.mLen > 0) {
buf[i++] = ':';
i = AppendSegmentToBuf(buf, i, spec, password, mPassword,
&encPassword, useEncPassword, &diff);
ShiftFromHost(diff);
} else {
mPassword.mLen = -1;
}
buf[i++] = '@';
}
@ -1702,7 +1704,7 @@ nsStandardURL::SetUserPass(const nsACString &input)
usernameLen),
esc_Username | esc_AlwaysCopy,
buf, ignoredOut);
if (passwordLen >= 0) {
if (passwordLen > 0) {
buf.Append(':');
passwordLen = encoder.EncodeSegmentCount(userpass.get(),
URLSegment(passwordPos,
@ -1710,6 +1712,8 @@ nsStandardURL::SetUserPass(const nsACString &input)
esc_Password |
esc_AlwaysCopy, buf,
ignoredOut);
} else {
passwordLen = -1;
}
if (mUsername.mLen < 0)
buf.Append('@');
@ -1740,8 +1744,9 @@ nsStandardURL::SetUserPass(const nsACString &input)
// update positions and lengths
mUsername.mLen = usernameLen;
mPassword.mLen = passwordLen;
if (passwordLen)
if (passwordLen > 0) {
mPassword.mPos = mUsername.mPos + mUsername.mLen + 1;
}
CALL_RUST_SETTER(SetUserPass, input);
return NS_OK;
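
Together with the urlbar fixup expectation updated earlier in this patch (user:@example.com now fixes up without the ':'), these hunks make an empty password serialize like no password at all: the ':' separator is only written when the password length is greater than zero. A minimal standalone sketch of that rule, using a hypothetical helper rather than the nsStandardURL API:

#include <string>

// Illustrative only: build the userinfo portion of a URL. An empty password
// no longer produces a dangling ':' after the username.
std::string SerializeUserInfo(const std::string& aUser,
                              const std::string& aPass)
{
  if (aUser.empty() && aPass.empty()) {
    return std::string();  // No userinfo section at all.
  }
  std::string out = aUser;
  if (!aPass.empty()) {
    out += ':';
    out += aPass;  // Separator only for a non-empty password.
  }
  out += '@';
  return out;
}

// SerializeUserInfo("user", "")     -> "user@"
// SerializeUserInfo("user", "pass") -> "user:pass@"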

View File

@ -5,11 +5,12 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ChannelEventQueue.h"
#include "mozilla/Assertions.h"
#include "mozilla/Unused.h"
#include "nsISupports.h"
#include "mozilla/net/ChannelEventQueue.h"
#include "mozilla/Unused.h"
#include "nsThreadUtils.h"
#include "mozilla/Unused.h"
namespace mozilla {
namespace net {
@ -39,29 +40,94 @@ ChannelEventQueue::FlushQueue()
nsCOMPtr<nsISupports> kungFuDeathGrip(mOwner);
mozilla::Unused << kungFuDeathGrip; // Not used in this function
// Prevent flushed events from flushing the queue recursively
bool needResumeOnOtherThread = false;
{
MutexAutoLock lock(mMutex);
mFlushing = true;
}
// Don't allow events enqueued during the flush to run out of order; this
// makes sure all events are run.
ReentrantMonitorAutoEnter monitor(mRunningMonitor);
while (true) {
UniquePtr<ChannelEvent> event(TakeEvent());
if (!event) {
break;
// Prevent flushed events from flushing the queue recursively
{
MutexAutoLock lock(mMutex);
MOZ_ASSERT(!mFlushing);
mFlushing = true;
}
event->Run();
while (true) {
UniquePtr<ChannelEvent> event(TakeEvent());
if (!event) {
break;
}
nsCOMPtr<nsIEventTarget> target = event->GetEventTarget();
MOZ_ASSERT(target);
bool isCurrentThread = false;
nsresult rv = target->IsOnCurrentThread(&isCurrentThread);
if (NS_WARN_IF(NS_FAILED(rv))) {
// If we cannot tell which thread this is, assert on Aurora/Nightly; in the
// release channel, simply run the event on the current thread.
MOZ_DIAGNOSTIC_ASSERT(false);
isCurrentThread = true;
}
if (!isCurrentThread) {
// The next event needs to run on another thread. Put it back at the front
// of the queue and try to resume on that thread.
Suspend();
PrependEvent(event);
needResumeOnOtherThread = true;
break;
}
event->Run();
}
{
MutexAutoLock lock(mMutex);
MOZ_ASSERT(mFlushing);
mFlushing = false;
MOZ_ASSERT(mEventQueue.IsEmpty() || (needResumeOnOtherThread || mSuspended || !!mForcedCount));
}
}
MutexAutoLock lock(mMutex);
mFlushing = false;
// The flush procedure is aborted because the next event cannot be run on the
// current thread. We need to resume event processing right after the flush
// procedure is finished.
// Note: we cannot call Resume() while "mFlushing == true" because
// CompleteResume will not trigger FlushQueue while there is an ongoing flush.
if (needResumeOnOtherThread) {
Resume();
}
}
void
ChannelEventQueue::Resume()
ChannelEventQueue::Suspend()
{
MutexAutoLock lock(mMutex);
SuspendInternal();
}
void
ChannelEventQueue::SuspendInternal()
{
mMutex.AssertCurrentThreadOwns();
mSuspended = true;
mSuspendCount++;
}
void ChannelEventQueue::Resume()
{
MutexAutoLock lock(mMutex);
ResumeInternal();
}
void
ChannelEventQueue::ResumeInternal()
{
mMutex.AssertCurrentThreadOwns();
// Resuming w/o suspend: error in debug builds, ignored in release builds.
MOZ_ASSERT(mSuspendCount > 0);
@ -70,44 +136,24 @@ ChannelEventQueue::Resume()
}
if (!--mSuspendCount) {
RefPtr<Runnable> event =
NewRunnableMethod(this, &ChannelEventQueue::CompleteResume);
if (mTargetThread) {
mTargetThread->Dispatch(event.forget(), NS_DISPATCH_NORMAL);
} else {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
Unused << NS_WARN_IF(NS_FAILED(NS_DispatchToCurrentThread(event.forget())));
if (mEventQueue.IsEmpty()) {
// Nothing in the queue to flush; simply clear the flag.
mSuspended = false;
return;
}
// Worker thread requires a CancelableRunnable.
RefPtr<Runnable> event =
NewCancelableRunnableMethod(this, &ChannelEventQueue::CompleteResume);
nsCOMPtr<nsIEventTarget> target;
target = mEventQueue[0]->GetEventTarget();
MOZ_ASSERT(target);
Unused << NS_WARN_IF(NS_FAILED(target->Dispatch(event.forget(),
NS_DISPATCH_NORMAL)));
}
}
nsresult
ChannelEventQueue::RetargetDeliveryTo(nsIEventTarget* aTargetThread)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
MOZ_RELEASE_ASSERT(!mTargetThread);
MOZ_RELEASE_ASSERT(aTargetThread);
mTargetThread = do_QueryInterface(aTargetThread);
MOZ_RELEASE_ASSERT(mTargetThread);
return NS_OK;
}
nsresult
ChannelEventQueue::ResetDeliveryTarget()
{
MutexAutoLock lock(mMutex);
MOZ_RELEASE_ASSERT(mEventQueue.IsEmpty());
MOZ_RELEASE_ASSERT(mSuspendCount == 0);
MOZ_RELEASE_ASSERT(!mSuspended);
MOZ_RELEASE_ASSERT(!mForced);
MOZ_RELEASE_ASSERT(!mFlushing);
mTargetThread = nullptr;
return NS_OK;
}
} // namespace net
} // namespace mozilla

View File

@ -10,11 +10,15 @@
#include "nsTArray.h"
#include "nsAutoPtr.h"
#include "nsIEventTarget.h"
#include "nsThreadUtils.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Mutex.h"
#include "mozilla/ReentrantMonitor.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"
class nsISupports;
class nsIEventTarget;
namespace mozilla {
namespace net {
@ -25,6 +29,20 @@ class ChannelEvent
ChannelEvent() { MOZ_COUNT_CTOR(ChannelEvent); }
virtual ~ChannelEvent() { MOZ_COUNT_DTOR(ChannelEvent); }
virtual void Run() = 0;
virtual already_AddRefed<nsIEventTarget> GetEventTarget() = 0;
};
class MainThreadChannelEvent : public ChannelEvent
{
public:
MainThreadChannelEvent() { MOZ_COUNT_CTOR(MainThreadChannelEvent); }
virtual ~MainThreadChannelEvent() { MOZ_COUNT_DTOR(MainThreadChannelEvent); }
already_AddRefed<nsIEventTarget>
GetEventTarget() override
{
return do_GetMainThread();
}
};
// Workaround for Necko re-entrancy dangers. We buffer IPDL messages in a
@ -42,10 +60,11 @@ class ChannelEventQueue final
explicit ChannelEventQueue(nsISupports *owner)
: mSuspendCount(0)
, mSuspended(false)
, mForced(false)
, mForcedCount(0)
, mFlushing(false)
, mOwner(owner)
, mMutex("ChannelEventQueue::mMutex")
, mRunningMonitor("ChannelEventQueue::mRunningMonitor")
{}
// Puts IPDL-generated channel event into queue, to be run later
@ -56,6 +75,9 @@ class ChannelEventQueue final
// assertion when the event is executed directly.
inline void RunOrEnqueue(ChannelEvent* aCallback,
bool aAssertionWhenNotQueued = false);
// Append ChannelEvent in front of the event queue.
inline nsresult PrependEvent(UniquePtr<ChannelEvent>& aEvent);
inline nsresult PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents);
// After StartForcedQueueing is called, RunOrEnqueue() will start enqueuing
@ -69,25 +91,20 @@ class ChannelEventQueue final
// Suspend/resume event queue. RunOrEnqueue() will start enqueuing
// events and they will be run/flushed when resume is called. These should be
// called when the channel owning the event queue is suspended/resumed.
inline void Suspend();
void Suspend();
// Resume flushes the queue asynchronously, i.e., items in the queue will be
// dispatched in a new event on the current thread.
void Resume();
// Retargets delivery of events to the target thread specified.
nsresult RetargetDeliveryTo(nsIEventTarget* aTargetThread);
// Nulls out the delivery target so events are delivered to the main
// thread. Should only be called when the queue is known to be empty.
// Useful if the queue will be re-used.
nsresult ResetDeliveryTarget();
private:
// Private destructor, to discourage deletion outside of Release():
~ChannelEventQueue()
{
}
void SuspendInternal();
void ResumeInternal();
inline void MaybeFlushQueue();
void FlushQueue();
inline void CompleteResume();
@ -97,17 +114,18 @@ class ChannelEventQueue final
nsTArray<UniquePtr<ChannelEvent>> mEventQueue;
uint32_t mSuspendCount;
bool mSuspended;
bool mForced;
bool mSuspended;
uint32_t mForcedCount; // Support ForcedQueueing on multiple thread.
bool mFlushing;
// Keep ptr to avoid refcount cycle: only grab ref during flushing.
nsISupports *mOwner;
// For atomic mEventQueue operation and state update
Mutex mMutex;
// EventTarget for delivery of events to the correct thread.
nsCOMPtr<nsIEventTarget> mTargetThread;
// To guarantee event execution order among threads
ReentrantMonitor mRunningMonitor;
friend class AutoEventEnqueuer;
};
@ -118,20 +136,44 @@ ChannelEventQueue::RunOrEnqueue(ChannelEvent* aCallback,
{
MOZ_ASSERT(aCallback);
// Executing the events could destroy the channel (and run our own destructor)
// unless we make sure its refcount doesn't drop to 0 while this method is
// running.
nsCOMPtr<nsISupports> kungFuDeathGrip(mOwner);
Unused << kungFuDeathGrip; // Not used in this function
// To avoid leaks.
UniquePtr<ChannelEvent> event(aCallback);
// To guarantee that the running event and all the events generated within
// it will be finished before events on other threads run.
ReentrantMonitorAutoEnter monitor(mRunningMonitor);
{
MutexAutoLock lock(mMutex);
bool enqueue = mForced || mSuspended || mFlushing;
MOZ_ASSERT(enqueue == true || mEventQueue.IsEmpty(),
"Should always enqueue if ChannelEventQueue not empty");
bool enqueue = !!mForcedCount || mSuspended || mFlushing || !mEventQueue.IsEmpty();
if (enqueue) {
mEventQueue.AppendElement(Move(event));
return;
}
nsCOMPtr<nsIEventTarget> target = event->GetEventTarget();
MOZ_ASSERT(target);
bool isCurrentThread = false;
DebugOnly<nsresult> rv = target->IsOnCurrentThread(&isCurrentThread);
MOZ_ASSERT(NS_SUCCEEDED(rv));
if (!isCurrentThread) {
// Leverage the Suspend/Resume mechanism to trigger the flush procedure
// without creating a new one.
SuspendInternal();
mEventQueue.AppendElement(Move(event));
ResumeInternal();
return;
}
}
MOZ_RELEASE_ASSERT(!aAssertionWhenNotQueued);
@ -142,18 +184,45 @@ inline void
ChannelEventQueue::StartForcedQueueing()
{
MutexAutoLock lock(mMutex);
mForced = true;
++mForcedCount;
}
inline void
ChannelEventQueue::EndForcedQueueing()
{
bool tryFlush = false;
{
MutexAutoLock lock(mMutex);
mForced = false;
MOZ_ASSERT(mForcedCount > 0);
if(!--mForcedCount) {
tryFlush = true;
}
}
MaybeFlushQueue();
if (tryFlush) {
MaybeFlushQueue();
}
}
inline nsresult
ChannelEventQueue::PrependEvent(UniquePtr<ChannelEvent>& aEvent)
{
MutexAutoLock lock(mMutex);
// Prepending an event while no queue flush is foreseen might cause the
// following channel events to never run. This assertion guarantees that a
// queue flush, triggered either by Resume or by EndForcedQueueing, will
// execute the added event.
MOZ_ASSERT(mSuspended || !!mForcedCount);
UniquePtr<ChannelEvent>* newEvent =
mEventQueue.InsertElementAt(0, Move(aEvent));
if (!newEvent) {
return NS_ERROR_OUT_OF_MEMORY;
}
return NS_OK;
}
inline nsresult
@ -161,6 +230,12 @@ ChannelEventQueue::PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents)
{
MutexAutoLock lock(mMutex);
// Prepending an event while no queue flush is foreseen might cause the
// following channel events to never run. This assertion guarantees that a
// queue flush, triggered either by Resume or by EndForcedQueueing, will
// execute the added events.
MOZ_ASSERT(mSuspended || !!mForcedCount);
UniquePtr<ChannelEvent>* newEvents =
mEventQueue.InsertElementsAt(0, aEvents.Length());
if (!newEvents) {
@ -174,18 +249,10 @@ ChannelEventQueue::PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents)
return NS_OK;
}
inline void
ChannelEventQueue::Suspend()
{
MutexAutoLock lock(mMutex);
mSuspended = true;
mSuspendCount++;
}
inline void
ChannelEventQueue::CompleteResume()
{
bool tryFlush = false;
{
MutexAutoLock lock(mMutex);
@ -196,10 +263,13 @@ ChannelEventQueue::CompleteResume()
// messages) until this point, else new incoming messages could run before
// queued ones.
mSuspended = false;
tryFlush = true;
}
}
MaybeFlushQueue();
if (tryFlush) {
MaybeFlushQueue();
}
}
inline void
@ -211,7 +281,7 @@ ChannelEventQueue::MaybeFlushQueue()
{
MutexAutoLock lock(mMutex);
flushQueue = !mForced && !mFlushing && !mSuspended &&
flushQueue = !mForcedCount && !mFlushing && !mSuspended &&
!mEventQueue.IsEmpty();
}
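
For orientation, the essence of the suspend-count bookkeeping these ChannelEventQueue hunks rework (queue while suspended, forced, flushing, or non-empty; flush when the count returns to zero) can be sketched in isolation as below. The sketch deliberately omits the per-event target-thread handling this patch adds via GetEventTarget(); the class and member names are illustrative assumptions:

#include <cstdint>
#include <functional>
#include <mutex>
#include <utility>
#include <vector>

// Illustrative only: suspend-count + deferred-flush pattern. Events are
// queued while the count is non-zero (or while older events are pending) and
// flushed once the count drops back to zero.
class ToyEventQueue
{
public:
  void Suspend()
  {
    std::lock_guard<std::mutex> lock(mMutex);
    ++mSuspendCount;
  }

  void Resume()
  {
    std::vector<std::function<void()>> toRun;
    {
      std::lock_guard<std::mutex> lock(mMutex);
      if (mSuspendCount == 0) {
        return;  // Resume without a matching suspend: ignore in this sketch.
      }
      if (--mSuspendCount == 0) {
        toRun.swap(mQueue);  // Flush outside the lock to avoid re-entrancy.
      }
    }
    for (auto& event : toRun) {
      event();
    }
  }

  void RunOrEnqueue(std::function<void()> aEvent)
  {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      if (mSuspendCount > 0 || !mQueue.empty()) {
        mQueue.push_back(std::move(aEvent));  // Keep ordering intact.
        return;
      }
    }
    aEvent();  // Nothing pending, so it is safe to run immediately.
  }

private:
  std::mutex mMutex;
  uint32_t mSuspendCount = 0;
  std::vector<std::function<void()>> mQueue;
};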

View File

@ -263,6 +263,12 @@ public:
mLastModified, mEntityID, mURI);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
FTPChannelChild* mChild;
nsresult mChannelStatus;
@ -369,6 +375,12 @@ public:
mChild->DoOnDataAvailable(mChannelStatus, mData, mOffset, mCount);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
FTPChannelChild* mChild;
nsresult mChannelStatus;
@ -412,6 +424,12 @@ class MaybeDivertOnDataFTPEvent : public ChannelEvent
mChild->MaybeDivertOnData(mData, mOffset, mCount);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
FTPChannelChild* mChild;
nsCString mData;
@ -498,6 +516,10 @@ public:
mChild->DoOnStopRequest(mChannelStatus, mErrorMsg, mUseUTF8);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
return do_GetMainThread();
}
private:
FTPChannelChild* mChild;
nsresult mChannelStatus;
@ -559,6 +581,12 @@ class MaybeDivertOnStopFTPEvent : public ChannelEvent
mChild->MaybeDivertOnStop(mChannelStatus);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
FTPChannelChild* mChild;
nsresult mChannelStatus;
@ -646,6 +674,13 @@ class FTPFailedAsyncOpenEvent : public ChannelEvent
FTPFailedAsyncOpenEvent(FTPChannelChild* aChild, nsresult aStatus)
: mChild(aChild), mStatus(aStatus) {}
void Run() { mChild->DoFailedAsyncOpen(mStatus); }
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
FTPChannelChild* mChild;
nsresult mStatus;
@ -698,6 +733,13 @@ class FTPFlushedForDiversionEvent : public ChannelEvent
{
mChild->FlushedForDiversion();
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
FTPChannelChild* mChild;
};
@ -747,6 +789,13 @@ class FTPDeleteSelfEvent : public ChannelEvent
explicit FTPDeleteSelfEvent(FTPChannelChild* aChild)
: mChild(aChild) {}
void Run() { mChild->DoDeleteSelf(); }
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
FTPChannelChild* mChild;
};
@ -961,7 +1010,19 @@ FTPChannelChild::EnsureDispatcher()
nsCOMPtr<nsIEventTarget> target =
mDispatcher->EventTargetFor(TaskCategory::Network);
gNeckoChild->SetEventTargetForActor(this, target);
mEventQ->RetargetDeliveryTo(target);
mNeckoTarget = target;
}
already_AddRefed<nsIEventTarget>
FTPChannelChild::GetNeckoTarget()
{
nsCOMPtr<nsIEventTarget> target = mNeckoTarget;
if (!target) {
target = do_GetMainThread();
}
return target.forget();
}
} // namespace net
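Each FTP channel event above now reports a delivery target via GetEventTarget(), falling back to the main thread when no necko target has been set. A plausible helper showing how a flush step could use that target; this is an assumption for illustration only, since the actual flush logic is not part of this hunk:
// Hypothetical helper (not from this patch): decide whether an event can run
// in place or must be re-dispatched to the target it reports.
bool EventRunsOnCurrentThread(ChannelEvent* aEvent)
{
  nsCOMPtr<nsIEventTarget> target = aEvent->GetEventTarget();
  MOZ_ASSERT(target);
  bool current = false;
  target->IsOnCurrentThread(&current);
  return current;  // if false, dispatch the event to |target| instead
}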

View File

@ -18,6 +18,7 @@
#include "nsIResumableChannel.h"
#include "nsIChildChannel.h"
#include "nsIDivertableChannel.h"
#include "nsIEventTarget.h"
#include "nsIStreamListener.h"
#include "PrivateBrowsingChannel.h"
@ -123,9 +124,13 @@ protected:
friend class FTPStopRequestEvent;
friend class MaybeDivertOnStopFTPEvent;
friend class FTPFailedAsyncOpenEvent;
friend class FTPFlushedForDiversionEvent;
friend class FTPDeleteSelfEvent;
private:
// Get event target for processing network events.
already_AddRefed<nsIEventTarget> GetNeckoTarget();
nsCOMPtr<nsIInputStream> mUploadStream;
bool mIPCOpen;
@ -154,6 +159,9 @@ private:
// diverting callbacks to parent.
bool mSuspendSent;
// EventTarget for labeling networking events.
nsCOMPtr<nsIEventTarget> mNeckoTarget;
RefPtr<Dispatcher> mDispatcher;
void EnsureDispatcher();

View File

@ -240,7 +240,7 @@ FTPChannelParent::RecvResume()
return IPC_OK();
}
class FTPDivertDataAvailableEvent : public ChannelEvent
class FTPDivertDataAvailableEvent : public MainThreadChannelEvent
{
public:
FTPDivertDataAvailableEvent(FTPChannelParent* aParent,
@ -331,7 +331,7 @@ FTPChannelParent::DivertOnDataAvailable(const nsCString& data,
}
}
class FTPDivertStopRequestEvent : public ChannelEvent
class FTPDivertStopRequestEvent : public MainThreadChannelEvent
{
public:
FTPDivertStopRequestEvent(FTPChannelParent* aParent,
@ -391,7 +391,7 @@ FTPChannelParent::DivertOnStopRequest(const nsresult& statusCode)
OnStopRequest(mChannel, nullptr, status);
}
class FTPDivertCompleteEvent : public ChannelEvent
class FTPDivertCompleteEvent : public MainThreadChannelEvent
{
public:
explicit FTPDivertCompleteEvent(FTPChannelParent* aParent)

View File

@ -150,7 +150,8 @@ private:
NS_IMPL_ISUPPORTS(AddHeadersToChannelVisitor, nsIHttpHeaderVisitor)
HttpBaseChannel::HttpBaseChannel()
: mStartPos(UINT64_MAX)
: mCanceled(false)
, mStartPos(UINT64_MAX)
, mStatus(NS_OK)
, mLoadFlags(LOAD_NORMAL)
, mCaps(0)
@ -158,7 +159,6 @@ HttpBaseChannel::HttpBaseChannel()
, mPriority(PRIORITY_NORMAL)
, mRedirectionLimit(gHttpHandler->RedirectionLimit())
, mApplyConversion(true)
, mCanceled(false)
, mIsPending(false)
, mWasOpened(false)
, mRequestObserversCalled(false)

View File

@ -8,6 +8,7 @@
#ifndef mozilla_net_HttpBaseChannel_h
#define mozilla_net_HttpBaseChannel_h
#include "mozilla/Atomics.h"
#include "nsHttp.h"
#include "nsAutoPtr.h"
#include "nsHashPropertyBag.h"
@ -442,6 +443,10 @@ private:
void ReleaseMainThreadOnlyReferences();
protected:
// Use Release-Acquire ordering to ensure that the off-main-thread ODA is
// ignored while the channel is being canceled on the main thread.
Atomic<bool, ReleaseAcquire> mCanceled;
nsTArray<Pair<nsString, nsString>> mSecurityConsoleMessages;
nsCOMPtr<nsIStreamListener> mListener;
@ -485,7 +490,6 @@ protected:
uint8_t mRedirectionLimit;
uint32_t mApplyConversion : 1;
uint32_t mCanceled : 1;
uint32_t mIsPending : 1;
uint32_t mWasOpened : 1;
// if 1 all "http-on-{opening|modify|etc}-request" observers have been called
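mCanceled moves out of the bitfield and becomes Atomic<bool, ReleaseAcquire>, so a cancellation performed on the main thread is reliably observed by the off-main-thread OnDataAvailable path. A standalone std::atomic analogue of that ordering (a sketch only; the patch itself uses mozilla::Atomic):
#include <atomic>

std::atomic<bool> canceled{false};

// Main thread: cancel and publish the write with release semantics.
void CancelChannel() {
  canceled.store(true, std::memory_order_release);
}

// ODA thread: acquire-load the flag and drop the data once a cancel is seen.
bool ShouldDeliverData() {
  return !canceled.load(std::memory_order_acquire);
}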

View File

@ -20,6 +20,7 @@
#include "mozilla/net/NeckoChild.h"
#include "mozilla/net/HttpChannelChild.h"
#include "nsCOMPtr.h"
#include "nsISupportsPrimitives.h"
#include "nsChannelClassifier.h"
#include "nsGlobalWindow.h"
@ -42,8 +43,11 @@
#include "nsIDeprecationWarner.h"
#include "nsICompressConvStats.h"
#include "nsIDocument.h"
#include "nsIDOMDocument.h"
#include "nsIDOMWindowUtils.h"
#include "nsIEventTarget.h"
#include "nsStreamUtils.h"
#include "nsThreadUtils.h"
#ifdef OS_POSIX
#include "chrome/common/file_descriptor_set_posix.h"
@ -180,6 +184,7 @@ HttpChannelChild::HttpChannelChild()
, mPostRedirectChannelShouldUpgrade(false)
, mShouldParentIntercept(false)
, mSuspendParentAfterSynthesizeResponse(false)
, mEventTargetMutex("HttpChannelChild::EventTargetMutex")
{
LOG(("Creating HttpChannelChild @%p\n", this));
@ -192,40 +197,54 @@ HttpChannelChild::HttpChannelChild()
HttpChannelChild::~HttpChannelChild()
{
LOG(("Destroying HttpChannelChild @%p\n", this));
ReleaseMainThreadOnlyReferences();
}
void
HttpChannelChild::ReleaseMainThreadOnlyReferences()
{
if (NS_IsMainThread()) {
// Already on the main thread; let the dtor
// take care of releasing the references.
return;
}
nsTArray<nsCOMPtr<nsISupports>> arrayToRelease;
arrayToRelease.AppendElement(mCacheKey.forget());
NS_DispatchToMainThread(new ProxyReleaseRunnable(Move(arrayToRelease)));
}
//-----------------------------------------------------------------------------
// HttpChannelChild::nsISupports
//-----------------------------------------------------------------------------
// Override nsHashPropertyBag's AddRef: we don't need thread-safe refcnt
NS_IMPL_ADDREF(HttpChannelChild)
NS_IMETHODIMP_(MozExternalRefCountType) HttpChannelChild::Release()
{
NS_PRECONDITION(0 != mRefCnt, "dup release");
NS_ASSERT_OWNINGTHREAD(HttpChannelChild);
--mRefCnt;
NS_LOG_RELEASE(this, mRefCnt, "HttpChannelChild");
nsrefcnt count = --mRefCnt;
MOZ_ASSERT(int32_t(count) >= 0, "dup release");
NS_LOG_RELEASE(this, count, "HttpChannelChild");
// Normally we Send_delete in OnStopRequest, but when we need to retain the
// remote channel for security info, IPDL itself holds 1 reference, so we
// Send_delete when refCnt==1. But if !mIPCOpen, then there's nobody to send
// to, so we fall through.
if (mKeptAlive && mRefCnt == 1 && mIPCOpen) {
if (mKeptAlive && count == 1 && mIPCOpen) {
mKeptAlive = false;
// We send a message to the parent, which calls SendDelete, and then the
// child calls Send__delete__() to finally drop the refcount to 0.
SendDeletingChannel();
TrySendDeletingChannel();
return 1;
}
if (mRefCnt == 0) {
if (count == 0) {
mRefCnt = 1; /* stabilize */
delete this;
return 0;
}
return mRefCnt;
return count;
}
NS_INTERFACE_MAP_BEGIN(HttpChannelChild)
@ -246,6 +265,7 @@ NS_INTERFACE_MAP_BEGIN(HttpChannelChild)
NS_INTERFACE_MAP_ENTRY(nsIHttpChannelChild)
NS_INTERFACE_MAP_ENTRY_CONDITIONAL(nsIAssociatedContentSecurity, GetAssociatedContentSecurity())
NS_INTERFACE_MAP_ENTRY(nsIDivertableChannel)
NS_INTERFACE_MAP_ENTRY(nsIThreadRetargetableRequest)
NS_INTERFACE_MAP_END_INHERITING(HttpBaseChannel)
//-----------------------------------------------------------------------------
@ -279,6 +299,13 @@ class AssociateApplicationCacheEvent : public ChannelEvent
, clientID(aClientID) {}
void Run() { mChild->AssociateApplicationCache(groupID, clientID); }
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
nsCString groupID;
@ -353,6 +380,13 @@ class StartRequestEvent : public ChannelEvent
mSecurityInfoSerialization, mSelfAddr, mPeerAddr,
mCacheKey, mAltDataType, mAltDataLen);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
nsresult mChannelStatus;
@ -612,6 +646,13 @@ class TransportAndDataEvent : public ChannelEvent
mChild->OnTransportAndData(mChannelStatus, mTransportStatus,
mOffset, mCount, mData);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetODATarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
nsresult mChannelStatus;
@ -656,6 +697,12 @@ class MaybeDivertOnDataHttpEvent : public ChannelEvent
mChild->MaybeDivertOnData(mData, mOffset, mCount);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
nsCString mData;
@ -711,11 +758,27 @@ HttpChannelChild::OnTransportAndData(const nsresult& channelStatus,
// necko msg in between them.
AutoEventEnqueuer ensureSerialDispatch(mEventQ);
DoOnStatus(this, transportStatus);
const int64_t progressMax = mResponseHead->ContentLength();
const int64_t progress = offset + count;
DoOnProgress(this, progress, progressMax);
// OnTransportAndData will run on the retargeted thread if applicable; however,
// OnStatus/OnProgress events can only be fired on the main thread. We need to
// dispatch the status/progress event handling back to the main thread with the
// appropriate event target for networking.
if (NS_IsMainThread()) {
DoOnStatus(this, transportStatus);
DoOnProgress(this, progress, progressMax);
} else {
RefPtr<HttpChannelChild> self = this;
nsCOMPtr<nsIEventTarget> neckoTarget = GetNeckoTarget();
DebugOnly<nsresult> rv =
neckoTarget->Dispatch(
NS_NewRunnableFunction([self, transportStatus, progress, progressMax]() {
self->DoOnStatus(self, transportStatus);
self->DoOnProgress(self, progress, progressMax);
}), NS_DISPATCH_NORMAL);
MOZ_ASSERT(NS_SUCCEEDED(rv));
}
// OnDataAvailable
//
@ -740,6 +803,8 @@ void
HttpChannelChild::DoOnStatus(nsIRequest* aRequest, nsresult status)
{
LOG(("HttpChannelChild::DoOnStatus [this=%p]\n", this));
MOZ_ASSERT(NS_IsMainThread());
if (mCanceled)
return;
@ -773,6 +838,8 @@ void
HttpChannelChild::DoOnProgress(nsIRequest* aRequest, int64_t progress, int64_t progressMax)
{
LOG(("HttpChannelChild::DoOnProgress [this=%p]\n", this));
MOZ_ASSERT(NS_IsMainThread());
if (mCanceled)
return;
@ -804,7 +871,7 @@ HttpChannelChild::DoOnDataAvailable(nsIRequest* aRequest, nsISupports* aContext,
nsresult rv = mListener->OnDataAvailable(aRequest, aContext, aStream, offset, count);
if (NS_FAILED(rv)) {
Cancel(rv);
CancelOnMainThread(rv);
}
}
@ -819,6 +886,13 @@ class StopRequestEvent : public ChannelEvent
, mTiming(timing) {}
void Run() { mChild->OnStopRequest(mChannelStatus, mTiming); }
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
nsresult mChannelStatus;
@ -852,6 +926,12 @@ class MaybeDivertOnStopHttpEvent : public ChannelEvent
mChild->MaybeDivertOnStop(mChannelStatus);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
nsresult mChannelStatus;
@ -874,6 +954,7 @@ HttpChannelChild::OnStopRequest(const nsresult& channelStatus,
{
LOG(("HttpChannelChild::OnStopRequest [this=%p status=%" PRIx32 "]\n",
this, static_cast<uint32_t>(channelStatus)));
MOZ_ASSERT(NS_IsMainThread());
if (mDivertingToParent) {
MOZ_RELEASE_ASSERT(!mFlushedForDiversion,
@ -883,6 +964,30 @@ HttpChannelChild::OnStopRequest(const nsresult& channelStatus,
return;
}
// If thread retargeting is enabled, there might be runnables for
// DoOnStatus/DoOnProgress still sitting in the main thread event target. We
// need to ensure OnStopRequest is fired after them by postponing the
// ChannelEventQueue processing to the end of the main thread event target.
// This workaround can be removed after bug 1338493 is complete.
if (mODATarget) {
{
MutexAutoLock lock(mEventTargetMutex);
mODATarget = nullptr;
}
mEventQ->Suspend();
UniquePtr<ChannelEvent> stopEvent =
MakeUnique<StopRequestEvent>(this, channelStatus, timing);
mEventQ->PrependEvent(stopEvent);
nsCOMPtr<nsIEventTarget> neckoTarget = GetNeckoTarget();
MOZ_ASSERT(neckoTarget);
DebugOnly<nsresult> rv = neckoTarget->Dispatch(
NewRunnableMethod(mEventQ, &ChannelEventQueue::Resume), NS_DISPATCH_NORMAL);
MOZ_ASSERT(NS_SUCCEEDED(rv));
return;
}
if (mUnknownDecoderInvolved) {
LOG(("UnknownDecoder is involved queue OnStopRequest call. [this=%p]",
this));
@ -950,7 +1055,7 @@ HttpChannelChild::OnStopRequest(const nsresult& channelStatus,
} else {
// The parent process will respond by sending a DeleteSelf message and
// making sure not to send any more messages after that.
SendDeletingChannel();
TrySendDeletingChannel();
}
}
@ -970,6 +1075,7 @@ void
HttpChannelChild::DoOnStopRequest(nsIRequest* aRequest, nsresult aChannelStatus, nsISupports* aContext)
{
LOG(("HttpChannelChild::DoOnStopRequest [this=%p]\n", this));
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(!mIsPending);
// NB: We use aChannelStatus here instead of mStatus because if there was an
@ -1022,6 +1128,13 @@ class ProgressEvent : public ChannelEvent
, mProgressMax(progressMax) {}
void Run() { mChild->OnProgress(mProgress, mProgressMax); }
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
int64_t mProgress, mProgressMax;
@ -1070,6 +1183,13 @@ class StatusEvent : public ChannelEvent
, mStatus(status) {}
void Run() { mChild->OnStatus(mStatus); }
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
nsresult mStatus;
@ -1117,6 +1237,13 @@ class FailedAsyncOpenEvent : public ChannelEvent
, mStatus(status) {}
void Run() { mChild->FailedAsyncOpen(mStatus); }
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
nsresult mStatus;
@ -1151,7 +1278,7 @@ HttpChannelChild::FailedAsyncOpen(const nsresult& status)
HandleAsyncAbort();
if (mIPCOpen) {
SendDeletingChannel();
TrySendDeletingChannel();
}
}
@ -1171,6 +1298,13 @@ class DeleteSelfEvent : public ChannelEvent
public:
explicit DeleteSelfEvent(HttpChannelChild* child) : mChild(child) {}
void Run() { mChild->DeleteSelf(); }
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
};
@ -1228,9 +1362,12 @@ HttpChannelChild::RecvFinishInterceptedRedirect()
RefPtr<HttpChannelChild> self(this);
Send__delete__(this);
// Reset the event target since the IPC actor is about to be destroyed.
// Following channel event should be handled on main thread.
mEventQ->ResetDeliveryTarget();
{
// Reset the event target since the IPC actor is about to be destroyed.
// Following channel event should be handled on main thread.
MutexAutoLock lock(mEventTargetMutex);
mNeckoTarget = nullptr;
}
// The IPDL connection was torn down by the interception logic in
// CompleteRedirectSetup, and we need to call FinishInterceptedRedirect.
@ -1301,6 +1438,13 @@ class Redirect1Event : public ChannelEvent
mResponseHead, mSecurityInfoSerialization,
mChannelId);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
uint32_t mRegistrarId;
@ -1474,6 +1618,13 @@ class Redirect3Event : public ChannelEvent
public:
explicit Redirect3Event(HttpChannelChild* child) : mChild(child) {}
void Run() { mChild->Redirect3Complete(nullptr); }
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
};
@ -1499,6 +1650,13 @@ class HttpFlushedForDiversionEvent : public ChannelEvent
{
mChild->FlushedForDiversion();
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
};
@ -2092,7 +2250,41 @@ HttpChannelChild::SetEventTarget()
nsCOMPtr<nsIEventTarget> target =
dispatcher->EventTargetFor(TaskCategory::Network);
gNeckoChild->SetEventTargetForActor(this, target);
mEventQ->RetargetDeliveryTo(target);
{
MutexAutoLock lock(mEventTargetMutex);
mNeckoTarget = target;
}
}
already_AddRefed<nsIEventTarget>
HttpChannelChild::GetNeckoTarget()
{
nsCOMPtr<nsIEventTarget> target;
{
MutexAutoLock lock(mEventTargetMutex);
target = mNeckoTarget;
}
if (!target) {
target = do_GetMainThread();
}
return target.forget();
}
already_AddRefed<nsIEventTarget>
HttpChannelChild::GetODATarget()
{
nsCOMPtr<nsIEventTarget> target;
{
MutexAutoLock lock(mEventTargetMutex);
target = mODATarget ? mODATarget : mNeckoTarget;
}
if (!target) {
target = do_GetMainThread();
}
return target.forget();
}
nsresult
@ -2834,6 +3026,48 @@ HttpChannelChild::GetDivertingToParent(bool* aDiverting)
return NS_OK;
}
//-----------------------------------------------------------------------------
// HttpChannelChild::nsIThreadRetargetableRequest
//-----------------------------------------------------------------------------
NS_IMETHODIMP
HttpChannelChild::RetargetDeliveryTo(nsIEventTarget* aNewTarget)
{
LOG(("HttpChannelChild::RetargetDeliveryTo [this=%p, aNewTarget=%p]",
this, aNewTarget));
MOZ_ASSERT(NS_IsMainThread(), "Should be called on main thread only");
MOZ_ASSERT(!mODATarget);
MOZ_ASSERT(aNewTarget);
NS_ENSURE_ARG(aNewTarget);
if (aNewTarget == NS_GetCurrentThread()) {
NS_WARNING("Retargeting delivery to same thread");
return NS_OK;
}
// Ensure that |mListener| and any subsequent listeners can be retargeted
// to another thread.
nsresult rv = NS_OK;
nsCOMPtr<nsIThreadRetargetableStreamListener> retargetableListener =
do_QueryInterface(mListener, &rv);
if (!retargetableListener || NS_FAILED(rv)) {
NS_WARNING("Listener is not retargetable");
return NS_ERROR_NO_INTERFACE;
}
rv = retargetableListener->CheckListenerChain();
if (NS_FAILED(rv)) {
NS_WARNING("Subsequent listeners are not retargetable");
return rv;
}
{
MutexAutoLock lock(mEventTargetMutex);
mODATarget = aNewTarget;
}
return NS_OK;
}
void
HttpChannelChild::ResetInterception()
@ -2864,6 +3098,67 @@ HttpChannelChild::GetResponseSynthesized(bool* aSynthesized)
return NS_OK;
}
void
HttpChannelChild::TrySendDeletingChannel()
{
if (NS_IsMainThread()) {
Unused << PHttpChannelChild::SendDeletingChannel();
return;
}
DebugOnly<nsresult> rv =
NS_DispatchToMainThread(
NewNonOwningRunnableMethod(this, &HttpChannelChild::TrySendDeletingChannel));
MOZ_ASSERT(NS_SUCCEEDED(rv));
}
class CancelEvent final : public ChannelEvent
{
public:
CancelEvent(HttpChannelChild* aChild, nsresult aRv)
: mChild(aChild)
, mRv(aRv)
{
MOZ_ASSERT(!NS_IsMainThread());
MOZ_ASSERT(aChild);
}
void Run() {
MOZ_ASSERT(NS_IsMainThread());
mChild->Cancel(mRv);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
MOZ_ASSERT(mChild);
nsCOMPtr<nsIEventTarget> target = mChild->GetNeckoTarget();
return target.forget();
}
private:
HttpChannelChild* mChild;
const nsresult mRv;
};
void
HttpChannelChild::CancelOnMainThread(nsresult aRv)
{
LOG(("HttpChannelChild::CancelOnMainThread [this=%p]", this));
if (NS_IsMainThread()) {
Cancel(aRv);
return;
}
mEventQ->Suspend();
// Cancel is expected to preempt any other channel events, so we put this
// event at the front of mEventQ to make sure the nsIStreamListener does not
// receive any ODA/OnStopRequest callbacks.
UniquePtr<ChannelEvent> cancelEvent = MakeUnique<CancelEvent>(this, aRv);
mEventQ->PrependEvent(cancelEvent);
mEventQ->Resume();
}
void
HttpChannelChild::OverrideWithSynthesizedResponse(nsAutoPtr<nsHttpResponseHead>& aResponseHead,
nsIInputStream* aSynthesizedInput,
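With HttpChannelChild now implementing nsIThreadRetargetableRequest, a retargetable listener can move OnDataAvailable delivery off the main thread. A hypothetical consumer sketch (MyListener and mSTSThread are assumptions, not part of this patch; MyListener is also expected to implement nsIThreadRetargetableStreamListener, which is what the CheckListenerChain call above verifies):
#include "nsCOMPtr.h"
#include "nsIThreadRetargetableRequest.h"

NS_IMETHODIMP
MyListener::OnStartRequest(nsIRequest* aRequest, nsISupports* aContext)
{
  nsCOMPtr<nsIThreadRetargetableRequest> retargetable =
    do_QueryInterface(aRequest);
  if (retargetable) {
    // On failure, delivery simply stays on the main thread.
    nsresult rv = retargetable->RetargetDeliveryTo(mSTSThread);
    if (NS_FAILED(rv)) {
      NS_WARNING("Failed to retarget HTTP data delivery off the main thread");
    }
  }
  return NS_OK;
}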

View File

@ -8,6 +8,7 @@
#ifndef mozilla_net_HttpChannelChild_h
#define mozilla_net_HttpChannelChild_h
#include "mozilla/Mutex.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/net/HttpBaseChannel.h"
#include "mozilla/net/PHttpChannelChild.h"
@ -29,8 +30,10 @@
#include "nsIChildChannel.h"
#include "nsIHttpChannelChild.h"
#include "nsIDivertableChannel.h"
#include "nsIThreadRetargetableRequest.h"
#include "mozilla/net/DNS.h"
class nsIEventTarget;
class nsInputStreamPump;
namespace mozilla {
@ -50,6 +53,7 @@ class HttpChannelChild final : public PHttpChannelChild
, public nsIChildChannel
, public nsIHttpChannelChild
, public nsIDivertableChannel
, public nsIThreadRetargetableRequest
{
virtual ~HttpChannelChild();
public:
@ -63,6 +67,7 @@ public:
NS_DECL_NSICHILDCHANNEL
NS_DECL_NSIHTTPCHANNELCHILD
NS_DECL_NSIDIVERTABLECHANNEL
NS_DECL_NSITHREADRETARGETABLEREQUEST
HttpChannelChild();
@ -163,6 +168,14 @@ protected:
NS_IMETHOD GetResponseSynthesized(bool* aSynthesized) override;
private:
// This section is for main-thread-only objects;
// all of these references need to be proxy released on the main thread.
nsCOMPtr<nsISupports> mCacheKey;
// Proxy release all members above on main thread.
void ReleaseMainThreadOnlyReferences();
private:
class OverrideRunnable : public Runnable {
@ -189,6 +202,12 @@ private:
// before the constructor message is sent to the parent.
void SetEventTarget();
// Get event target for processing network events.
already_AddRefed<nsIEventTarget> GetNeckoTarget();
// Get event target for ODA.
already_AddRefed<nsIEventTarget> GetODATarget();
MOZ_MUST_USE nsresult ContinueAsyncOpen();
void DoOnStartRequest(nsIRequest* aRequest, nsISupports* aContext);
@ -212,6 +231,14 @@ private:
void ForceIntercepted(nsIInputStream* aSynthesizedInput);
// Try to send the DeletingChannel message to the parent side. Dispatches an
// async task to the main thread when invoked off the main thread.
void TrySendDeletingChannel();
// Invoke Cancel directly if on the main thread, or prepend a CancelEvent to
// mEventQ to ensure Cancel is processed before any other channel events.
void CancelOnMainThread(nsresult aRv);
RequestHeaderTuples mClientSetRequestHeaders;
nsCOMPtr<nsIChildChannel> mRedirectChannelChild;
RefPtr<InterceptStreamListener> mInterceptListener;
@ -223,7 +250,6 @@ private:
bool mCacheEntryAvailable;
uint32_t mCacheExpirationTime;
nsCString mCachedCharset;
nsCOMPtr<nsISupports> mCacheKey;
nsCString mProtocolVersion;
@ -284,6 +310,13 @@ private:
// Used to call OverrideWithSynthesizedResponse in FinishInterceptedRedirect
RefPtr<OverrideRunnable> mOverrideRunnable;
// EventTarget for labeling networking events.
nsCOMPtr<nsIEventTarget> mNeckoTarget;
// Target thread for delivering ODA.
nsCOMPtr<nsIEventTarget> mODATarget;
// Used to ensure atomicity of mNeckoTarget / mODATarget.
Mutex mEventTargetMutex;
void FinishInterceptedRedirect();
void CleanupRedirectingChannel(nsresult rv);
@ -355,6 +388,8 @@ private:
friend class Redirect1Event;
friend class Redirect3Event;
friend class DeleteSelfEvent;
friend class HttpFlushedForDiversionEvent;
friend class CancelEvent;
friend class HttpAsyncAborter<HttpChannelChild>;
friend class InterceptStreamListener;
friend class InterceptedChannelContent;

View File

@ -838,7 +838,7 @@ HttpChannelParent::RecvMarkOfflineCacheEntryAsForeign()
return IPC_OK();
}
class DivertDataAvailableEvent : public ChannelEvent
class DivertDataAvailableEvent : public MainThreadChannelEvent
{
public:
DivertDataAvailableEvent(HttpChannelParent* aParent,
@ -933,7 +933,7 @@ HttpChannelParent::DivertOnDataAvailable(const nsCString& data,
}
}
class DivertStopRequestEvent : public ChannelEvent
class DivertStopRequestEvent : public MainThreadChannelEvent
{
public:
DivertStopRequestEvent(HttpChannelParent* aParent,
@ -994,7 +994,7 @@ HttpChannelParent::DivertOnStopRequest(const nsresult& statusCode)
mParentListener->OnStopRequest(mChannel, nullptr, status);
}
class DivertCompleteEvent : public ChannelEvent
class DivertCompleteEvent : public MainThreadChannelEvent
{
public:
explicit DivertCompleteEvent(HttpChannelParent* aParent)
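The parent-side divert events only ever run on the main thread, so they switch from ChannelEvent to a MainThreadChannelEvent base and no longer need per-class GetEventTarget overrides. The base class is defined outside this hunk; a plausible shape for it, inferred from the main-thread GetEventTarget overrides elsewhere in this patch (an assumption, not a quotation of the actual header):
// Assumed shape of MainThreadChannelEvent: a ChannelEvent whose delivery
// target always resolves to the main thread.
class MainThreadChannelEvent : public ChannelEvent
{
public:
  already_AddRefed<nsIEventTarget> GetEventTarget()
  {
    return do_GetMainThread();
  }
};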

View File

@ -6358,8 +6358,8 @@ nsHttpChannel::ContinueBeginConnectWithResult()
}
LOG(("nsHttpChannel::ContinueBeginConnectWithResult result [this=%p rv=%" PRIx32
" mCanceled=%i]\n",
this, static_cast<uint32_t>(rv), mCanceled));
" mCanceled=%u]\n",
this, static_cast<uint32_t>(rv), static_cast<bool>(mCanceled)));
return rv;
}

View File

@ -176,6 +176,15 @@ public:
mChannelEvent->Run();
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
nsCOMPtr<nsIEventTarget> target = mEventTarget;
if (!target) {
target = do_GetMainThread();
}
return target.forget();
}
private:
nsAutoPtr<ChannelEvent> mChannelEvent;
nsCOMPtr<nsIEventTarget> mEventTarget;
@ -200,6 +209,13 @@ class StartEvent : public ChannelEvent
{
mChild->OnStart(mProtocol, mExtensions, mEffectiveURL, mEncrypted);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
nsCOMPtr<nsIEventTarget> target = do_GetCurrentThread();
return target.forget();
}
private:
RefPtr<WebSocketChannelChild> mChild;
nsCString mProtocol;
@ -258,6 +274,13 @@ class StopEvent : public ChannelEvent
{
mChild->OnStop(mStatusCode);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
nsCOMPtr<nsIEventTarget> target = do_GetCurrentThread();
return target.forget();
}
private:
RefPtr<WebSocketChannelChild> mChild;
nsresult mStatusCode;
@ -308,6 +331,13 @@ class MessageEvent : public ChannelEvent
mChild->OnBinaryMessageAvailable(mMessage);
}
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
nsCOMPtr<nsIEventTarget> target = do_GetCurrentThread();
return target.forget();
}
private:
RefPtr<WebSocketChannelChild> mChild;
nsCString mMessage;
@ -380,6 +410,13 @@ class AcknowledgeEvent : public ChannelEvent
{
mChild->OnAcknowledge(mSize);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
nsCOMPtr<nsIEventTarget> target = do_GetCurrentThread();
return target.forget();
}
private:
RefPtr<WebSocketChannelChild> mChild;
uint32_t mSize;
@ -426,6 +463,13 @@ class ServerCloseEvent : public ChannelEvent
{
mChild->OnServerClose(mCode, mReason);
}
already_AddRefed<nsIEventTarget> GetEventTarget()
{
nsCOMPtr<nsIEventTarget> target = do_GetCurrentThread();
return target.forget();
}
private:
RefPtr<WebSocketChannelChild> mChild;
uint16_t mCode;
@ -721,19 +765,6 @@ WebSocketChannelChild::GetSecurityInfo(nsISupports **aSecurityInfo)
return NS_ERROR_NOT_AVAILABLE;
}
//-----------------------------------------------------------------------------
// WebSocketChannelChild::nsIThreadRetargetableRequest
//-----------------------------------------------------------------------------
NS_IMETHODIMP
WebSocketChannelChild::RetargetDeliveryTo(nsIEventTarget* aTargetThread)
{
nsresult rv = BaseWebSocketChannel::RetargetDeliveryTo(aTargetThread);
MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
return mEventQ->RetargetDeliveryTo(aTargetThread);
}
bool
WebSocketChannelChild::IsOnTargetThread()
{

View File

@ -25,7 +25,6 @@ class WebSocketChannelChild final : public BaseWebSocketChannel,
explicit WebSocketChannelChild(bool aSecure);
NS_DECL_THREADSAFE_ISUPPORTS
NS_DECL_NSITHREADRETARGETABLEREQUEST
// nsIWebSocketChannel methods BaseWebSocketChannel didn't implement for us
//

View File

@ -130,7 +130,7 @@ WyciwygChannelChild::Init(nsIURI* uri)
// WyciwygChannelChild::PWyciwygChannelChild
//-----------------------------------------------------------------------------
class WyciwygStartRequestEvent : public ChannelEvent
class WyciwygStartRequestEvent : public MainThreadChannelEvent
{
public:
WyciwygStartRequestEvent(WyciwygChannelChild* child,
@ -192,7 +192,7 @@ WyciwygChannelChild::OnStartRequest(const nsresult& statusCode,
Cancel(rv);
}
class WyciwygDataAvailableEvent : public ChannelEvent
class WyciwygDataAvailableEvent : public MainThreadChannelEvent
{
public:
WyciwygDataAvailableEvent(WyciwygChannelChild* child,
@ -253,7 +253,7 @@ WyciwygChannelChild::OnDataAvailable(const nsCString& data,
}
}
class WyciwygStopRequestEvent : public ChannelEvent
class WyciwygStopRequestEvent : public MainThreadChannelEvent
{
public:
WyciwygStopRequestEvent(WyciwygChannelChild* child,
@ -305,7 +305,7 @@ WyciwygChannelChild::OnStopRequest(const nsresult& statusCode)
PWyciwygChannelChild::Send__delete__(this);
}
class WyciwygCancelEvent : public ChannelEvent
class WyciwygCancelEvent : public MainThreadChannelEvent
{
public:
WyciwygCancelEvent(WyciwygChannelChild* child, const nsresult& status)

View File

@ -115,7 +115,7 @@ var gTests = [
nsIURL: true, nsINestedURI: false },
{ spec: "ftp://foo:@ftp.mozilla.org:100/pub/mozilla.org/README",
scheme: "ftp",
prePath: "ftp://foo:@ftp.mozilla.org:100",
prePath: "ftp://foo@ftp.mozilla.org:100",
port: 100,
username: "foo",
password: "",

View File

@ -469,3 +469,23 @@ add_test(function test_normalize_ipv6() {
run_next_test();
});
add_test(function test_emptyPassword() {
var url = stringToURL("http://a:@example.com");
do_check_eq(url.spec, "http://a@example.com/");
url.password = "pp";
do_check_eq(url.spec, "http://a:pp@example.com/");
url.password = "";
do_check_eq(url.spec, "http://a@example.com/");
url.userPass = "xxx:";
do_check_eq(url.spec, "http://xxx@example.com/");
url.password = "zzzz";
do_check_eq(url.spec, "http://xxx:zzzz@example.com/");
url.userPass = "xxxxx:yyyyyy";
do_check_eq(url.spec, "http://xxxxx:yyyyyy@example.com/");
url.userPass = "z:";
do_check_eq(url.spec, "http://z@example.com/");
url.password = "ppppppppppp";
do_check_eq(url.spec, "http://z:ppppppppppp@example.com/");
run_next_test();
});

View File

@ -5306,7 +5306,12 @@ dnl ICU Support
dnl ========================================================
if test "$MOZ_WIDGET_TOOLKIT" != "android" -o -z "$RELEASE_OR_BETA"; then
_INTL_API=yes
dnl Disable ICU on android/x86 for testing on a tier-1 platform
if test "$MOZ_WIDGET_TOOLKIT" = "android" -a "$CPU_ARCH" = "x86"; then
_INTL_API=no
else
_INTL_API=yes
fi
else
_INTL_API=no
fi

View File

@ -982,11 +982,7 @@ nsHtml5StreamParser::OnStartRequest(nsIRequest* aRequest, nsISupports* aContext)
}
if (NS_FAILED(rv)) {
// for now skip warning if we're on child process, since we don't support
// off-main thread delivery there yet. This will change with bug 1015466
if (!XRE_IsContentProcess()) {
NS_WARNING("Failed to retarget HTML data delivery to the parser thread.");
}
NS_WARNING("Failed to retarget HTML data delivery to the parser thread.");
}
if (mCharsetSource == kCharsetFromParentFrame) {

servo/Cargo.lock generated
View File

@ -450,6 +450,7 @@ dependencies = [
"layout_traits 0.0.1",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"msg 0.0.1",
"net 0.0.1",
"net_traits 0.0.1",
"offscreen_gl_context 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)",
"profile_traits 0.0.1",
@ -1755,6 +1756,7 @@ dependencies = [
"hyper 0.9.18 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper_serde 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
"immeta 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"ipc-channel 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@ -24,6 +24,7 @@ ipc-channel = "0.7"
layout_traits = {path = "../layout_traits"}
log = "0.3.5"
msg = {path = "../msg"}
net = {path = "../net"}
net_traits = {path = "../net_traits"}
offscreen_gl_context = "0.8"
profile_traits = {path = "../profile_traits"}

View File

@ -87,7 +87,6 @@ use msg::constellation_msg::{FrameId, FrameType, PipelineId};
use msg::constellation_msg::{Key, KeyModifiers, KeyState};
use msg::constellation_msg::{PipelineNamespace, PipelineNamespaceId, TraversalDirection};
use net_traits::{self, IpcSend, ResourceThreads};
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::pub_domains::reg_host;
use net_traits::storage_thread::{StorageThreadMsg, StorageType};
use offscreen_gl_context::{GLContextAttributes, GLLimits};
@ -175,10 +174,6 @@ pub struct Constellation<Message, LTF, STF> {
/// browsing.
private_resource_threads: ResourceThreads,
/// A channel for the constellation to send messages to the image
/// cache thread.
image_cache_thread: ImageCacheThread,
/// A channel for the constellation to send messages to the font
/// cache thread.
font_cache_thread: FontCacheThread,
@ -302,9 +297,6 @@ pub struct InitialConstellationState {
/// A channel to the bluetooth thread.
pub bluetooth_thread: IpcSender<BluetoothRequest>,
/// A channel to the image cache thread.
pub image_cache_thread: ImageCacheThread,
/// A channel to the font cache thread.
pub font_cache_thread: FontCacheThread,
@ -518,7 +510,6 @@ impl<Message, LTF, STF> Constellation<Message, LTF, STF>
bluetooth_thread: state.bluetooth_thread,
public_resource_threads: state.public_resource_threads,
private_resource_threads: state.private_resource_threads,
image_cache_thread: state.image_cache_thread,
font_cache_thread: state.font_cache_thread,
swmanager_chan: None,
swmanager_receiver: swmanager_receiver,
@ -657,7 +648,6 @@ impl<Message, LTF, STF> Constellation<Message, LTF, STF>
devtools_chan: self.devtools_chan.clone(),
bluetooth_thread: self.bluetooth_thread.clone(),
swmanager_thread: self.swmanager_sender.clone(),
image_cache_thread: self.image_cache_thread.clone(),
font_cache_thread: self.font_cache_thread.clone(),
resource_threads: resource_threads,
time_profiler_chan: self.time_profiler_chan.clone(),
@ -1210,9 +1200,6 @@ impl<Message, LTF, STF> Constellation<Message, LTF, STF>
let (core_sender, core_receiver) = ipc::channel().expect("Failed to create IPC channel!");
let (storage_sender, storage_receiver) = ipc::channel().expect("Failed to create IPC channel!");
debug!("Exiting image cache.");
self.image_cache_thread.exit();
debug!("Exiting core resource threads.");
if let Err(e) = self.public_resource_threads.send(net_traits::CoreResourceMsg::Exit(core_sender)) {
warn!("Exit resource thread failed ({})", e);

View File

@ -24,6 +24,7 @@ extern crate layout_traits;
#[macro_use]
extern crate log;
extern crate msg;
extern crate net;
extern crate net_traits;
extern crate offscreen_gl_context;
extern crate profile_traits;

View File

@ -16,8 +16,9 @@ use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use ipc_channel::router::ROUTER;
use layout_traits::LayoutThreadFactory;
use msg::constellation_msg::{FrameId, FrameType, PipelineId, PipelineNamespaceId};
use net::image_cache::ImageCacheImpl;
use net_traits::{IpcSend, ResourceThreads};
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::image_cache::ImageCache;
use profile_traits::mem as profile_mem;
use profile_traits::time;
use script_traits::{ConstellationControlMsg, DevicePixel, DiscardBrowsingContext};
@ -34,6 +35,7 @@ use std::env;
use std::ffi::OsStr;
use std::process;
use std::rc::Rc;
use std::sync::Arc;
use std::sync::mpsc::Sender;
use style_traits::CSSPixel;
use webrender_traits;
@ -133,9 +135,6 @@ pub struct InitialPipelineState {
/// A channel to the service worker manager thread
pub swmanager_thread: IpcSender<SWManagerMsg>,
/// A channel to the image cache thread.
pub image_cache_thread: ImageCacheThread,
/// A channel to the font cache thread.
pub font_cache_thread: FontCacheThread,
@ -250,7 +249,6 @@ impl Pipeline {
devtools_chan: script_to_devtools_chan,
bluetooth_thread: state.bluetooth_thread,
swmanager_thread: state.swmanager_thread,
image_cache_thread: state.image_cache_thread,
font_cache_thread: state.font_cache_thread,
resource_threads: state.resource_threads,
time_profiler_chan: state.time_profiler_chan,
@ -451,7 +449,6 @@ pub struct UnprivilegedPipelineContent {
devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
bluetooth_thread: IpcSender<BluetoothRequest>,
swmanager_thread: IpcSender<SWManagerMsg>,
image_cache_thread: ImageCacheThread,
font_cache_thread: FontCacheThread,
resource_threads: ResourceThreads,
time_profiler_chan: time::ProfilerChan,
@ -477,6 +474,7 @@ impl UnprivilegedPipelineContent {
where LTF: LayoutThreadFactory<Message=Message>,
STF: ScriptThreadFactory<Message=Message>
{
let image_cache = Arc::new(ImageCacheImpl::new(self.webrender_api_sender.create_api()));
let layout_pair = STF::create(InitialScriptState {
id: self.id,
frame_id: self.frame_id,
@ -489,7 +487,7 @@ impl UnprivilegedPipelineContent {
scheduler_chan: self.scheduler_chan,
bluetooth_thread: self.bluetooth_thread,
resource_threads: self.resource_threads,
image_cache_thread: self.image_cache_thread.clone(),
image_cache: image_cache.clone(),
time_profiler_chan: self.time_profiler_chan.clone(),
mem_profiler_chan: self.mem_profiler_chan.clone(),
devtools_chan: self.devtools_chan,
@ -507,7 +505,7 @@ impl UnprivilegedPipelineContent {
self.pipeline_port,
self.layout_to_constellation_chan,
self.script_chan,
self.image_cache_thread,
image_cache.clone(),
self.font_cache_thread,
self.time_profiler_chan,
self.mem_profiler_chan,

View File

@ -9,8 +9,8 @@ use gfx::display_list::{WebRenderImageInfo, OpaqueNode};
use gfx::font_cache_thread::FontCacheThread;
use gfx::font_context::FontContext;
use heapsize::HeapSizeOf;
use net_traits::image_cache_thread::{ImageCacheThread, ImageState, CanRequestImages};
use net_traits::image_cache_thread::{ImageOrMetadataAvailable, UsePlaceholder};
use net_traits::image_cache::{CanRequestImages, ImageCache, ImageState};
use net_traits::image_cache::{ImageOrMetadataAvailable, UsePlaceholder};
use opaque_node::OpaqueNodeMethods;
use parking_lot::RwLock;
use script_layout_interface::{PendingImage, PendingImageState};
@ -79,8 +79,8 @@ pub struct LayoutContext<'a> {
/// Bits shared by the layout and style system.
pub style_context: SharedStyleContext<'a>,
/// The shared image cache thread.
pub image_cache_thread: Mutex<ImageCacheThread>,
/// Reference to the script thread image cache.
pub image_cache: Arc<ImageCache>,
/// Interface to the font cache thread.
pub font_cache_thread: Mutex<FontCacheThread>,
@ -126,10 +126,9 @@ impl<'a> LayoutContext<'a> {
};
// See if the image is already available
let result = self.image_cache_thread.lock().unwrap()
.find_image_or_metadata(url.clone(),
use_placeholder,
can_request);
let result = self.image_cache.find_image_or_metadata(url.clone(),
use_placeholder,
can_request);
match result {
Ok(image_or_metadata) => Some(image_or_metadata),
// Image failed to load, so just return nothing

View File

@ -36,7 +36,7 @@ use list_item::ListItemFlow;
use model::{self, MaybeAuto};
use msg::constellation_msg::PipelineId;
use net_traits::image::base::PixelFormat;
use net_traits::image_cache_thread::UsePlaceholder;
use net_traits::image_cache::UsePlaceholder;
use range::Range;
use servo_config::opts;
use servo_url::ServoUrl;

View File

@ -27,7 +27,7 @@ use model::{self, IntrinsicISizes, IntrinsicISizesContribution, MaybeAuto, SizeC
use model::{style_length, ToGfxMatrix};
use msg::constellation_msg::PipelineId;
use net_traits::image::base::{Image, ImageMetadata};
use net_traits::image_cache_thread::{ImageOrMetadataAvailable, UsePlaceholder};
use net_traits::image_cache::{ImageOrMetadataAvailable, UsePlaceholder};
use range::*;
use script_layout_interface::HTMLCanvasData;
use script_layout_interface::SVGSVGData;

View File

@ -76,8 +76,7 @@ use layout::wrapper::LayoutNodeLayoutData;
use layout::wrapper::drop_style_and_layout_data;
use layout_traits::LayoutThreadFactory;
use msg::constellation_msg::{FrameId, PipelineId};
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::image_cache_thread::UsePlaceholder;
use net_traits::image_cache::{ImageCache, UsePlaceholder};
use parking_lot::RwLock;
use profile_traits::mem::{self, Report, ReportKind, ReportsChan};
use profile_traits::time::{self, TimerMetadata, profile};
@ -157,8 +156,8 @@ pub struct LayoutThread {
/// The channel on which messages can be sent to the memory profiler.
mem_profiler_chan: mem::ProfilerChan,
/// The channel on which messages can be sent to the image cache.
image_cache_thread: ImageCacheThread,
/// Reference to the script thread image cache.
image_cache: Arc<ImageCache>,
/// Public interface to the font cache thread.
font_cache_thread: FontCacheThread,
@ -245,7 +244,7 @@ impl LayoutThreadFactory for LayoutThread {
pipeline_port: IpcReceiver<LayoutControlMsg>,
constellation_chan: IpcSender<ConstellationMsg>,
script_chan: IpcSender<ConstellationControlMsg>,
image_cache_thread: ImageCacheThread,
image_cache: Arc<ImageCache>,
font_cache_thread: FontCacheThread,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
@ -268,7 +267,7 @@ impl LayoutThreadFactory for LayoutThread {
pipeline_port,
constellation_chan,
script_chan,
image_cache_thread,
image_cache.clone(),
font_cache_thread,
time_profiler_chan,
mem_profiler_chan.clone(),
@ -382,7 +381,7 @@ impl LayoutThread {
pipeline_port: IpcReceiver<LayoutControlMsg>,
constellation_chan: IpcSender<ConstellationMsg>,
script_chan: IpcSender<ConstellationControlMsg>,
image_cache_thread: ImageCacheThread,
image_cache: Arc<ImageCache>,
font_cache_thread: FontCacheThread,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
@ -432,7 +431,7 @@ impl LayoutThread {
constellation_chan: constellation_chan.clone(),
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
image_cache_thread: image_cache_thread,
image_cache: image_cache.clone(),
font_cache_thread: font_cache_thread,
first_reflow: true,
font_cache_receiver: font_cache_receiver,
@ -522,7 +521,7 @@ impl LayoutThread {
quirks_mode: self.quirks_mode.unwrap(),
animation_only_restyle: false,
},
image_cache_thread: Mutex::new(self.image_cache_thread.clone()),
image_cache: self.image_cache.clone(),
font_cache_thread: Mutex::new(self.font_cache_thread.clone()),
webrender_image_cache: self.webrender_image_cache.clone(),
pending_images: if request_images { Some(Mutex::new(vec![])) } else { None },
@ -693,7 +692,7 @@ impl LayoutThread {
info.pipeline_port,
info.constellation_chan,
info.script_chan.clone(),
self.image_cache_thread.clone(),
info.image_cache.clone(),
self.font_cache_thread.clone(),
self.time_profiler_chan.clone(),
self.mem_profiler_chan.clone(),

View File

@ -21,11 +21,12 @@ extern crate webrender_traits;
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use msg::constellation_msg::{FrameId, PipelineId};
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::image_cache::ImageCache;
use profile_traits::{mem, time};
use script_traits::{ConstellationControlMsg, LayoutControlMsg};
use script_traits::LayoutMsg as ConstellationMsg;
use servo_url::ServoUrl;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
// A static method creating a layout thread
@ -40,7 +41,7 @@ pub trait LayoutThreadFactory {
pipeline_port: IpcReceiver<LayoutControlMsg>,
constellation_chan: IpcSender<ConstellationMsg>,
script_chan: IpcSender<ConstellationControlMsg>,
image_cache_thread: ImageCacheThread,
image_cache: Arc<ImageCache>,
font_cache_thread: FontCacheThread,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,

View File

@ -3,34 +3,78 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use immeta::load_from_buf;
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use ipc_channel::router::ROUTER;
use net_traits::{NetworkError, FetchResponseMsg};
use net_traits::{FetchResponseMsg, NetworkError};
use net_traits::image::base::{Image, ImageMetadata, PixelFormat, load_from_memory};
use net_traits::image_cache_thread::{ImageCacheCommand, ImageCacheThread, ImageState};
use net_traits::image_cache_thread::{ImageOrMetadataAvailable, ImageResponse, UsePlaceholder};
use net_traits::image_cache_thread::{ImageResponder, PendingImageId, CanRequestImages};
use net_traits::image_cache::{CanRequestImages, ImageCache, ImageResponder};
use net_traits::image_cache::{ImageOrMetadataAvailable, ImageResponse, ImageState};
use net_traits::image_cache::{PendingImageId, UsePlaceholder};
use servo_config::resource_files::resources_dir_path;
use servo_url::ServoUrl;
use std::borrow::ToOwned;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::fs::File;
use std::io::{self, Read};
use std::mem;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
use std::sync::{Arc, Mutex};
use std::thread;
use threadpool::ThreadPool;
use webrender_traits;
///
/// TODO(gw): Remaining work on image cache:
/// * Make use of the prefetch support in various parts of the code.
/// * Profile time in GetImageIfAvailable - might be worth caching these results per paint / layout thread.
/// * Profile time in GetImageIfAvailable - might be worth caching these
/// results per paint / layout thread.
///
/// MAYBE(Yoric):
/// * For faster lookups, it might be useful to store the LoadKey in the DOM once we have performed a first load.
/// * For faster lookups, it might be useful to store the LoadKey in the
/// DOM once we have performed a first load.
// ======================================================================
// Helper functions.
// ======================================================================
fn convert_format(format: PixelFormat) -> webrender_traits::ImageFormat {
match format {
PixelFormat::K8 | PixelFormat::KA8 => {
panic!("Not support by webrender yet");
}
PixelFormat::RGB8 => webrender_traits::ImageFormat::RGB8,
PixelFormat::RGBA8 => webrender_traits::ImageFormat::RGBA8,
}
}
fn decode_bytes_sync(key: LoadKey, bytes: &[u8]) -> DecoderMsg {
let image = load_from_memory(bytes);
DecoderMsg {
key: key,
image: image
}
}
fn get_placeholder_image(webrender_api: &webrender_traits::RenderApi) -> io::Result<Arc<Image>> {
let mut placeholder_path = try!(resources_dir_path());
placeholder_path.push("rippy.png");
let mut file = try!(File::open(&placeholder_path));
let mut image_data = vec![];
try!(file.read_to_end(&mut image_data));
let mut image = load_from_memory(&image_data).unwrap();
let format = convert_format(image.format);
let mut bytes = Vec::new();
bytes.extend_from_slice(&*image.bytes);
let descriptor = webrender_traits::ImageDescriptor {
width: image.width,
height: image.height,
stride: None,
format: format,
offset: 0,
is_opaque: is_image_opaque(format, &bytes),
};
let data = webrender_traits::ImageData::new(bytes);
let image_key = webrender_api.generate_image_key();
webrender_api.add_image(image_key, descriptor, data, None);
image.id = Some(image_key);
Ok(Arc::new(image))
}
// TODO(gw): This is a port of the old is_image_opaque code from WR.
// Consider using SIMD to speed this up if it shows in profiles.
@ -52,80 +96,24 @@ fn is_image_opaque(format: webrender_traits::ImageFormat, bytes: &[u8]) -> bool
}
}
/// Represents an image that is either being loaded
/// by the resource thread, or decoded by a worker thread.
struct PendingLoad {
// The bytes loaded so far. Reset to an empty vector once loading
// is complete and the buffer has been transmitted to the decoder.
bytes: ImageBytes,
fn premultiply(data: &mut [u8]) {
let length = data.len();
// Image metadata, if available.
metadata: Option<ImageMetadata>,
for i in (0..length).step_by(4) {
let b = data[i + 0] as u32;
let g = data[i + 1] as u32;
let r = data[i + 2] as u32;
let a = data[i + 3] as u32;
// Once loading is complete, the result of the operation.
result: Option<Result<(), NetworkError>>,
listeners: Vec<ImageResponder>,
// The url being loaded. Do not forget that this may be several Mb
// if we are loading a data: url.
url: ServoUrl,
}
enum ImageBytes {
InProgress(Vec<u8>),
Complete(Arc<Vec<u8>>),
}
impl ImageBytes {
fn extend_from_slice(&mut self, data: &[u8]) {
match *self {
ImageBytes::InProgress(ref mut bytes) => bytes.extend_from_slice(data),
ImageBytes::Complete(_) => panic!("attempted modification of complete image bytes"),
}
}
fn mark_complete(&mut self) -> Arc<Vec<u8>> {
let bytes = {
let own_bytes = match *self {
ImageBytes::InProgress(ref mut bytes) => bytes,
ImageBytes::Complete(_) => panic!("attempted modification of complete image bytes"),
};
mem::replace(own_bytes, vec![])
};
let bytes = Arc::new(bytes);
*self = ImageBytes::Complete(bytes.clone());
bytes
}
fn as_slice(&self) -> &[u8] {
match *self {
ImageBytes::InProgress(ref bytes) => &bytes,
ImageBytes::Complete(ref bytes) => &*bytes,
}
data[i + 0] = (b * a / 255) as u8;
data[i + 1] = (g * a / 255) as u8;
data[i + 2] = (r * a / 255) as u8;
}
}
enum LoadResult {
Loaded(Image),
PlaceholderLoaded(Arc<Image>),
None
}
impl PendingLoad {
fn new(url: ServoUrl) -> PendingLoad {
PendingLoad {
bytes: ImageBytes::InProgress(vec!()),
metadata: None,
result: None,
listeners: vec!(),
url: url,
}
}
fn add_listener(&mut self, listener: ImageResponder) {
self.listeners.push(listener);
}
}
// ======================================================================
// Aux structs and enums.
// ======================================================================
// Represents all the currently pending loads/decodings. For
// performance reasons, loads are indexed by a dedicated load key.
@ -142,14 +130,6 @@ struct AllPendingLoads {
keygen: LoadKeyGenerator,
}
/// Result of accessing a cache.
enum CacheResult<'a> {
/// The value was in the cache.
Hit(LoadKey, &'a mut PendingLoad),
/// The value was not in the cache and needed to be regenerated.
Miss(Option<(LoadKey, &'a mut PendingLoad)>),
}
impl AllPendingLoads {
fn new() -> AllPendingLoads {
AllPendingLoads {
@ -159,12 +139,6 @@ impl AllPendingLoads {
}
}
// `true` if there is no currently pending load, `false` otherwise.
fn is_empty(&self) -> bool {
assert!(self.loads.is_empty() == self.url_to_load_key.is_empty());
self.loads.is_empty()
}
// get a PendingLoad from its LoadKey.
fn get_by_key_mut(&mut self, key: &LoadKey) -> Option<&mut PendingLoad> {
self.loads.get_mut(key)
@ -206,6 +180,14 @@ impl AllPendingLoads {
}
}
/// Result of accessing a cache.
enum CacheResult<'a> {
/// The value was in the cache.
Hit(LoadKey, &'a mut PendingLoad),
/// The value was not in the cache and needed to be regenerated.
Miss(Option<(LoadKey, &'a mut PendingLoad)>),
}
/// Represents an image that has completed loading.
/// Images that fail to load (due to network or decode
/// failure) are still stored here, so that they aren't
@ -224,6 +206,46 @@ impl CompletedLoad {
}
}
/// Message that the decoder worker threads send to the image cache.
struct DecoderMsg {
key: LoadKey,
image: Option<Image>,
}
enum ImageBytes {
InProgress(Vec<u8>),
Complete(Arc<Vec<u8>>),
}
impl ImageBytes {
fn extend_from_slice(&mut self, data: &[u8]) {
match *self {
ImageBytes::InProgress(ref mut bytes) => bytes.extend_from_slice(data),
ImageBytes::Complete(_) => panic!("attempted modification of complete image bytes"),
}
}
fn mark_complete(&mut self) -> Arc<Vec<u8>> {
let bytes = {
let own_bytes = match *self {
ImageBytes::InProgress(ref mut bytes) => bytes,
ImageBytes::Complete(_) => panic!("attempted modification of complete image bytes"),
};
mem::replace(own_bytes, vec![])
};
let bytes = Arc::new(bytes);
*self = ImageBytes::Complete(bytes.clone());
bytes
}
fn as_slice(&self) -> &[u8] {
match *self {
ImageBytes::InProgress(ref bytes) => &bytes,
ImageBytes::Complete(ref bytes) => &*bytes,
}
}
}
// A key used to communicate during loading.
type LoadKey = PendingImageId;
@ -243,18 +265,51 @@ impl LoadKeyGenerator {
}
}
struct ResourceLoadInfo {
action: FetchResponseMsg,
key: LoadKey,
enum LoadResult {
Loaded(Image),
PlaceholderLoaded(Arc<Image>),
None
}
/// Implementation of the image cache
struct ImageCache {
decoder_sender: Sender<DecoderMsg>,
/// Represents an image that is either being loaded
/// by the resource thread, or decoded by a worker thread.
struct PendingLoad {
// The bytes loaded so far. Reset to an empty vector once loading
// is complete and the buffer has been transmitted to the decoder.
bytes: ImageBytes,
// Worker threads for decoding images.
thread_pool: ThreadPool,
// Image metadata, if available.
metadata: Option<ImageMetadata>,
// Once loading is complete, the result of the operation.
result: Option<Result<(), NetworkError>>,
listeners: Vec<ImageResponder>,
// The url being loaded. Do not forget that this may be several Mb
// if we are loading a data: url.
url: ServoUrl,
}
impl PendingLoad {
fn new(url: ServoUrl) -> PendingLoad {
PendingLoad {
bytes: ImageBytes::InProgress(vec!()),
metadata: None,
result: None,
listeners: vec!(),
url: url,
}
}
fn add_listener(&mut self, listener: ImageResponder) {
self.listeners.push(listener);
}
}
// ======================================================================
// Image cache implementation.
// ======================================================================
struct ImageCacheStore {
// Images that are loading over network, or decoding.
pending_loads: AllPendingLoads,
@ -268,225 +323,7 @@ struct ImageCache {
webrender_api: webrender_traits::RenderApi,
}
/// Message that the decoder worker threads send to main image cache thread.
struct DecoderMsg {
key: LoadKey,
image: Option<Image>,
}
struct Receivers {
cmd_receiver: Receiver<ImageCacheCommand>,
decoder_receiver: Receiver<DecoderMsg>,
}
impl Receivers {
#[allow(unsafe_code)]
fn recv(&self) -> SelectResult {
let cmd_receiver = &self.cmd_receiver;
let decoder_receiver = &self.decoder_receiver;
select! {
msg = cmd_receiver.recv() => {
SelectResult::Command(msg.unwrap())
},
msg = decoder_receiver.recv() => {
SelectResult::Decoder(msg.unwrap())
}
}
}
}
/// The types of messages that the main image cache thread receives.
enum SelectResult {
Command(ImageCacheCommand),
Decoder(DecoderMsg),
}
fn convert_format(format: PixelFormat) -> webrender_traits::ImageFormat {
match format {
PixelFormat::K8 | PixelFormat::KA8 => {
panic!("Not support by webrender yet");
}
PixelFormat::RGB8 => webrender_traits::ImageFormat::RGB8,
PixelFormat::RGBA8 => webrender_traits::ImageFormat::RGBA8,
}
}
fn get_placeholder_image(webrender_api: &webrender_traits::RenderApi) -> io::Result<Arc<Image>> {
let mut placeholder_path = try!(resources_dir_path());
placeholder_path.push("rippy.png");
let mut file = try!(File::open(&placeholder_path));
let mut image_data = vec![];
try!(file.read_to_end(&mut image_data));
let mut image = load_from_memory(&image_data).unwrap();
let format = convert_format(image.format);
let mut bytes = Vec::new();
bytes.extend_from_slice(&*image.bytes);
let descriptor = webrender_traits::ImageDescriptor {
width: image.width,
height: image.height,
stride: None,
format: format,
offset: 0,
is_opaque: is_image_opaque(format, &bytes),
};
let data = webrender_traits::ImageData::new(bytes);
let image_key = webrender_api.generate_image_key();
webrender_api.add_image(image_key, descriptor, data, None);
image.id = Some(image_key);
Ok(Arc::new(image))
}
fn premultiply(data: &mut [u8]) {
let length = data.len();
for i in (0..length).step_by(4) {
let b = data[i + 0] as u32;
let g = data[i + 1] as u32;
let r = data[i + 2] as u32;
let a = data[i + 3] as u32;
data[i + 0] = (b * a / 255) as u8;
data[i + 1] = (g * a / 255) as u8;
data[i + 2] = (r * a / 255) as u8;
}
}
impl ImageCache {
fn run(webrender_api: webrender_traits::RenderApi,
ipc_command_receiver: IpcReceiver<ImageCacheCommand>) {
// Preload the placeholder image, used when images fail to load.
let placeholder_image = get_placeholder_image(&webrender_api).ok();
// Ask the router to proxy messages received over IPC to us.
let cmd_receiver = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_command_receiver);
let (decoder_sender, decoder_receiver) = channel();
let mut cache = ImageCache {
decoder_sender: decoder_sender,
thread_pool: ThreadPool::new(4),
pending_loads: AllPendingLoads::new(),
completed_loads: HashMap::new(),
placeholder_image: placeholder_image,
webrender_api: webrender_api,
};
let receivers = Receivers {
cmd_receiver: cmd_receiver,
decoder_receiver: decoder_receiver,
};
let mut exit_sender: Option<IpcSender<()>> = None;
loop {
match receivers.recv() {
SelectResult::Command(cmd) => {
exit_sender = cache.handle_cmd(cmd);
}
SelectResult::Decoder(msg) => {
cache.handle_decoder(msg);
}
}
// Can only exit when all pending loads are complete.
if let Some(ref exit_sender) = exit_sender {
if cache.pending_loads.is_empty() {
exit_sender.send(()).unwrap();
break;
}
}
}
}
// Handle a request from a client
fn handle_cmd(&mut self, cmd: ImageCacheCommand) -> Option<IpcSender<()>> {
match cmd {
ImageCacheCommand::Exit(sender) => {
return Some(sender);
}
ImageCacheCommand::AddListener(id, responder) => {
self.add_listener(id, responder);
}
ImageCacheCommand::GetImageOrMetadataIfAvailable(url,
use_placeholder,
can_request,
consumer) => {
let result = self.get_image_or_meta_if_available(url, use_placeholder, can_request);
// TODO(#15501): look for opportunities to clean up cache if this send fails.
let _ = consumer.send(result);
}
ImageCacheCommand::StoreDecodeImage(id, data) => {
self.handle_progress(ResourceLoadInfo {
action: data,
key: id
});
}
};
None
}
// Handle progress messages from the resource thread
fn handle_progress(&mut self, msg: ResourceLoadInfo) {
match (msg.action, msg.key) {
(FetchResponseMsg::ProcessRequestBody, _) |
(FetchResponseMsg::ProcessRequestEOF, _) => return,
(FetchResponseMsg::ProcessResponse(_response), _) => {}
(FetchResponseMsg::ProcessResponseChunk(data), _) => {
debug!("got some data for {:?}", msg.key);
let pending_load = self.pending_loads.get_by_key_mut(&msg.key).unwrap();
pending_load.bytes.extend_from_slice(&data);
//jmr0 TODO: possibly move to another task?
if let None = pending_load.metadata {
if let Ok(metadata) = load_from_buf(&pending_load.bytes.as_slice()) {
let dimensions = metadata.dimensions();
let img_metadata = ImageMetadata { width: dimensions.width,
height: dimensions.height };
for listener in &pending_load.listeners {
listener.respond(ImageResponse::MetadataLoaded(img_metadata.clone()));
}
pending_load.metadata = Some(img_metadata);
}
}
}
(FetchResponseMsg::ProcessResponseEOF(result), key) => {
debug!("received EOF for {:?}", key);
match result {
Ok(()) => {
let pending_load = self.pending_loads.get_by_key_mut(&msg.key).unwrap();
pending_load.result = Some(result);
let bytes = pending_load.bytes.mark_complete();
let sender = self.decoder_sender.clone();
debug!("async decoding {} ({:?})", pending_load.url, key);
self.thread_pool.execute(move || {
let msg = decode_bytes_sync(key, &*bytes);
sender.send(msg).unwrap();
});
}
Err(_) => {
debug!("processing error for {:?}", key);
match self.placeholder_image.clone() {
Some(placeholder_image) => {
self.complete_load(msg.key, LoadResult::PlaceholderLoaded(
placeholder_image))
}
None => self.complete_load(msg.key, LoadResult::None),
}
}
}
}
}
}
// Handle a message from one of the decoder worker threads
fn handle_decoder(&mut self, msg: DecoderMsg) {
let image = match msg.image {
None => LoadResult::None,
Some(image) => LoadResult::Loaded(image),
};
self.complete_load(msg.key, image);
}
impl ImageCacheStore {
// Change state of a url from pending -> loaded.
fn complete_load(&mut self, key: LoadKey, mut load_result: LoadResult) {
let pending_load = match self.pending_loads.remove(&key) {
@ -532,25 +369,6 @@ impl ImageCache {
}
}
/// Add a listener for a given image if it is still pending, or notify the
/// listener if the image is complete.
fn add_listener(&mut self,
id: PendingImageId,
listener: ImageResponder) {
if let Some(load) = self.pending_loads.get_by_key_mut(&id) {
if let Some(ref metadata) = load.metadata {
listener.respond(ImageResponse::MetadataLoaded(metadata.clone()));
}
load.add_listener(listener);
return;
}
if let Some(load) = self.completed_loads.values().find(|l| l.id == id) {
listener.respond(load.image_response.clone());
return;
}
warn!("Couldn't find cached entry for listener {:?}", id);
}
/// Return a completed image if it exists, or None if there is no complete load
/// or the complete load is not fully decoded or is unavailable.
fn get_completed_image_if_available(&self,
@ -572,29 +390,59 @@ impl ImageCache {
})
}
/// Return any available metadata or image for the given URL, or an indication that
/// the image is not yet available if it is in progress, or else reserve a slot in
/// the cache for the URL if the consumer can request images.
fn get_image_or_meta_if_available(&mut self,
url: ServoUrl,
placeholder: UsePlaceholder,
can_request: CanRequestImages)
-> Result<ImageOrMetadataAvailable, ImageState> {
if let Some(result) = self.get_completed_image_if_available(&url, placeholder) {
/// Handle a message from one of the decoder worker threads or from a sync
/// decoding operation.
fn handle_decoder(&mut self, msg: DecoderMsg) {
let image = match msg.image {
None => LoadResult::None,
Some(image) => LoadResult::Loaded(image),
};
self.complete_load(msg.key, image);
}
}
pub struct ImageCacheImpl {
store: Arc<Mutex<ImageCacheStore>>,
}
impl ImageCache for ImageCacheImpl {
fn new(webrender_api: webrender_traits::RenderApi) -> ImageCacheImpl {
debug!("New image cache");
ImageCacheImpl {
store: Arc::new(Mutex::new(ImageCacheStore {
pending_loads: AllPendingLoads::new(),
completed_loads: HashMap::new(),
placeholder_image: get_placeholder_image(&webrender_api).ok(),
webrender_api: webrender_api,
}))
}
}
/// Return any available metadata or image for the given URL,
/// or an indication that the image is not yet available if it is in progress,
/// or else reserve a slot in the cache for the URL if the consumer can request images.
fn find_image_or_metadata(&self,
url: ServoUrl,
use_placeholder: UsePlaceholder,
can_request: CanRequestImages)
-> Result<ImageOrMetadataAvailable, ImageState> {
debug!("Find image or metadata for {}", url);
let mut store = self.store.lock().unwrap();
if let Some(result) = store.get_completed_image_if_available(&url, use_placeholder) {
debug!("{} is available", url);
return result;
}
let decoded = {
let result = self.pending_loads.get_cached(url.clone(), can_request);
let result = store.pending_loads.get_cached(url.clone(), can_request);
match result {
CacheResult::Hit(key, pl) => match (&pl.result, &pl.metadata) {
(&Some(Ok(_)), _) => {
debug!("sync decoding {} ({:?})", url, key);
debug!("Sync decoding {} ({:?})", url, key);
decode_bytes_sync(key, &pl.bytes.as_slice())
}
(&None, &Some(ref meta)) => {
debug!("metadata available for {} ({:?})", url, key);
debug!("Metadata available for {} ({:?})", url, key);
return Ok(ImageOrMetadataAvailable::MetadataAvailable(meta.clone()))
}
(&Some(Err(_)), _) | (&None, &None) => {
@ -603,43 +451,101 @@ impl ImageCache {
}
},
CacheResult::Miss(Some((key, _pl))) => {
debug!("should be requesting {} ({:?})", url, key);
debug!("Should be requesting {} ({:?})", url, key);
return Err(ImageState::NotRequested(key));
}
CacheResult::Miss(None) => {
debug!("couldn't find an entry for {}", url);
debug!("Couldn't find an entry for {}", url);
return Err(ImageState::LoadError);
}
}
};
// In the case where a decode is ongoing (or waiting in a queue) but we have the
// full response available, we decode the bytes synchronously and ignore the
// async decode when it finishes later.
// In the case where a decode is ongoing (or waiting in a queue) but we
// have the full response available, we decode the bytes synchronously
// and ignore the async decode when it finishes later.
// TODO: make this behaviour configurable according to the caller's needs.
self.handle_decoder(decoded);
match self.get_completed_image_if_available(&url, placeholder) {
store.handle_decoder(decoded);
match store.get_completed_image_if_available(&url, use_placeholder) {
Some(result) => result,
None => Err(ImageState::LoadError),
}
}
}
/// Create a new image cache.
pub fn new_image_cache_thread(webrender_api: webrender_traits::RenderApi) -> ImageCacheThread {
let (ipc_command_sender, ipc_command_receiver) = ipc::channel().unwrap();
/// Add a new listener for the given pending image id. If the image is already present,
/// the responder will still receive the expected response.
fn add_listener(&self, id: PendingImageId, listener: ImageResponder) {
let mut store = self.store.lock().unwrap();
if let Some(load) = store.pending_loads.get_by_key_mut(&id) {
if let Some(ref metadata) = load.metadata {
listener.respond(ImageResponse::MetadataLoaded(metadata.clone()));
}
load.add_listener(listener);
return;
}
if let Some(load) = store.completed_loads.values().find(|l| l.id == id) {
listener.respond(load.image_response.clone());
return;
}
warn!("Couldn't find cached entry for listener {:?}", id);
}
thread::Builder::new().name("ImageCacheThread".to_owned()).spawn(move || {
ImageCache::run(webrender_api, ipc_command_receiver)
}).expect("Thread spawning failed");
/// Inform the image cache about a response for a pending request.
fn notify_pending_response(&self, id: PendingImageId, action: FetchResponseMsg) {
match (action, id) {
(FetchResponseMsg::ProcessRequestBody, _) |
(FetchResponseMsg::ProcessRequestEOF, _) => return,
(FetchResponseMsg::ProcessResponse(_response), _) => {}
(FetchResponseMsg::ProcessResponseChunk(data), _) => {
debug!("Got some data for {:?}", id);
let mut store = self.store.lock().unwrap();
let pending_load = store.pending_loads.get_by_key_mut(&id).unwrap();
pending_load.bytes.extend_from_slice(&data);
//jmr0 TODO: possibly move to another task?
if let None = pending_load.metadata {
if let Ok(metadata) = load_from_buf(&pending_load.bytes.as_slice()) {
let dimensions = metadata.dimensions();
let img_metadata = ImageMetadata { width: dimensions.width,
height: dimensions.height };
for listener in &pending_load.listeners {
listener.respond(ImageResponse::MetadataLoaded(img_metadata.clone()));
}
pending_load.metadata = Some(img_metadata);
}
}
}
(FetchResponseMsg::ProcessResponseEOF(result), key) => {
debug!("Received EOF for {:?}", key);
match result {
Ok(()) => {
let bytes = {
let mut store = self.store.lock().unwrap();
let pending_load = store.pending_loads.get_by_key_mut(&id).unwrap();
pending_load.result = Some(result);
debug!("Async decoding {} ({:?})", pending_load.url, key);
pending_load.bytes.mark_complete()
};
ImageCacheThread::new(ipc_command_sender)
}
fn decode_bytes_sync(key: LoadKey, bytes: &[u8]) -> DecoderMsg {
let image = load_from_memory(bytes);
DecoderMsg {
key: key,
image: image
let local_store = self.store.clone();
thread::spawn(move || {
let msg = decode_bytes_sync(key, &*bytes);
debug!("Image decoded");
local_store.lock().unwrap().handle_decoder(msg);
});
}
Err(_) => {
debug!("Processing error for {:?}", key);
let mut store = self.store.lock().unwrap();
match store.placeholder_image.clone() {
Some(placeholder_image) => {
store.complete_load(
id, LoadResult::PlaceholderLoaded(placeholder_image))
}
None => store.complete_load(id, LoadResult::None),
}
}
}
}
}
}
}
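// Illustrative sketch, not part of the original patch: the order of fetch events a
// network listener is expected to forward for one pending image. Request-phase
// messages and ProcessResponse are no-ops for the cache; each chunk accumulates bytes
// (and may make metadata available), and EOF triggers the async decode. The function
// and parameter names are illustrative; only the FetchResponseMsg variants matched
// above are assumed.
fn forward_fetch_events_example(cache: &ImageCacheImpl, id: PendingImageId, body: Vec<u8>) {
    cache.notify_pending_response(id, FetchResponseMsg::ProcessResponseChunk(body));
    cache.notify_pending_response(id, FetchResponseMsg::ProcessResponseEOF(Ok(())));
}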

View File

@ -4,7 +4,6 @@
#![deny(unsafe_code)]
#![feature(box_syntax)]
#![feature(mpsc_select)]
#![feature(step_by)]
extern crate base64;
@ -32,7 +31,6 @@ extern crate serde_derive;
extern crate serde_json;
extern crate servo_config;
extern crate servo_url;
extern crate threadpool;
extern crate time;
#[cfg(any(target_os = "macos", target_os = "linux", target_os = "windows"))]
extern crate tinyfiledialogs;
@ -51,7 +49,7 @@ mod data_loader;
pub mod filemanager_thread;
pub mod hsts;
mod http_loader;
pub mod image_cache_thread;
pub mod image_cache;
pub mod mime_classifier;
pub mod resource_thread;
mod storage_thread;

View File

@ -16,6 +16,7 @@ heapsize_derive = "0.1"
hyper = "0.9.9"
hyper_serde = "0.5"
image = "0.12"
immeta = "0.3.1"
ipc-channel = "0.7"
lazy_static = "0.2"
log = "0.3.5"

View File

@ -0,0 +1,121 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use FetchResponseMsg;
use image::base::{Image, ImageMetadata};
use ipc_channel::ipc::IpcSender;
use servo_url::ServoUrl;
use std::sync::Arc;
use webrender_traits;
// ======================================================================
// Aux structs and enums.
// ======================================================================
/// Whether a consumer is in a position to request images or not. A consumer may
/// be unable to request images when animations are being processed by the layout
/// thread while the script thread is executing in parallel.
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize)]
pub enum CanRequestImages {
No,
Yes,
}
/// Indicates whether the entire image or just its metadata is available.
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub enum ImageOrMetadataAvailable {
ImageAvailable(Arc<Image>),
MetadataAvailable(ImageMetadata),
}
/// This is optionally passed to the image cache when requesting
/// an image, and returned to the specified event loop when the
/// image load completes. It is typically used to trigger a reflow
/// and/or repaint.
#[derive(Clone, Deserialize, Serialize)]
pub struct ImageResponder {
id: PendingImageId,
sender: IpcSender<PendingImageResponse>,
}
impl ImageResponder {
pub fn new(sender: IpcSender<PendingImageResponse>, id: PendingImageId) -> ImageResponder {
ImageResponder {
sender: sender,
id: id,
}
}
pub fn respond(&self, response: ImageResponse) {
debug!("Notifying listener");
// This send can fail if the thread waiting for this notification has panicked.
// That's not a case that's worth warning about.
// TODO(#15501): are there cases in which we should perform cleanup?
let _ = self.sender.send(PendingImageResponse {
response: response,
id: self.id,
});
}
}
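// Illustrative sketch, not part of the original patch: how the two halves of an
// ImageResponder fit together. The receiver half stays with the consumer's event
// loop while the sender half travels to the image cache inside the responder; when
// the cache answers, the consumer receives a PendingImageResponse tagged with the id
// it registered. Only the types declared in this module plus `ipc_channel::ipc` are
// assumed; the function name is illustrative.
fn responder_round_trip_example(id: PendingImageId) {
    let (sender, receiver) = ::ipc_channel::ipc::channel::<PendingImageResponse>().unwrap();
    let responder = ImageResponder::new(sender, id);

    // The cache calls respond() when the load finishes; here it is invoked directly.
    responder.respond(ImageResponse::None);

    // The consumer's event loop sees the response paired with the original id.
    let notification = receiver.recv().unwrap();
    assert_eq!(notification.id, id);
}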
/// The returned image.
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub enum ImageResponse {
/// The requested image was loaded.
Loaded(Arc<Image>),
/// The requested image metadata was loaded.
MetadataLoaded(ImageMetadata),
/// The requested image failed to load, so a placeholder was loaded instead.
PlaceholderLoaded(Arc<Image>),
/// Neither the requested image nor the placeholder could be loaded.
None,
}
/// The current state of an image in the cache.
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum ImageState {
Pending(PendingImageId),
LoadError,
NotRequested(PendingImageId),
}
/// The unique id for an image that has previously been requested.
#[derive(Copy, Clone, PartialEq, Eq, Deserialize, Serialize, HeapSizeOf, Hash, Debug)]
pub struct PendingImageId(pub u64);
#[derive(Deserialize, Serialize)]
pub struct PendingImageResponse {
pub response: ImageResponse,
pub id: PendingImageId,
}
#[derive(Copy, Clone, PartialEq, Hash, Eq, Deserialize, Serialize)]
pub enum UsePlaceholder {
No,
Yes,
}
// ======================================================================
// ImageCache public API.
// ======================================================================
pub trait ImageCache: Sync + Send {
fn new(webrender_api: webrender_traits::RenderApi) -> Self where Self: Sized;
/// Return any available metadata or image for the given URL,
/// or an indication that the image is not yet available if it is in progress,
/// or else reserve a slot in the cache for the URL if the consumer can request images.
fn find_image_or_metadata(&self,
url: ServoUrl,
use_placeholder: UsePlaceholder,
can_request: CanRequestImages)
-> Result<ImageOrMetadataAvailable, ImageState>;
/// Add a new listener for the given pending image id. If the image is already present,
/// the responder will still receive the expected response.
fn add_listener(&self, id: PendingImageId, listener: ImageResponder);
/// Inform the image cache about a response for a pending request.
fn notify_pending_response(&self, id: PendingImageId, action: FetchResponseMsg);
}
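// Illustrative sketch, not part of the original patch: a typical caller-side flow
// against this trait, matching on the result of find_image_or_metadata(). The
// function and variable names are illustrative; only items declared above and the
// public width/height fields of Image and ImageMetadata are assumed.
fn poll_image_example(cache: &ImageCache, url: ServoUrl, sender: IpcSender<PendingImageResponse>) {
    match cache.find_image_or_metadata(url, UsePlaceholder::Yes, CanRequestImages::Yes) {
        Ok(ImageOrMetadataAvailable::ImageAvailable(image)) => {
            // Fully decoded; ready to display.
            println!("image ready: {}x{}", image.width, image.height);
        }
        Ok(ImageOrMetadataAvailable::MetadataAvailable(meta)) => {
            // Dimensions are known, but the pixels are still being decoded.
            println!("metadata only: {}x{}", meta.width, meta.height);
        }
        Err(ImageState::Pending(id)) => {
            // A load is in flight: register to be notified when it finishes.
            cache.add_listener(id, ImageResponder::new(sender, id));
        }
        Err(ImageState::NotRequested(id)) => {
            // A slot was reserved; the caller is expected to start a fetch and
            // forward its FetchResponseMsg events via notify_pending_response().
            println!("fetch needed for pending image {:?}", id);
        }
        Err(ImageState::LoadError) => println!("image failed to load"),
    }
}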

View File

@ -1,169 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use FetchResponseMsg;
use image::base::{Image, ImageMetadata};
use ipc_channel::ipc::{self, IpcSender};
use servo_url::ServoUrl;
use std::sync::Arc;
/// This is optionally passed to the image cache when requesting
/// an image, and returned to the specified event loop when the
/// image load completes. It is typically used to trigger a reflow
/// and/or repaint.
#[derive(Clone, Deserialize, Serialize)]
pub struct ImageResponder {
id: PendingImageId,
sender: IpcSender<PendingImageResponse>,
}
#[derive(Deserialize, Serialize)]
pub struct PendingImageResponse {
pub response: ImageResponse,
pub id: PendingImageId,
}
impl ImageResponder {
pub fn new(sender: IpcSender<PendingImageResponse>, id: PendingImageId) -> ImageResponder {
ImageResponder {
sender: sender,
id: id,
}
}
pub fn respond(&self, response: ImageResponse) {
// This send can fail if the thread waiting for this notification has panicked.
// That's not a case that's worth warning about.
// TODO(#15501): are there cases in which we should perform cleanup?
let _ = self.sender.send(PendingImageResponse {
response: response,
id: self.id,
});
}
}
/// The unique id for an image that has previously been requested.
#[derive(Copy, Clone, PartialEq, Eq, Deserialize, Serialize, HeapSizeOf, Hash, Debug)]
pub struct PendingImageId(pub u64);
/// The current state of an image in the cache.
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum ImageState {
Pending(PendingImageId),
LoadError,
NotRequested(PendingImageId),
}
/// The returned image.
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub enum ImageResponse {
/// The requested image was loaded.
Loaded(Arc<Image>),
/// The requested image metadata was loaded.
MetadataLoaded(ImageMetadata),
/// The requested image failed to load, so a placeholder was loaded instead.
PlaceholderLoaded(Arc<Image>),
/// Neither the requested image nor the placeholder could be loaded.
None,
}
/// Indicating either entire image or just metadata availability
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub enum ImageOrMetadataAvailable {
ImageAvailable(Arc<Image>),
MetadataAvailable(ImageMetadata),
}
/// Commands that the image cache understands.
#[derive(Deserialize, Serialize)]
pub enum ImageCacheCommand {
/// Synchronously check the state of an image in the cache. If the image is in a loading
/// state but its metadata has been made available, it will be sent as a response.
GetImageOrMetadataIfAvailable(ServoUrl,
UsePlaceholder,
CanRequestImages,
IpcSender<Result<ImageOrMetadataAvailable, ImageState>>),
/// Add a new listener for the given pending image.
AddListener(PendingImageId, ImageResponder),
/// Instruct the cache to store this data as a newly-complete network request and continue
/// decoding the result into pixel data
StoreDecodeImage(PendingImageId, FetchResponseMsg),
/// Clients must wait for a response before shutting down the ResourceThread
Exit(IpcSender<()>),
}
#[derive(Copy, Clone, PartialEq, Hash, Eq, Deserialize, Serialize)]
pub enum UsePlaceholder {
No,
Yes,
}
/// Whether a consumer is in a position to request images or not. This can occur when
/// animations are being processed by the layout thread while the script thread is executing
/// in parallel.
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize)]
pub enum CanRequestImages {
No,
Yes,
}
/// The client side of the image cache thread. This can be safely cloned
/// and passed to different threads.
#[derive(Clone, Deserialize, Serialize)]
pub struct ImageCacheThread {
chan: IpcSender<ImageCacheCommand>,
}
/// The public API for the image cache thread.
impl ImageCacheThread {
/// Construct a new image cache
pub fn new(chan: IpcSender<ImageCacheCommand>) -> ImageCacheThread {
ImageCacheThread {
chan: chan,
}
}
/// Get the current state of an image, returning its metadata if available.
/// See ImageCacheCommand::GetImageOrMetadataIfAvailable.
///
/// FIXME: We shouldn't do IPC for data uris!
pub fn find_image_or_metadata(&self,
url: ServoUrl,
use_placeholder: UsePlaceholder,
can_request: CanRequestImages)
-> Result<ImageOrMetadataAvailable, ImageState> {
let (sender, receiver) = ipc::channel().unwrap();
let msg = ImageCacheCommand::GetImageOrMetadataIfAvailable(url,
use_placeholder,
can_request,
sender);
let _ = self.chan.send(msg);
try!(receiver.recv().map_err(|_| ImageState::LoadError))
}
/// Add a new listener for the given pending image id. If the image is already present,
/// the responder will still receive the expected response.
pub fn add_listener(&self, id: PendingImageId, responder: ImageResponder) {
let msg = ImageCacheCommand::AddListener(id, responder);
self.chan.send(msg).expect("Image cache thread is not available");
}
/// Inform the image cache about a response for a pending request.
pub fn notify_pending_response(&self, id: PendingImageId, data: FetchResponseMsg) {
let msg = ImageCacheCommand::StoreDecodeImage(id, data);
self.chan.send(msg).expect("Image cache thread is not available");
}
/// Shutdown the image cache thread.
pub fn exit(&self) {
// If the image cache is not available when we're trying to shut it down,
// that is not worth warning about.
let (response_chan, response_port) = ipc::channel().unwrap();
let _ = self.chan.send(ImageCacheCommand::Exit(response_chan));
let _ = response_port.recv();
}
}

View File

@ -50,7 +50,7 @@ use storage_thread::StorageThreadMsg;
pub mod blob_url_store;
pub mod filemanager_thread;
pub mod hosts;
pub mod image_cache_thread;
pub mod image_cache;
pub mod net_error_list;
pub mod pub_domains;
pub mod request;

View File

@ -62,7 +62,7 @@ use msg::constellation_msg::{FrameId, FrameType, PipelineId};
use net_traits::{Metadata, NetworkError, ReferrerPolicy, ResourceThreads};
use net_traits::filemanager_thread::RelativePos;
use net_traits::image::base::{Image, ImageMetadata};
use net_traits::image_cache_thread::{ImageCacheThread, PendingImageId};
use net_traits::image_cache::{ImageCache, PendingImageId};
use net_traits::request::{Request, RequestInit};
use net_traits::response::{Response, ResponseBody};
use net_traits::response::HttpsState;
@ -321,7 +321,7 @@ unsafe_no_jsmanaged_fields!(bool, f32, f64, String, AtomicBool, AtomicUsize, Uui
unsafe_no_jsmanaged_fields!(usize, u8, u16, u32, u64);
unsafe_no_jsmanaged_fields!(isize, i8, i16, i32, i64);
unsafe_no_jsmanaged_fields!(ServoUrl, ImmutableOrigin, MutableOrigin);
unsafe_no_jsmanaged_fields!(Image, ImageMetadata, ImageCacheThread, PendingImageId);
unsafe_no_jsmanaged_fields!(Image, ImageMetadata, ImageCache, PendingImageId);
unsafe_no_jsmanaged_fields!(Metadata);
unsafe_no_jsmanaged_fields!(NetworkError);
unsafe_no_jsmanaged_fields!(Atom, Prefix, LocalName, Namespace, QualName);

View File

@ -40,7 +40,7 @@ use euclid::rect::Rect;
use euclid::size::Size2D;
use ipc_channel::ipc::{self, IpcSender};
use net_traits::image::base::PixelFormat;
use net_traits::image_cache_thread::ImageResponse;
use net_traits::image_cache::ImageResponse;
use num_traits::ToPrimitive;
use script_traits::ScriptMsg as ConstellationMsg;
use servo_url::ServoUrl;

View File

@ -338,12 +338,12 @@ impl<'a> From<&'a WebGLContextAttributes> for GLContextAttributes {
pub mod utils {
use dom::window::Window;
use net_traits::image_cache_thread::{ImageResponse, UsePlaceholder, ImageOrMetadataAvailable};
use net_traits::image_cache_thread::CanRequestImages;
use net_traits::image_cache::{ImageResponse, UsePlaceholder, ImageOrMetadataAvailable};
use net_traits::image_cache::CanRequestImages;
use servo_url::ServoUrl;
pub fn request_image_from_cache(window: &Window, url: ServoUrl) -> ImageResponse {
let image_cache = window.image_cache_thread();
let image_cache = window.image_cache();
let response =
image_cache.find_image_or_metadata(url.into(),
UsePlaceholder::No,

View File

@ -40,9 +40,9 @@ use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use net_traits::{FetchResponseListener, FetchMetadata, NetworkError, FetchResponseMsg};
use net_traits::image::base::{Image, ImageMetadata};
use net_traits::image_cache_thread::{ImageResponder, ImageResponse, PendingImageId, ImageState};
use net_traits::image_cache_thread::{UsePlaceholder, ImageOrMetadataAvailable, CanRequestImages};
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::image_cache::{CanRequestImages, ImageCache, ImageOrMetadataAvailable};
use net_traits::image_cache::{ImageResponder, ImageResponse, ImageState, PendingImageId};
use net_traits::image_cache::UsePlaceholder;
use net_traits::request::{RequestInit, Type as RequestType};
use network_listener::{NetworkListener, PreInvoke};
use num_traits::ToPrimitive;
@ -120,8 +120,8 @@ impl Runnable for ImageResponseHandlerRunnable {
/// The context required for asynchronously loading an external image.
struct ImageContext {
/// A handle with which to communicate with the image cache.
image_cache: ImageCacheThread,
/// Reference to the script thread image cache.
image_cache: Arc<ImageCache>,
/// Indicates whether the request failed, and why
status: Result<(), NetworkError>,
/// The cache ID for this request.
@ -186,7 +186,7 @@ impl HTMLImageElement {
Some(LoadBlocker::new(&*document, LoadType::Image(img_url.clone())));
}
fn add_cache_listener_for_element(image_cache: &ImageCacheThread,
fn add_cache_listener_for_element(image_cache: Arc<ImageCache>,
id: PendingImageId,
elem: &HTMLImageElement) {
let trusted_node = Trusted::new(elem);
@ -197,8 +197,9 @@ impl HTMLImageElement {
let wrapper = window.get_runnable_wrapper();
let generation = elem.generation.get();
ROUTER.add_route(responder_receiver.to_opaque(), box move |message| {
// Return the image via a message to the script thread, which marks the element
// as dirty and triggers a reflow.
debug!("Got image {:?}", message);
// Return the image via a message to the script thread, which marks
// the element as dirty and triggers a reflow.
let runnable = ImageResponseHandlerRunnable::new(
trusted_node.clone(), message.to().unwrap(), generation);
let _ = task_source.queue_with_wrapper(box runnable, &wrapper);
@ -208,7 +209,7 @@ impl HTMLImageElement {
}
let window = window_from_node(self);
let image_cache = window.image_cache_thread();
let image_cache = window.image_cache();
let response =
image_cache.find_image_or_metadata(img_url.clone().into(),
UsePlaceholder::Yes,
@ -223,7 +224,7 @@ impl HTMLImageElement {
}
Err(ImageState::Pending(id)) => {
add_cache_listener_for_element(image_cache, id, self);
add_cache_listener_for_element(image_cache.clone(), id, self);
}
Err(ImageState::LoadError) => {
@ -242,7 +243,7 @@ impl HTMLImageElement {
let window = window_from_node(self);
let context = Arc::new(Mutex::new(ImageContext {
image_cache: window.image_cache_thread().clone(),
image_cache: window.image_cache(),
status: Ok(()),
id: id,
}));

View File

@ -42,7 +42,7 @@ use js::jsapi::{JSContext, JSObject, Type, Rooted};
use js::jsval::{BooleanValue, DoubleValue, Int32Value, JSVal, NullValue, UndefinedValue};
use js::typedarray::{TypedArray, TypedArrayElement, Float32, Int32};
use net_traits::image::base::PixelFormat;
use net_traits::image_cache_thread::ImageResponse;
use net_traits::image_cache::ImageResponse;
use offscreen_gl_context::{GLContextAttributes, GLLimits};
use script_traits::ScriptMsg as ConstellationMsg;
use std::cell::Cell;

View File

@ -62,8 +62,8 @@ use js::rust::Runtime;
use layout_image::fetch_image_for_layout;
use msg::constellation_msg::{FrameType, PipelineId};
use net_traits::{ResourceThreads, ReferrerPolicy};
use net_traits::image_cache_thread::{ImageResponder, ImageResponse};
use net_traits::image_cache_thread::{PendingImageResponse, ImageCacheThread, PendingImageId};
use net_traits::image_cache::{ImageCache, ImageResponder, ImageResponse};
use net_traits::image_cache::{PendingImageId, PendingImageResponse};
use net_traits::storage_thread::StorageType;
use num_traits::ToPrimitive;
use open;
@ -167,8 +167,8 @@ pub struct Window {
#[ignore_heap_size_of = "task sources are hard"]
file_reading_task_source: FileReadingTaskSource,
navigator: MutNullableJS<Navigator>,
#[ignore_heap_size_of = "channels are hard"]
image_cache_thread: ImageCacheThread,
#[ignore_heap_size_of = "Arc"]
image_cache: Arc<ImageCache>,
#[ignore_heap_size_of = "channels are hard"]
image_cache_chan: Sender<ImageCacheMsg>,
browsing_context: MutNullableJS<BrowsingContext>,
@ -315,8 +315,8 @@ impl Window {
(box SendableMainThreadScriptChan(tx), box rx)
}
pub fn image_cache_thread(&self) -> &ImageCacheThread {
&self.image_cache_thread
pub fn image_cache(&self) -> Arc<ImageCache> {
self.image_cache.clone()
}
/// This can panic if it is called after the browsing context has been discarded
@ -1227,7 +1227,7 @@ impl Window {
let node = from_untrusted_node_address(js_runtime.rt(), image.node);
if let PendingImageState::Unrequested(ref url) = image.state {
fetch_image_for_layout(url.clone(), &*node, id, self.image_cache_thread.clone());
fetch_image_for_layout(url.clone(), &*node, id, self.image_cache.clone());
}
let mut images = self.pending_layout_images.borrow_mut();
@ -1239,7 +1239,7 @@ impl Window {
ROUTER.add_route(responder_listener.to_opaque(), box move |message| {
let _ = image_cache_chan.send((pipeline, message.to().unwrap()));
});
self.image_cache_thread.add_listener(id, ImageResponder::new(responder, id));
self.image_cache.add_listener(id, ImageResponder::new(responder, id));
nodes.push(JS::from_ref(&*node));
}
}
@ -1703,7 +1703,7 @@ impl Window {
history_task_source: HistoryTraversalTaskSource,
file_task_source: FileReadingTaskSource,
image_cache_chan: Sender<ImageCacheMsg>,
image_cache_thread: ImageCacheThread,
image_cache: Arc<ImageCache>,
resource_threads: ResourceThreads,
bluetooth_thread: IpcSender<BluetoothRequest>,
mem_profiler_chan: MemProfilerChan,
@ -1747,8 +1747,8 @@ impl Window {
history_traversal_task_source: history_task_source,
file_reading_task_source: file_task_source,
image_cache_chan: image_cache_chan,
image_cache: image_cache.clone(),
navigator: Default::default(),
image_cache_thread: image_cache_thread,
history: Default::default(),
browsing_context: Default::default(),
document: Default::default(),

View File

@ -12,7 +12,7 @@ use dom::node::{Node, document_from_node};
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use net_traits::{FetchResponseMsg, FetchResponseListener, FetchMetadata, NetworkError};
use net_traits::image_cache_thread::{ImageCacheThread, PendingImageId};
use net_traits::image_cache::{ImageCache, PendingImageId};
use net_traits::request::{Type as RequestType, RequestInit as FetchRequestInit};
use network_listener::{NetworkListener, PreInvoke};
use servo_url::ServoUrl;
@ -20,7 +20,7 @@ use std::sync::{Arc, Mutex};
struct LayoutImageContext {
id: PendingImageId,
cache: ImageCacheThread,
cache: Arc<ImageCache>,
}
impl FetchResponseListener for LayoutImageContext {
@ -49,7 +49,7 @@ impl PreInvoke for LayoutImageContext {}
pub fn fetch_image_for_layout(url: ServoUrl,
node: &Node,
id: PendingImageId,
cache: ImageCacheThread) {
cache: Arc<ImageCache>) {
let context = Arc::new(Mutex::new(LayoutImageContext {
id: id,
cache: cache,

View File

@ -73,7 +73,7 @@ use microtask::{MicrotaskQueue, Microtask};
use msg::constellation_msg::{FrameId, FrameType, PipelineId, PipelineNamespace};
use net_traits::{CoreResourceMsg, FetchMetadata, FetchResponseListener};
use net_traits::{IpcSend, Metadata, ReferrerPolicy, ResourceThreads};
use net_traits::image_cache_thread::{PendingImageResponse, ImageCacheThread};
use net_traits::image_cache::{ImageCache, PendingImageResponse};
use net_traits::request::{CredentialsMode, Destination, RequestInit};
use net_traits::storage_thread::StorageType;
use network_listener::NetworkListener;
@ -233,7 +233,7 @@ enum MixedMessage {
FromScript(MainThreadScriptMsg),
FromDevtools(DevtoolScriptControlMsg),
FromImageCache((PipelineId, PendingImageResponse)),
FromScheduler(TimerEvent)
FromScheduler(TimerEvent),
}
/// Messages used to control the script event loop
@ -408,8 +408,8 @@ pub struct ScriptThread {
registration_map: DOMRefCell<HashMap<ServoUrl, JS<ServiceWorkerRegistration>>>,
/// A job queue for Service Workers keyed by their scope url
job_queue_map: Rc<JobQueue>,
/// A handle to the image cache thread.
image_cache_thread: ImageCacheThread,
/// Image cache for this script thread.
image_cache: Arc<ImageCache>,
/// A handle to the resource thread. This is an `Arc` to avoid running out of file descriptors if
/// there are many iframes.
resource_threads: ResourceThreads,
@ -450,7 +450,6 @@ pub struct ScriptThread {
/// The channel on which the image cache can send messages to ourself.
image_cache_channel: Sender<ImageCacheMsg>,
/// For providing contact with the time profiler.
time_profiler_chan: time::ProfilerChan,
@ -685,7 +684,7 @@ impl ScriptThread {
registration_map: DOMRefCell::new(HashMap::new()),
job_queue_map: Rc::new(JobQueue::new()),
image_cache_thread: state.image_cache_thread,
image_cache: state.image_cache.clone(),
image_cache_channel: image_cache_channel,
image_cache_port: image_cache_port,
@ -1267,7 +1266,7 @@ impl ScriptThread {
pipeline_port: pipeline_port,
constellation_chan: self.layout_to_constellation_chan.clone(),
script_chan: self.control_chan.clone(),
image_cache_thread: self.image_cache_thread.clone(),
image_cache: self.image_cache.clone(),
content_process_shutdown_chan: content_process_shutdown_chan,
layout_threads: layout_threads,
});
@ -1756,7 +1755,7 @@ impl ScriptThread {
HistoryTraversalTaskSource(history_sender.clone()),
self.file_reading_task_source.clone(),
self.image_cache_channel.clone(),
self.image_cache_thread.clone(),
self.image_cache.clone(),
self.resource_threads.clone(),
self.bluetooth_thread.clone(),
self.mem_profiler_chan.clone(),

View File

@ -43,7 +43,7 @@ use canvas_traits::CanvasMsg;
use core::nonzero::NonZero;
use ipc_channel::ipc::IpcSender;
use libc::c_void;
use net_traits::image_cache_thread::PendingImageId;
use net_traits::image_cache::PendingImageId;
use script_traits::UntrustedNodeAddress;
use servo_url::ServoUrl;
use std::sync::atomic::AtomicIsize;

View File

@ -9,7 +9,7 @@ use euclid::rect::Rect;
use gfx_traits::Epoch;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use msg::constellation_msg::PipelineId;
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::image_cache::ImageCache;
use profile_traits::mem::ReportsChan;
use rpc::LayoutRPC;
use script_traits::{ConstellationControlMsg, LayoutControlMsg};
@ -143,7 +143,7 @@ pub struct NewLayoutThreadInfo {
pub pipeline_port: IpcReceiver<LayoutControlMsg>,
pub constellation_chan: IpcSender<ConstellationMsg>,
pub script_chan: IpcSender<ConstellationControlMsg>,
pub image_cache_thread: ImageCacheThread,
pub image_cache: Arc<ImageCache>,
pub content_process_shutdown_chan: Option<IpcSender<()>>,
pub layout_threads: usize,
}

View File

@ -57,7 +57,7 @@ use msg::constellation_msg::{FrameId, FrameType, Key, KeyModifiers, KeyState};
use msg::constellation_msg::{PipelineId, PipelineNamespaceId, TraversalDirection};
use net_traits::{ReferrerPolicy, ResourceThreads};
use net_traits::image::base::Image;
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::image_cache::ImageCache;
use net_traits::response::HttpsState;
use net_traits::storage_thread::StorageType;
use profile_traits::mem;
@ -67,6 +67,7 @@ use servo_url::ImmutableOrigin;
use servo_url::ServoUrl;
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
use style_traits::{CSSPixel, UnsafeNode};
use webdriver_msg::{LoadStatus, WebDriverScriptCommand};
@ -483,8 +484,8 @@ pub struct InitialScriptState {
pub resource_threads: ResourceThreads,
/// A channel to the bluetooth thread.
pub bluetooth_thread: IpcSender<BluetoothRequest>,
/// A channel to the image cache thread.
pub image_cache_thread: ImageCacheThread,
/// The image cache for this script thread.
pub image_cache: Arc<ImageCache>,
/// A channel to the time profiler thread.
pub time_profiler_chan: profile_traits::time::ProfilerChan,
/// A channel to the memory profiler thread.
@ -507,7 +508,8 @@ pub trait ScriptThreadFactory {
/// Type of message sent from script to layout.
type Message;
/// Create a `ScriptThread`.
fn create(state: InitialScriptState, load_data: LoadData) -> (Sender<Self::Message>, Receiver<Self::Message>);
fn create(state: InitialScriptState, load_data: LoadData)
-> (Sender<Self::Message>, Receiver<Self::Message>);
}
/// Whether the sandbox attribute is present for an iframe element

View File

@ -82,7 +82,6 @@ use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use log::{Log, LogMetadata, LogRecord};
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use profile::mem as profile_mem;
@ -290,7 +289,6 @@ fn create_constellation(user_agent: Cow<'static, str>,
devtools_chan.clone(),
time_profiler_chan.clone(),
config_dir);
let image_cache_thread = new_image_cache_thread(webrender_api_sender.create_api());
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
Some(webrender_api_sender.create_api()));
@ -301,7 +299,6 @@ fn create_constellation(user_agent: Cow<'static, str>,
debugger_chan: debugger_chan,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,

View File

@ -135,13 +135,20 @@ pub struct StoredRestyleHint(RestyleHint);
impl StoredRestyleHint {
/// Propagates this restyle hint to a child element.
pub fn propagate(&self) -> Self {
// If we have RESTYLE_CSS_ANIMATIONS restyle hint, it means we are in the
// middle of an animation only restyle. In that case, we don't need to
// propagate any restyle hints.
StoredRestyleHint(if self.0.contains(RESTYLE_CSS_ANIMATIONS) {
RestyleHint::empty()
} else if self.0.contains(RESTYLE_DESCENDANTS) {
pub fn propagate(&mut self) -> Self {
use std::mem;
// If we have RESTYLE_CSS_ANIMATIONS restyle hint, it means we are in
// the middle of an animation only restyle. In that case, we don't need
// to propagate any restyle hints, and we need to remove ourselves.
if self.0.contains(RESTYLE_CSS_ANIMATIONS) {
self.0.remove(RESTYLE_CSS_ANIMATIONS);
return Self::empty();
}
// Else we should clear ourselves, and return the propagated hint.
let hint = mem::replace(&mut self.0, RestyleHint::empty());
StoredRestyleHint(if hint.contains(RESTYLE_DESCENDANTS) {
RESTYLE_SELF | RESTYLE_DESCENDANTS
} else {
RestyleHint::empty()
@ -180,11 +187,6 @@ impl StoredRestyleHint {
self.0 |= other.0
}
/// Remove animation restyle hint.
pub fn remove_animation_hint(&mut self) {
self.0.remove(RESTYLE_CSS_ANIMATIONS)
}
/// Returns true if the hint has animation-only restyle.
pub fn has_animation_hint(&self) -> bool {
self.0.contains(RESTYLE_CSS_ANIMATIONS)

View File

@ -15,7 +15,6 @@ use restyle_hints::{RESTYLE_DESCENDANTS, RESTYLE_SELF};
use selector_parser::RestyleDamage;
use servo_config::opts;
use std::borrow::BorrowMut;
use std::mem;
use stylist::Stylist;
/// A per-traversal-level chunk of data. This is sent down by the traversal, and
@ -484,37 +483,46 @@ pub fn recalc_style_at<E, D>(traversal: &D,
// Now that matching and cascading is done, clear the bits corresponding to
// those operations and compute the propagated restyle hint.
let empty_hint = StoredRestyleHint::empty();
let propagated_hint = match data.get_restyle_mut() {
None => empty_hint,
None => StoredRestyleHint::empty(),
Some(r) => {
debug_assert!(context.shared.animation_only_restyle ||
!r.hint.has_animation_hint(),
"animation restyle hint should be handled during \
animation-only restyles");
r.recascade = false;
if r.hint.has_animation_hint() {
debug_assert!(context.shared.animation_only_restyle,
"animation restyle hint should be handled during animation-only restyles");
// Drop animation restyle hint.
let propagated_hint = r.hint.propagate();
r.hint.remove_animation_hint();
propagated_hint
} else {
mem::replace(&mut r.hint, empty_hint).propagate()
}
r.hint.propagate()
},
};
debug_assert!(data.has_current_styles() || context.shared.animation_only_restyle,
"Should have computed styles, or not yet have valid computed styles in the case of an animation-only restyle");
trace!("propagated_hint={:?}, inherited_style_changed={:?}", propagated_hint, inherited_style_changed);
trace!("propagated_hint={:?}, inherited_style_changed={:?}",
propagated_hint, inherited_style_changed);
let has_dirty_descendants_for_this_restyle =
if context.shared.animation_only_restyle {
element.has_animation_only_dirty_descendants()
} else {
element.has_dirty_descendants()
};
// Preprocess children, propagating restyle hints and handling sibling relationships.
if traversal.should_traverse_children(&mut context.thread_local, element, &data, DontLog) &&
((!context.shared.animation_only_restyle && element.has_dirty_descendants()) ||
(context.shared.animation_only_restyle && element.has_animation_only_dirty_descendants()) ||
!propagated_hint.is_empty() ||
inherited_style_changed) {
if traversal.should_traverse_children(&mut context.thread_local,
element,
&data,
DontLog) &&
(has_dirty_descendants_for_this_restyle ||
!propagated_hint.is_empty() ||
inherited_style_changed) {
let damage_handled = data.get_restyle().map_or(RestyleDamage::empty(), |r| {
r.damage_handled() | r.damage.handled_for_descendants()
});
preprocess_children(traversal, element, propagated_hint, damage_handled, inherited_style_changed);
preprocess_children(traversal,
element,
propagated_hint,
damage_handled,
inherited_style_changed);
}
if context.shared.animation_only_restyle {

View File

@ -414,9 +414,6 @@
[Parsing: <tel:1234567890> against <http://example.org/foo/bar>]
expected: FAIL
[Parsing: <https://test:@test> against <about:blank>]
expected: FAIL
[Parsing: <https://:@test> against <about:blank>]
expected: FAIL
@ -426,15 +423,6 @@
[Parsing: <non-special://:@test/x> against <about:blank>]
expected: FAIL
[Parsing: <http:a:@www.example.com> against <about:blank>]
expected: FAIL
[Parsing: <http:/a:@www.example.com> against <about:blank>]
expected: FAIL
[Parsing: <http://a:@www.example.com> against <about:blank>]
expected: FAIL
[Parsing: <http://10000000000> against <http://other.com/>]
expected: FAIL

View File

@ -423,9 +423,6 @@
[Parsing: <tel:1234567890> against <http://example.org/foo/bar>]
expected: FAIL
[Parsing: <https://test:@test> against <about:blank>]
expected: FAIL
[Parsing: <https://:@test> against <about:blank>]
expected: FAIL
@ -435,15 +432,6 @@
[Parsing: <non-special://:@test/x> against <about:blank>]
expected: FAIL
[Parsing: <http:a:@www.example.com> against <about:blank>]
expected: FAIL
[Parsing: <http:/a:@www.example.com> against <about:blank>]
expected: FAIL
[Parsing: <http://a:@www.example.com> against <about:blank>]
expected: FAIL
[Parsing: <http://10000000000> against <http://other.com/>]
expected: FAIL

View File

@ -201,9 +201,6 @@
[Parsing: <file:..> against <http://www.example.com/test>]
expected: FAIL
[Parsing: <https://test:@test> against <about:blank>]
expected: FAIL
[Parsing: <https://:@test> against <about:blank>]
expected: FAIL
@ -213,15 +210,6 @@
[Parsing: <non-special://:@test/x> against <about:blank>]
expected: FAIL
[Parsing: <http:a:@www.example.com> against <about:blank>]
expected: FAIL
[Parsing: <http:/a:@www.example.com> against <about:blank>]
expected: FAIL
[Parsing: <http://a:@www.example.com> against <about:blank>]
expected: FAIL
[Parsing: <http://10000000000> against <http://other.com/>]
expected: FAIL

View File

@ -228,8 +228,8 @@
let statvfs = new SharedAll.HollowStructure("statvfs",
Const.OSFILE_SIZEOF_STATVFS);
statvfs.add_field_at(Const.OSFILE_OFFSETOF_STATVFS_F_BSIZE,
"f_bsize", Type.unsigned_long.implementation);
statvfs.add_field_at(Const.OSFILE_OFFSETOF_STATVFS_F_FRSIZE,
"f_frsize", Type.unsigned_long.implementation);
statvfs.add_field_at(Const.OSFILE_OFFSETOF_STATVFS_F_BAVAIL,
"f_bavail", Type.fsblkcnt_t.implementation);

View File

@ -399,7 +399,7 @@
throw_on_negative("statvfs", (UnixFile.statvfs || UnixFile.statfs)(sourcePath, fileSystemInfoPtr));
let bytes = new Type.uint64_t.implementation(
fileSystemInfo.f_bsize * fileSystemInfo.f_bavail);
fileSystemInfo.f_frsize * fileSystemInfo.f_bavail);
return bytes.value;
};

View File

@ -12,10 +12,8 @@ const Cu = Components.utils;
const myScope = this;
Cu.import("resource://gre/modules/Log.jsm");
Cu.import("resource://gre/modules/debug.js", this);
Cu.import("resource://gre/modules/Services.jsm", this);
Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
Cu.import("resource://gre/modules/osfile.jsm", this);
Cu.import("resource://gre/modules/Promise.jsm", this);
Cu.import("resource://gre/modules/PromiseUtils.jsm", this);
Cu.import("resource://gre/modules/Task.jsm", this);

View File

@ -144,6 +144,10 @@ var SysInfo = {
}
},
getPropertyAsUint32(name) {
return this.get(name);
},
get(name) {
return this._genuine.get(name);
},

View File

@ -51,4 +51,12 @@ interface nsIUpdateTimerManager : nsISupports
void registerTimer(in AString id,
in nsITimerCallback callback,
in unsigned long interval);
/**
* Unregister an existing interval from the timer manager.
*
* @param id
* An id that identifies the interval.
*/
void unregisterTimer(in AString id);
};

View File

@ -330,6 +330,15 @@ TimerManager.prototype = {
this._ensureTimer(interval * 1000);
},
unregisterTimer: function TM_unregisterTimer(id) {
LOG(`TimerManager:unregisterTimer - id: ${id}`);
if (id in this._timers) {
delete this._timers[id];
} else {
LOG(`TimerManager:unregisterTimer - Ignoring unregistration request for unknown id: ${id}`);
}
},
classID: Components.ID("{B322A5C0-A419-484E-96BA-D7182163899F}"),
QueryInterface: XPCOMUtils.generateQI([Ci.nsIUpdateTimerManager,
Ci.nsITimerCallback,

View File

@ -111,6 +111,14 @@ const TESTS = [ {
classID: Components.ID("5136b201-d64c-4328-8cf1-1a63491cc117"),
notified: false,
lastUpdateTime: 0
}, {
desc: "Test Timer Callback 10",
timerID: "test10-update-timer",
defaultInterval: CONSUMER_TIMER_INTERVAL,
contractID: "@mozilla.org/test10/timercallback;1",
classID: Components.ID("1f42bbb3-d116-4012-8491-3ec4797a97ee"),
notified: false,
lastUpdateTime: 0
} ];
var gUTM;
@ -272,7 +280,7 @@ const gTest8TimerCallback = {
TESTS[8].notified = true;
TESTS[8].notifyTime = Date.now();
do_execute_soon(function() {
check_test8thru9(gTest8TimerCallback);
check_test8thru10(gTest8TimerCallback);
});
},
QueryInterface: XPCOMUtils.generateQI([Ci.nsITimerCallback])
@ -292,12 +300,21 @@ const gTest9TimerCallback = {
TESTS[9].notified = true;
TESTS[9].notifyTime = Date.now();
do_execute_soon(function() {
check_test8thru9(gTest9TimerCallback);
check_test8thru10(gTest9TimerCallback);
});
},
QueryInterface: XPCOMUtils.generateQI([Ci.nsITimerCallback])
};
const gTest10TimerCallback = {
notify: function T10CB_notify(aTimer) {
// The timer should have been unregistered before this could
// be called.
do_throw("gTest10TimerCallback notify method should not have been called");
},
QueryInterface: XPCOMUtils.generateQI([Ci.nsITimerCallback])
};
const gTest9Factory = {
createInstance: function T9F_createInstance(aOuter, aIID) {
if (aOuter == null) {
@ -469,10 +486,10 @@ function check_test0thru7() {
"no " + CATEGORY_UPDATE_TIMER + " categories should still be " +
"registered");
do_execute_soon(run_test8thru9);
do_execute_soon(run_test8thru10);
}
function run_test8thru9() {
function run_test8thru10() {
gPref.setIntPref(PREF_BRANCH_LAST_UPDATE_TIME + TESTS[8].timerID, 1);
gCompReg.registerFactory(TESTS[8].classID, TESTS[8].desc,
TESTS[8].contractID, gTest8Factory);
@ -483,9 +500,12 @@ function run_test8thru9() {
TESTS[9].contractID, gTest9Factory);
gUTM.registerTimer(TESTS[9].timerID, gTest9TimerCallback,
TESTS[9].defaultInterval);
gUTM.registerTimer(TESTS[10].timerID, gTest10TimerCallback,
TESTS[10].defaultInterval);
gUTM.unregisterTimer(TESTS[10].timerID);
}
function check_test8thru9(aTestTimerCallback) {
function check_test8thru10(aTestTimerCallback) {
aTestTimerCallback.timesCalled = (aTestTimerCallback.timesCalled || 0) + 1;
if (aTestTimerCallback.timesCalled < 2) {
return;

View File

@ -8,7 +8,6 @@ this.EXPORTED_SYMBOLS = ["ClientID"];
const {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;
Cu.import("resource://gre/modules/osfile.jsm");
Cu.import("resource://gre/modules/Task.jsm");
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://gre/modules/Preferences.jsm");
@ -19,6 +18,8 @@ const LOGGER_PREFIX = "ClientID::";
XPCOMUtils.defineLazyModuleGetter(this, "CommonUtils",
"resource://services-common/utils.js");
XPCOMUtils.defineLazyModuleGetter(this, "OS",
"resource://gre/modules/osfile.jsm");
XPCOMUtils.defineLazyGetter(this, "gDatareportingPath", () => {
return OS.Path.join(OS.Constants.Path.profileDir, "datareporting");

View File

@ -5738,9 +5738,14 @@
; If the user account has a split token
${If} "$0" == "3"
UAC::RunElevated
${If} "$0" == "0" ; Was elevation successful
UAC::Unload
; Nothing besides UAC initialized so no need to call OnEndCommon
Quit
${EndIf}
; Unload UAC since the elevation request was not successful and
; install anyway.
UAC::Unload
; Nothing besides UAC initialized so no need to call OnEndCommon
Quit
${EndIf}
${Else}
; Check if UAC is enabled. If the user has turned UAC on or off