Bug 1273136: Start remote streams on SRD, and end them even if offer/answer never completed. r=jesup, r=pehrsons

MozReview-Commit-ID: ulrDM0Gzj6
This commit is contained in:
Byron Campen [:bwc] 2016-05-23 10:22:01 -05:00
parent b52f88014f
commit 6caa8b4bd4
9 changed files with 182 additions and 225 deletions

View File

@ -30,6 +30,7 @@ class PeerConnectionMedia;
class PeerIdentity;
class ProcessedMediaStream;
class RemoteSourceStreamInfo;
class SourceStreamInfo;
namespace dom {
@ -228,6 +229,7 @@ class MediaStreamTrack : public DOMEventTargetHelper,
// PeerConnection and friends need to know our owning DOMStream and track id.
friend class mozilla::PeerConnectionImpl;
friend class mozilla::PeerConnectionMedia;
friend class mozilla::SourceStreamInfo;
friend class mozilla::RemoteSourceStreamInfo;
class PrincipalHandleListener;

View File

@ -58,8 +58,11 @@
#include "logging.h"
// Should come from MediaEngineWebRTC.h, but that's a pain to include here
#define DEFAULT_SAMPLE_RATE 32000
// Max size given stereo is 480*2*2 = 1920 (48KHz)
#define AUDIO_SAMPLE_BUFFER_MAX 480*2*2
static_assert((WEBRTC_DEFAULT_SAMPLE_RATE/100)*sizeof(uint16_t) * 2
<= AUDIO_SAMPLE_BUFFER_MAX,
"AUDIO_SAMPLE_BUFFER_MAX is not large enough");
using namespace mozilla;
using namespace mozilla::dom;
@ -1713,9 +1716,6 @@ void MediaPipelineTransmit::PipelineListener::ProcessAudioChunk(
// We know that webrtc.org's code going to copy the samples down the line,
// so we can just use a stack buffer here instead of malloc-ing.
// Max size given stereo is 480*2*2 = 1920 (10ms of 16-bits stereo audio at
// 48KHz)
const size_t AUDIO_SAMPLE_BUFFER_MAX = 1920;
int16_t packet[AUDIO_SAMPLE_BUFFER_MAX];
packetizer_->Output(packet);
@ -1749,117 +1749,44 @@ class GenericReceiveCallback : public TrackAddedCallback
RefPtr<GenericReceiveListener> listener_;
};
// Add a track and listener on the MSG thread using the MSG command queue
static void AddTrackAndListener(MediaStream* source,
TrackID track_id, TrackRate track_rate,
MediaStreamListener* listener, MediaSegment* segment,
const RefPtr<TrackAddedCallback>& completed,
bool queue_track) {
// This both adds the listener and the track
// Add a listener on the MSG thread using the MSG command queue
static void AddListener(MediaStream* source, MediaStreamListener* listener) {
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
class Message : public ControlMessage {
public:
Message(MediaStream* stream, TrackID track, TrackRate rate,
MediaSegment* segment, MediaStreamListener* listener,
const RefPtr<TrackAddedCallback>& completed)
Message(MediaStream* stream, MediaStreamListener* listener)
: ControlMessage(stream),
track_id_(track),
track_rate_(rate),
segment_(segment),
listener_(listener),
completed_(completed) {}
listener_(listener) {}
virtual void Run() override {
StreamTime current_end = mStream->GetTracksEnd();
TrackTicks current_ticks =
mStream->TimeToTicksRoundUp(track_rate_, current_end);
mStream->AddListenerImpl(listener_.forget());
// Add a track 'now' to avoid possible underrun, especially if we add
// a track "later".
if (current_end != 0L) {
MOZ_MTLOG(ML_DEBUG, "added track @ " << current_end <<
" -> " << mStream->StreamTimeToSeconds(current_end));
}
// To avoid assertions, we need to insert a dummy segment that covers up
// to the "start" time for the track
segment_->AppendNullData(current_ticks);
if (segment_->GetType() == MediaSegment::AUDIO) {
mStream->AsSourceStream()->AddAudioTrack(track_id_, track_rate_, 0,
static_cast<AudioSegment*>(segment_.forget()));
} else {
NS_ASSERTION(mStream->GraphRate() == track_rate_, "Rate mismatch");
mStream->AsSourceStream()->AddTrack(track_id_, 0, segment_.forget());
}
// We need to know how much has been "inserted" because we're given absolute
// times in NotifyPull.
completed_->TrackAdded(current_ticks);
}
private:
TrackID track_id_;
TrackRate track_rate_;
nsAutoPtr<MediaSegment> segment_;
RefPtr<MediaStreamListener> listener_;
const RefPtr<TrackAddedCallback> completed_;
};
MOZ_ASSERT(listener);
if (!queue_track) {
// We're only queueing the initial set of tracks since they are added
// atomically and have start time 0. When not queueing we have to add
// the track on the MediaStreamGraph thread so it can be added with the
// appropriate start time.
source->GraphImpl()->AppendMessage(MakeUnique<Message>(source, track_id, track_rate, segment, listener, completed));
MOZ_MTLOG(ML_INFO, "Dispatched track-add for track id " << track_id <<
" on stream " << source);
return;
}
#endif
source->GraphImpl()->AppendMessage(MakeUnique<Message>(source, listener));
#else
source->AddListener(listener);
if (segment->GetType() == MediaSegment::AUDIO) {
source->AsSourceStream()->AddAudioTrack(track_id, track_rate, 0,
static_cast<AudioSegment*>(segment),
SourceMediaStream::ADDTRACK_QUEUED);
} else {
source->AsSourceStream()->AddTrack(track_id, 0, segment,
SourceMediaStream::ADDTRACK_QUEUED);
}
MOZ_MTLOG(ML_INFO, "Queued track-add for track id " << track_id <<
" on MediaStream " << source);
#endif
}
class GenericReceiveListener : public MediaStreamListener
{
public:
GenericReceiveListener(SourceMediaStream *source, TrackID track_id,
TrackRate track_rate, bool queue_track)
GenericReceiveListener(SourceMediaStream *source, TrackID track_id)
: source_(source),
track_id_(track_id),
track_rate_(track_rate),
played_ticks_(0),
queue_track_(queue_track),
principal_handle_(PRINCIPAL_HANDLE_NONE) {}
virtual ~GenericReceiveListener() {}
void AddSelf(MediaSegment* segment)
void AddSelf()
{
RefPtr<TrackAddedCallback> callback = new GenericReceiveCallback(this);
AddTrackAndListener(source_, track_id_, track_rate_, this, segment, callback,
queue_track_);
}
void SetPlayedTicks(TrackTicks time) {
played_ticks_ = time;
}
void EndTrack() {
source_->EndTrack(track_id_);
AddListener(source_, this);
}
#ifndef USE_FAKE_MEDIA_STREAMS
@ -1898,17 +1825,10 @@ class GenericReceiveListener : public MediaStreamListener
protected:
SourceMediaStream *source_;
TrackID track_id_;
TrackRate track_rate_;
TrackTicks played_ticks_;
bool queue_track_;
PrincipalHandle principal_handle_;
};
void GenericReceiveCallback::TrackAdded(TrackTicks time)
{
listener_->SetPlayedTicks(time);
}
MediaPipelineReceive::MediaPipelineReceive(
const std::string& pc,
nsCOMPtr<nsIEventTarget> main_thread,
@ -1939,12 +1859,10 @@ class MediaPipelineReceiveAudio::PipelineListener
{
public:
PipelineListener(SourceMediaStream * source, TrackID track_id,
const RefPtr<MediaSessionConduit>& conduit,
bool queue_track)
: GenericReceiveListener(source, track_id, DEFAULT_SAMPLE_RATE, queue_track), // XXX rate assumption
const RefPtr<MediaSessionConduit>& conduit)
: GenericReceiveListener(source, track_id),
conduit_(conduit)
{
MOZ_ASSERT(track_rate_%100 == 0);
}
~PipelineListener()
@ -1972,12 +1890,8 @@ public:
}
// This comparison is done in total time to avoid accumulated roundoff errors.
while (source_->TicksToTimeRoundDown(track_rate_, played_ticks_) <
desired_time) {
// Max size given stereo is 480*2*2 = 1920 (48KHz)
const size_t AUDIO_SAMPLE_BUFFER_MAX = 1920;
MOZ_ASSERT((track_rate_/100)*sizeof(uint16_t) * 2 <= AUDIO_SAMPLE_BUFFER_MAX);
while (source_->TicksToTimeRoundDown(WEBRTC_DEFAULT_SAMPLE_RATE,
played_ticks_) < desired_time) {
int16_t scratch_buffer[AUDIO_SAMPLE_BUFFER_MAX];
int samples_length;
@ -1986,7 +1900,7 @@ public:
MediaConduitErrorCode err =
static_cast<AudioSessionConduit*>(conduit_.get())->GetAudioFrame(
scratch_buffer,
track_rate_,
WEBRTC_DEFAULT_SAMPLE_RATE,
0, // TODO(ekr@rtfm.com): better estimate of "capture" (really playout) delay
samples_length);
@ -1996,7 +1910,8 @@ public:
<< ") to return data @ " << played_ticks_
<< " (desired " << desired_time << " -> "
<< source_->StreamTimeToSeconds(desired_time) << ")");
samples_length = track_rate_/100; // if this is not enough we'll loop and provide more
// if this is not enough we'll loop and provide more
samples_length = WEBRTC_DEFAULT_SAMPLE_RATE/100;
PodArrayZero(scratch_buffer);
}
@ -2011,7 +1926,7 @@ public:
// We derive the number of channels of the stream from the number of samples
// the AudioConduit gives us, considering it gives us packets of 10ms and we
// know the rate.
uint32_t channelCount = samples_length / (track_rate_ / 100);
uint32_t channelCount = samples_length / (WEBRTC_DEFAULT_SAMPLE_RATE / 100);
AutoTArray<int16_t*,2> channels;
AutoTArray<const int16_t*,2> outputChannels;
size_t frames = samples_length / channelCount;
@ -2061,13 +1976,11 @@ MediaPipelineReceiveAudio::MediaPipelineReceiveAudio(
RefPtr<AudioSessionConduit> conduit,
RefPtr<TransportFlow> rtp_transport,
RefPtr<TransportFlow> rtcp_transport,
nsAutoPtr<MediaPipelineFilter> filter,
bool queue_track) :
nsAutoPtr<MediaPipelineFilter> filter) :
MediaPipelineReceive(pc, main_thread, sts_thread,
stream, media_stream_track_id, level, conduit,
rtp_transport, rtcp_transport, filter),
listener_(new PipelineListener(stream, numeric_track_id, conduit,
queue_track))
listener_(new PipelineListener(stream, numeric_track_id, conduit))
{}
void MediaPipelineReceiveAudio::DetachMedia()
@ -2087,7 +2000,7 @@ nsresult MediaPipelineReceiveAudio::Init() {
description_ += track_id_;
description_ += "]";
listener_->AddSelf(new AudioSegment());
listener_->AddSelf();
return MediaPipelineReceive::Init();
}
@ -2102,9 +2015,8 @@ void MediaPipelineReceiveAudio::SetPrincipalHandle_m(const PrincipalHandle& prin
class MediaPipelineReceiveVideo::PipelineListener
: public GenericReceiveListener {
public:
PipelineListener(SourceMediaStream * source, TrackID track_id,
bool queue_track)
: GenericReceiveListener(source, track_id, source->GraphRate(), queue_track),
PipelineListener(SourceMediaStream * source, TrackID track_id)
: GenericReceiveListener(source, track_id),
width_(0),
height_(0),
#if defined(MOZILLA_INTERNAL_API)
@ -2122,15 +2034,10 @@ public:
// Implement MediaStreamListener
void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) override
{
#if defined(MOZILLA_INTERNAL_API)
ReentrantMonitorAutoEnter enter(monitor_);
#if defined(MOZILLA_INTERNAL_API)
RefPtr<Image> image = image_;
// our constructor sets track_rate_ to the graph rate
MOZ_ASSERT(track_rate_ == source_->GraphRate());
#endif
#if defined(MOZILLA_INTERNAL_API)
StreamTime delta = desired_time - played_ticks_;
// Don't append if we've already provided a frame that supposedly
@ -2293,20 +2200,18 @@ MediaPipelineReceiveVideo::MediaPipelineReceiveVideo(
RefPtr<VideoSessionConduit> conduit,
RefPtr<TransportFlow> rtp_transport,
RefPtr<TransportFlow> rtcp_transport,
nsAutoPtr<MediaPipelineFilter> filter,
bool queue_track) :
nsAutoPtr<MediaPipelineFilter> filter) :
MediaPipelineReceive(pc, main_thread, sts_thread,
stream, media_stream_track_id, level, conduit,
rtp_transport, rtcp_transport, filter),
renderer_(new PipelineRenderer(this)),
listener_(new PipelineListener(stream, numeric_track_id, queue_track))
listener_(new PipelineListener(stream, numeric_track_id))
{}
void MediaPipelineReceiveVideo::DetachMedia()
{
ASSERT_ON_THREAD(main_thread_);
listener_->EndTrack();
// stop generating video and thus stop invoking the PipelineRenderer
// and PipelineListener - the renderer has a raw ptr to the Pipeline to
// avoid cycles, and the render callbacks are invoked from a different
@ -2327,7 +2232,7 @@ nsresult MediaPipelineReceiveVideo::Init() {
description_ += "]";
#if defined(MOZILLA_INTERNAL_API)
listener_->AddSelf(new VideoSegment());
listener_->AddSelf();
#endif
// Always happens before we can DetachMedia()

View File

@ -25,6 +25,10 @@
#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
// Should come from MediaEngine.h, but that's a pain to include here
// because of the MOZILLA_EXTERNAL_LINKAGE stuff.
#define WEBRTC_DEFAULT_SAMPLE_RATE 32000
class nsIPrincipal;
namespace mozilla {
@ -404,8 +408,7 @@ class MediaPipelineReceiveAudio : public MediaPipelineReceive {
RefPtr<AudioSessionConduit> conduit,
RefPtr<TransportFlow> rtp_transport,
RefPtr<TransportFlow> rtcp_transport,
nsAutoPtr<MediaPipelineFilter> filter,
bool queue_track);
nsAutoPtr<MediaPipelineFilter> filter);
void DetachMedia() override;
@ -443,8 +446,7 @@ class MediaPipelineReceiveVideo : public MediaPipelineReceive {
RefPtr<VideoSessionConduit> conduit,
RefPtr<TransportFlow> rtp_transport,
RefPtr<TransportFlow> rtcp_transport,
nsAutoPtr<MediaPipelineFilter> filter,
bool queue_track);
nsAutoPtr<MediaPipelineFilter> filter);
// Called on the main thread.
void DetachMedia() override;

View File

@ -535,8 +535,6 @@ MediaPipelineFactory::CreateMediaPipelineReceiving(
TrackID numericTrackId = stream->GetNumericTrackId(aTrack.GetTrackId());
MOZ_ASSERT(IsTrackIDExplicit(numericTrackId));
bool queue_track = stream->ShouldQueueTracks();
MOZ_MTLOG(ML_DEBUG, __FUNCTION__ << ": Creating pipeline for "
<< numericTrackId << " -> " << aTrack.GetTrackId());
@ -552,8 +550,7 @@ MediaPipelineFactory::CreateMediaPipelineReceiving(
static_cast<AudioSessionConduit*>(aConduit.get()), // Ugly downcast.
aRtpFlow,
aRtcpFlow,
aFilter,
queue_track);
aFilter);
} else if (aTrack.GetMediaType() == SdpMediaSection::kVideo) {
pipeline = new MediaPipelineReceiveVideo(
mPC->GetHandle(),
@ -566,8 +563,7 @@ MediaPipelineFactory::CreateMediaPipelineReceiving(
static_cast<VideoSessionConduit*>(aConduit.get()), // Ugly downcast.
aRtpFlow,
aRtcpFlow,
aFilter,
queue_track);
aFilter);
} else {
MOZ_ASSERT(false);
MOZ_MTLOG(ML_ERROR, "Invalid media type in CreateMediaPipelineReceiving");

View File

@ -97,6 +97,10 @@
#include "mozilla/net/DataChannelProtocol.h"
#endif
#ifndef USE_FAKE_MEDIA_STREAMS
#include "MediaStreamGraphImpl.h"
#endif
#ifdef XP_WIN
// We need to undef the MS macro again in case the windows include file
// got imported after we included nsIDocument.h
@ -1743,6 +1747,61 @@ static void DeferredSetRemote(const std::string& aPcHandle,
}
}
// Queues a control message on the MediaStreamGraph (MSG) thread that adds a
// new track to |aSource| and starts it at the stream's current end time.
// Takes ownership of |aSegment|; the segment type (audio vs. video) decides
// which kind of track is created. No-op in external-linkage builds.
static void StartTrack(MediaStream* aSource,
                       TrackID aTrackId,
                       nsAutoPtr<MediaSegment>&& aSegment) {
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // ControlMessage that performs the actual track-add on the MSG thread,
  // where the stream's current time can be read consistently.
  class Message : public ControlMessage {
   public:
    Message(MediaStream* aStream,
            TrackID aTrack,
            nsAutoPtr<MediaSegment>&& aSegment)
      : ControlMessage(aStream),
        track_id_(aTrack),
        segment_(aSegment) {}

    virtual void Run() override {
      // Audio tracks run at the fixed webrtc.org sample rate; video tracks
      // run at the graph rate.
      TrackRate track_rate = segment_->GetType() == MediaSegment::AUDIO ?
        WEBRTC_DEFAULT_SAMPLE_RATE : mStream->GraphRate();
      StreamTime current_end = mStream->GetTracksEnd();
      TrackTicks current_ticks =
        mStream->TimeToTicksRoundUp(track_rate, current_end);
      // Add a track 'now' to avoid possible underrun, especially if we add
      // a track "later".
      if (current_end != 0L) {
        CSFLogDebug(logTag, "added track @ %u -> %f",
                    static_cast<unsigned>(current_end),
                    mStream->StreamTimeToSeconds(current_end));
      }
      // To avoid assertions, we need to insert a dummy segment that covers up
      // to the "start" time for the track
      segment_->AppendNullData(current_ticks);
      if (segment_->GetType() == MediaSegment::AUDIO) {
        mStream->AsSourceStream()->AddAudioTrack(
            track_id_,
            WEBRTC_DEFAULT_SAMPLE_RATE,
            0,
            static_cast<AudioSegment*>(segment_.forget()));
      } else {
        mStream->AsSourceStream()->AddTrack(track_id_, 0, segment_.forget());
      }
    }

   private:
    TrackID track_id_;
    // Owned until forget() hands it to the track in Run().
    nsAutoPtr<MediaSegment> segment_;
  };

  aSource->GraphImpl()->AppendMessage(
      MakeUnique<Message>(aSource, aTrackId, Move(aSegment)));
  CSFLogInfo(logTag, "Dispatched track-add for track id %u on stream %p",
             aTrackId, aSource);
#endif
}
nsresult
PeerConnectionImpl::CreateNewRemoteTracks(RefPtr<PeerConnectionObserver>& aPco)
{
@ -1826,6 +1885,16 @@ PeerConnectionImpl::CreateNewRemoteTracks(RefPtr<PeerConnectionObserver>& aPco)
}
#endif
// We need to select unique ids, just use max + 1
TrackID maxTrackId = 0;
{
nsTArray<RefPtr<dom::MediaStreamTrack>> domTracks;
info->GetMediaStream()->GetTracks(domTracks);
for (auto& track : domTracks) {
maxTrackId = std::max(maxTrackId, track->mTrackID);
}
}
for (RefPtr<JsepTrack>& track : tracks) {
std::string webrtcTrackId(track->GetTrackId());
if (!info->HasTrack(webrtcTrackId)) {
@ -1835,20 +1904,27 @@ PeerConnectionImpl::CreateNewRemoteTracks(RefPtr<PeerConnectionObserver>& aPco)
#else
RefPtr<MediaStreamTrackSource> source = new MediaStreamTrackSource();
#endif
TrackID trackID = info->GetNextAvailableNumericTrackId();
TrackID trackID = ++maxTrackId;
RefPtr<MediaStreamTrack> domTrack;
nsAutoPtr<MediaSegment> segment;
if (track->GetMediaType() == SdpMediaSection::kAudio) {
domTrack =
info->GetMediaStream()->CreateDOMTrack(trackID,
MediaSegment::AUDIO,
source);
segment = new AudioSegment;
} else {
domTrack =
info->GetMediaStream()->CreateDOMTrack(trackID,
MediaSegment::VIDEO,
source);
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
segment = new VideoSegment;
#endif
}
StartTrack(info->GetMediaStream()->GetInputStream()->AsSourceStream(),
trackID, Move(segment));
info->AddTrack(webrtcTrackId, domTrack);
CSFLogDebug(logTag, "Added remote track %s/%s",
info->GetId().c_str(), webrtcTrackId.c_str());
@ -2209,20 +2285,6 @@ PeerConnectionImpl::PrincipalChanged(MediaStreamTrack* aTrack) {
}
#endif
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
nsresult
PeerConnectionImpl::GetRemoteTrackId(const std::string streamId,
const MediaStreamTrack& aTrack,
std::string* trackId) const
{
if (IsClosed()) {
return NS_ERROR_UNEXPECTED;
}
return mMedia->GetRemoteTrackId(streamId, aTrack, trackId);
}
#endif
std::string
PeerConnectionImpl::GetTrackId(const MediaStreamTrack& aTrack)
{

View File

@ -648,9 +648,6 @@ public:
// PeerConnectionMedia can't do it because it doesn't know about principals
virtual void PrincipalChanged(dom::MediaStreamTrack* aTrack) override;
nsresult GetRemoteTrackId(const std::string streamId,
const dom::MediaStreamTrack& track,
std::string* trackId) const;
#endif
static std::string GetStreamId(const DOMMediaStream& aStream);

View File

@ -33,6 +33,10 @@
#endif
#endif
#ifndef USE_FAKE_MEDIA_STREAMS
#include "MediaStreamGraphImpl.h"
#endif
#include "nsNetCID.h"
#include "nsNetUtil.h"
#include "nsIURI.h"
@ -110,10 +114,38 @@ PipelineDetachTransport_s(RefPtr<MediaPipeline> pipeline,
NS_DISPATCH_NORMAL);
}
// Dispatches a message to the MediaStreamGraph thread that ends |track|'s
// numeric track id on |stream|. Safe to call with a null or non-source
// stream (it simply returns); no-op in external-linkage builds.
void
SourceStreamInfo::EndTrack(MediaStream* stream, dom::MediaStreamTrack* track)
{
  // Only SourceMediaStreams can have tracks ended from here.
  if (!stream || !stream->AsSourceStream()) {
    return;
  }
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // Runs EndTrack on the MSG thread, where track state may be mutated.
  class Message : public ControlMessage {
   public:
    Message(MediaStream* stream, TrackID track)
      : ControlMessage(stream),
        track_id_(track) {}

    virtual void Run() override {
      mStream->AsSourceStream()->EndTrack(track_id_);
    }

   private:
    TrackID track_id_;
  };
  stream->GraphImpl()->AppendMessage(
      MakeUnique<Message>(stream, track->mTrackID));
#endif
}
void
SourceStreamInfo::RemoveTrack(const std::string& trackId)
{
mTracks.erase(trackId);
RefPtr<MediaPipeline> pipeline = GetPipelineByTrackId_m(trackId);
if (pipeline) {
mPipelines.erase(trackId);
@ -945,23 +977,6 @@ PeerConnectionMedia::RemoveRemoteTrack(const std::string& streamId,
return NS_OK;
}
nsresult
PeerConnectionMedia::GetRemoteTrackId(const std::string streamId,
const MediaStreamTrack& track,
std::string* trackId) const
{
auto* ncThis = const_cast<PeerConnectionMedia*>(this);
const RemoteSourceStreamInfo* info =
ncThis->GetRemoteStreamById(streamId);
if (!info) {
CSFLogError(logTag, "%s: Could not find stream info", __FUNCTION__);
return NS_ERROR_NOT_AVAILABLE;
}
return info->GetTrackId(track, trackId);
}
void
PeerConnectionMedia::SelfDestruct()
{
@ -1512,6 +1527,26 @@ SourceStreamInfo::StorePipeline(
return NS_OK;
}
// Ends every remote track in mTracks before delegating to the base class.
// This runs even if offer/answer never completed, so tracks don't stay
// live in the MediaStreamGraph after the PeerConnection goes away.
void
RemoteSourceStreamInfo::DetachMedia_m()
{
  // mTracks maps webrtc track id -> dom::MediaStreamTrack.
  for (auto& webrtcIdAndTrack : mTracks) {
    EndTrack(mMediaStream->GetInputStream(), webrtcIdAndTrack.second);
  }
  SourceStreamInfo::DetachMedia_m();
}
// Ends the named remote track (if we know about it) before removing it from
// the bookkeeping maps via the base-class implementation.
void
RemoteSourceStreamInfo::RemoveTrack(const std::string& trackId)
{
  auto it = mTracks.find(trackId);
  if (it != mTracks.end()) {
    // End the MSG-side track so playback stops cleanly.
    EndTrack(mMediaStream->GetInputStream(), it->second);
  }
  SourceStreamInfo::RemoveTrack(trackId);
}
void
RemoteSourceStreamInfo::SyncPipeline(
RefPtr<MediaPipelineReceive> aPipeline)
@ -1550,7 +1585,6 @@ RemoteSourceStreamInfo::StartReceiving()
mReceiving = true;
SourceMediaStream* source = GetMediaStream()->GetInputStream()->AsSourceStream();
source->FinishAddTracks();
source->SetPullEnabled(true);
// AdvanceKnownTracksTicksTime(HEAT_DEATH_OF_UNIVERSE) means that in
// theory per the API, we can't add more tracks before that

View File

@ -85,7 +85,7 @@ public:
{
mTracks.insert(std::make_pair(trackId, aTrack));
}
void RemoveTrack(const std::string& trackId);
virtual void RemoveTrack(const std::string& trackId);
bool HasTrack(const std::string& trackId) const
{
return !!mTracks.count(trackId);
@ -101,7 +101,7 @@ public:
// PrincipalChangeObserver from each track.
const std::map<std::string, RefPtr<dom::MediaStreamTrack>>&
GetMediaStreamTracks() const { return mTracks; }
dom::MediaStreamTrack* GetTrackById(const std::string& trackId)
dom::MediaStreamTrack* GetTrackById(const std::string& trackId) const
{
auto it = mTracks.find(trackId);
if (it == mTracks.end()) {
@ -113,9 +113,10 @@ public:
const std::string& GetId() const { return mId; }
void DetachTransport_s();
void DetachMedia_m();
virtual void DetachMedia_m();
bool AnyCodecHasPluginID(uint64_t aPluginID);
protected:
void EndTrack(MediaStream* stream, dom::MediaStreamTrack* track);
RefPtr<DOMMediaStream> mMediaStream;
PeerConnectionMedia *mParent;
const std::string mId;
@ -200,6 +201,8 @@ class RemoteSourceStreamInfo : public SourceStreamInfo {
{
}
void DetachMedia_m() override;
void RemoveTrack(const std::string& trackId) override;
void SyncPipeline(RefPtr<MediaPipelineReceive> aPipeline);
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
@ -211,60 +214,21 @@ class RemoteSourceStreamInfo : public SourceStreamInfo {
void AddTrack(const std::string& trackId,
const RefPtr<dom::MediaStreamTrack>& aTrack) override
{
mTrackIdMap.push_back(trackId);
MOZ_RELEASE_ASSERT(GetNumericTrackId(trackId) == aTrack->mTrackID);
SourceStreamInfo::AddTrack(trackId, aTrack);
}
TrackID GetNextAvailableNumericTrackId() const
{
return mTrackIdMap.size() + 1;
}
TrackID GetNumericTrackId(const std::string& trackId) const
{
for (size_t i = 0; i < mTrackIdMap.size(); ++i) {
if (mTrackIdMap[i] == trackId) {
return static_cast<TrackID>(i + 1);
}
dom::MediaStreamTrack* track = GetTrackById(trackId);
if (!track) {
return TRACK_INVALID;
}
return TRACK_INVALID;
}
nsresult GetTrackId(const dom::MediaStreamTrack& track, std::string* trackId) const
{
TrackID numericTrackId = track.mTrackID;
if (numericTrackId <= 0 ||
static_cast<size_t>(numericTrackId) > mTrackIdMap.size()) {
return NS_ERROR_INVALID_ARG;;
}
*trackId = mTrackIdMap[numericTrackId - 1];
return NS_OK;
return track->mTrackID;
}
void StartReceiving();
/**
* Returns true if a |MediaPipeline| should be queueing its track instead of
* adding it to the |SourceMediaStream| directly.
*/
bool ShouldQueueTracks() const
{
return !mReceiving;
}
private:
// For remote streams, the MediaStreamGraph API forces us to select a
// numeric track id before creation of the MediaStreamTrack, and does not
// allow us to specify a string-based id until later. We cannot simply use
// something based on mline index, since renegotiation can move tracks
// around. Hopefully someday we'll be able to specify the string id up-front,
// and have the numeric track id selected for us, in which case this variable
// and its dependencies can go away.
std::vector<std::string> mTrackIdMap;
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
// MediaStreamTrackSources associated with this remote stream.
// We use them for updating their principal if that's needed.
@ -349,10 +313,6 @@ class PeerConnectionMedia : public sigslot::has_slots<> {
nsresult RemoveRemoteTrack(const std::string& streamId,
const std::string& trackId);
nsresult GetRemoteTrackId(const std::string streamId,
const dom::MediaStreamTrack& track,
std::string* trackId) const;
// Get a specific local stream
uint32_t LocalStreamsLength()
{

View File

@ -334,8 +334,7 @@ class TestAgentReceive : public TestAgent {
static_cast<mozilla::AudioSessionConduit *>(audio_conduit_.get()),
audio_rtp_transport_.flow_,
audio_rtcp_transport_.flow_,
bundle_filter_,
false);
bundle_filter_);
audio_pipeline_->Init();
}