Bug 1494312 - Part 2: Make the internal PC stats API based on MozPromise. r=mjf,mayhemer

Differential Revision: https://phabricator.services.mozilla.com/D11776

--HG--
extra : moz-landing-system : lando

parent 69e7155b3a
commit e6218aee8a
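
The heart of this patch is replacing hand-rolled cross-thread dispatch (WrapRunnableNM plus NS_DispatchToMainThread) with MozPromise. For readers new to the idiom, here is a minimal sketch of the consumer pattern the hunks below rely on. It assumes a Gecko build environment; the names Stats, StatsPromise, and GatherStats are illustrative, not from the patch:

    #include "mozilla/MozPromise.h"
    #include "mozilla/UniquePtr.h"
    #include "nsThreadUtils.h"

    using namespace mozilla;

    struct Stats { int packetsLost = 0; };  // stand-in for RTCStatsQuery
    // Exclusive (single-consumer) promise carrying a move-only value.
    using StatsPromise = MozPromise<UniquePtr<Stats>, nsresult, true>;

    RefPtr<StatsPromise> GatherStats() {
      // The producer resolves the promise instead of dispatching a
      // completion runnable back to the caller by hand.
      return StatsPromise::CreateAndResolve(MakeUnique<Stats>(), __func__);
    }

    void Consume() {
      GatherStats()->Then(
          GetMainThreadSerialEventTarget(),  // thread the lambdas run on
          __func__,                          // call site, for logging
          [](UniquePtr<Stats>&& aStats) { /* success, on main thread */ },
          [](nsresult aError) { /* rejection, on main thread */ });
    }
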
@@ -791,6 +791,27 @@ NrIceStats NrIceCtx::Destroy() {
                             Telemetry::WEBRTC_ICE_ANSWERER_ABORT_TIME,
                             time_delta.ToMilliseconds());
     }
+
+    unsigned char rate_limit_bit_pattern = 0;
+    if (!mozilla::nr_socket_short_term_violation_time().IsNull() &&
+        mozilla::nr_socket_short_term_violation_time() >= ice_start_time_) {
+      rate_limit_bit_pattern |= 1;
+    }
+    if (!mozilla::nr_socket_long_term_violation_time().IsNull() &&
+        mozilla::nr_socket_long_term_violation_time() >= ice_start_time_) {
+      rate_limit_bit_pattern |= 2;
+    }
+
+    if (connection_state_ == ICE_CTX_FAILED) {
+      Telemetry::Accumulate(
+          Telemetry::WEBRTC_STUN_RATE_LIMIT_EXCEEDED_BY_TYPE_GIVEN_FAILURE,
+          rate_limit_bit_pattern);
+    } else if (connection_state_ == ICE_CTX_CONNECTED ||
+               connection_state_ == ICE_CTX_COMPLETED) {
+      Telemetry::Accumulate(
+          Telemetry::WEBRTC_STUN_RATE_LIMIT_EXCEEDED_BY_TYPE_GIVEN_SUCCESS,
+          rate_limit_bit_pattern);
+    }
   }
 
   if (peer_) {
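
For reference, rate_limit_bit_pattern above is a two-bit mask: bit 1 records a short-term STUN rate-limit violation since ICE started, bit 2 a long-term one, and the final ICE state selects the ..._GIVEN_FAILURE or ..._GIVEN_SUCCESS histogram. A small illustrative decoder, not part of the patch:

    // Values 0-3 of the telemetry bit pattern:
    //   0 = no rate-limit violations since ICE start
    //   1 = short-term limit exceeded
    //   2 = long-term limit exceeded
    //   3 = both limits exceeded
    const char* DescribeRateLimit(unsigned char aPattern) {
      switch (aPattern) {
        case 0:  return "none";
        case 1:  return "short-term";
        case 2:  return "long-term";
        case 3:  return "short-term+long-term";
        default: return "invalid";
      }
    }
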
@@ -540,24 +540,15 @@ MediaTransportHandler::GetState(const std::string& aTransportId,
   return TransportLayer::TS_NONE;
 }
 
-void
-MediaTransportHandler::GetAllIceStats(DOMHighResTimeStamp aNow,
-                                      dom::RTCStatsReportInternal* aReport)
+RefPtr<RTCStatsQueryPromise>
+MediaTransportHandler::GetIceStats(UniquePtr<RTCStatsQuery>&& aQuery)
 {
   for (const auto& stream : mIceCtx->GetStreams()) {
-    GetIceStats(*stream, aNow, aReport);
-  }
-}
-
-void
-MediaTransportHandler::GetIceStats(const std::string& aTransportId,
-                                   DOMHighResTimeStamp aNow,
-                                   dom::RTCStatsReportInternal* aReport)
-{
-  auto stream = mIceCtx->GetStream(aTransportId);
-  if (stream) {
-    GetIceStats(*stream, aNow, aReport);
+    if (aQuery->grabAllLevels || aQuery->transportId == stream->GetId()) {
+      GetIceStats(*stream, aQuery->now, aQuery->report);
+    }
   }
+  return RTCStatsQueryPromise::CreateAndResolve(std::move(aQuery), __func__);
 }
 
 static void ToRTCIceCandidateStats(
@@ -13,6 +13,9 @@
 #include "nricectx.h" // Need some enums
 #include "nsDOMNavigationTiming.h" // DOMHighResTimeStamp
 
+// For RTCStatsQueryPromise typedef
+#include "signaling/src/peerconnection/PeerConnectionImpl.h"
+
 #include <map>
 #include <string>
 #include <set>
@@ -25,6 +28,7 @@ class NrIceMediaStream;
 class NrIceResolver;
 class SdpFingerprintAttributeList; // TODO(bug 1494311) Use IPC type
 class TransportFlow;
+class RTCStatsQuery;
 
 namespace dom {
 struct RTCConfiguration;
@@ -112,14 +116,8 @@ class MediaTransportHandler : public MediaTransportBase,
   TransportLayer::State GetState(const std::string& aTransportId,
                                  bool aRtcp) const override;
 
-  // TODO(bug 1494312): Stats stuff needs to be async.
-  void GetAllIceStats(DOMHighResTimeStamp now,
-                      dom::RTCStatsReportInternal* report);
-
-  // TODO(bug 1494312): Stats stuff needs to be async.
-  void GetIceStats(const std::string& aTransportId,
-                   DOMHighResTimeStamp now,
-                   dom::RTCStatsReportInternal* report);
+  RefPtr<RTCStatsQueryPromise> GetIceStats(
+      UniquePtr<RTCStatsQuery>&& aQuery);
 
   // TODO(bug 1494311) Use IPC type
   struct CandidateInfo {
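
With the header above, a caller no longer passes a raw report pointer per transport; it moves a whole query into GetIceStats() and receives it back, filled in, through the promise. A hedged sketch of call-site usage, assuming the surrounding Gecko types are in scope (QueryIce is hypothetical):

    void QueryIce(MediaTransportHandler* aHandler,
                  UniquePtr<RTCStatsQuery>&& aQuery) {
      aHandler->GetIceStats(std::move(aQuery))->Then(
          GetMainThreadSerialEventTarget(),
          __func__,
          [](UniquePtr<RTCStatsQuery>&& aQuery) {
            // aQuery->report now includes the ICE candidate stats.
          },
          [](nsresult) { /* no stats available */ });
    }
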
@@ -184,8 +184,6 @@ void PeerConnectionCtx::Destroy() {
   StopWebRtcLog();
 }
 
-typedef Vector<nsAutoPtr<RTCStatsQuery>> RTCStatsQueries;
-
 // Telemetry reporting every second after start of first call.
 // The threading model around the media pipelines is weird:
 // - The pipelines are containers,
@@ -203,157 +201,110 @@ FindId(const Sequence<RTCInboundRTPStreamStats>& aArray,
   return aArray.NoIndex;
 }
 
-static auto
-FindId(const nsTArray<nsAutoPtr<RTCStatsReportInternal>>& aArray,
-       const nsString &aId) -> decltype(aArray.Length()) {
-  for (decltype(aArray.Length()) i = 0; i < aArray.Length(); i++) {
-    if (aArray[i]->mPcid == aId) {
-      return i;
-    }
-  }
-  return aArray.NoIndex;
-}
-
-static void
-FreeOnMain_m(nsAutoPtr<RTCStatsQueries> aQueryList) {
-  MOZ_ASSERT(NS_IsMainThread());
-}
-
-static void
-EverySecondTelemetryCallback_s(nsAutoPtr<RTCStatsQueries> aQueryList) {
+void
+PeerConnectionCtx::DeliverStats(RTCStatsQuery& aQuery)
+{
   using namespace Telemetry;
 
-  if(!PeerConnectionCtx::isActive()) {
-    return;
-  }
-  PeerConnectionCtx *ctx = PeerConnectionCtx::GetInstance();
-
-  for (auto & q : *aQueryList) {
-    PeerConnectionImpl::ExecuteStatsQuery_s(q);
-    auto& r = *q->report;
-    if (r.mInboundRTPStreamStats.WasPassed()) {
-      // First, get reports from a second ago, if any, for calculations below
-      const Sequence<RTCInboundRTPStreamStats> *lastInboundStats = nullptr;
-      {
-        auto i = FindId(ctx->mLastReports, r.mPcid);
-        if (i != ctx->mLastReports.NoIndex) {
-          lastInboundStats = &ctx->mLastReports[i]->mInboundRTPStreamStats.Value();
-        }
-      }
-      // Then, look for the things we want telemetry on
-      auto& array = r.mInboundRTPStreamStats.Value();
-      for (decltype(array.Length()) i = 0; i < array.Length(); i++) {
-        auto& s = array[i];
-        bool isAudio = (s.mId.Value().Find("audio") != -1);
-        if (s.mPacketsLost.WasPassed() && s.mPacketsReceived.WasPassed() &&
-            (s.mPacketsLost.Value() + s.mPacketsReceived.Value()) != 0) {
-          HistogramID id;
-          if (s.mIsRemote) {
-            id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_PACKETLOSS_RATE :
-                           WEBRTC_VIDEO_QUALITY_OUTBOUND_PACKETLOSS_RATE;
-          } else {
-            id = isAudio ? WEBRTC_AUDIO_QUALITY_INBOUND_PACKETLOSS_RATE :
-                           WEBRTC_VIDEO_QUALITY_INBOUND_PACKETLOSS_RATE;
-          }
-          // *1000 so we can read in 10's of a percent (permille)
-          Accumulate(id,
-                     (s.mPacketsLost.Value() * 1000) /
-                     (s.mPacketsLost.Value() + s.mPacketsReceived.Value()));
-        }
-        if (s.mJitter.WasPassed()) {
-          HistogramID id;
-          if (s.mIsRemote) {
-            id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_JITTER :
-                           WEBRTC_VIDEO_QUALITY_OUTBOUND_JITTER;
-          } else {
-            id = isAudio ? WEBRTC_AUDIO_QUALITY_INBOUND_JITTER :
-                           WEBRTC_VIDEO_QUALITY_INBOUND_JITTER;
-          }
-          Accumulate(id, s.mJitter.Value());
-        }
-        if (s.mRoundTripTime.WasPassed()) {
-          MOZ_ASSERT(s.mIsRemote);
-          HistogramID id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_RTT :
-                                     WEBRTC_VIDEO_QUALITY_OUTBOUND_RTT;
-          Accumulate(id, s.mRoundTripTime.Value());
-        }
-        if (lastInboundStats && s.mBytesReceived.WasPassed()) {
-          auto& laststats = *lastInboundStats;
-          auto i = FindId(laststats, s.mId.Value());
-          if (i != laststats.NoIndex) {
-            auto& lasts = laststats[i];
-            if (lasts.mBytesReceived.WasPassed()) {
-              auto delta_ms = int32_t(s.mTimestamp.Value() -
-                                      lasts.mTimestamp.Value());
-              // In theory we're called every second, so delta *should* be in that range.
-              // Small deltas could cause errors due to division
-              if (delta_ms > 500 && delta_ms < 60000) {
-                HistogramID id;
-                if (s.mIsRemote) {
-                  id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_BANDWIDTH_KBITS :
-                                 WEBRTC_VIDEO_QUALITY_OUTBOUND_BANDWIDTH_KBITS;
-                } else {
-                  id = isAudio ? WEBRTC_AUDIO_QUALITY_INBOUND_BANDWIDTH_KBITS :
-                                 WEBRTC_VIDEO_QUALITY_INBOUND_BANDWIDTH_KBITS;
-                }
-                Accumulate(id, ((s.mBytesReceived.Value() -
-                                 lasts.mBytesReceived.Value()) * 8) / delta_ms);
-              }
-              // We could accumulate values until enough time has passed
-              // and then Accumulate() but this isn't that important.
-            }
-          }
-        }
-      }
-    }
-  }
-  // Steal and hang on to reports for the next second
-  ctx->mLastReports.Clear();
-  for (auto & q : *aQueryList) {
-    ctx->mLastReports.AppendElement(q->report.forget()); // steal avoids copy
-  }
-  // Container must be freed back on main thread
-  NS_DispatchToMainThread(WrapRunnableNM(&FreeOnMain_m, aQueryList),
-                          NS_DISPATCH_NORMAL);
+  std::unique_ptr<dom::RTCStatsReportInternal> report(aQuery.report.forget());
+  // First, get reports from a second ago, if any, for calculations below
+  std::unique_ptr<dom::RTCStatsReportInternal> lastReport;
+  {
+    auto i = mLastReports.find(report->mPcid);
+    if (i != mLastReports.end()) {
+      lastReport = std::move(i->second);
+    }
+  }
+
+  if (report->mInboundRTPStreamStats.WasPassed()) {
+    // Then, look for the things we want telemetry on
+    for (auto& s : report->mInboundRTPStreamStats.Value()) {
+      bool isAudio = (s.mId.Value().Find("audio") != -1);
+      if (s.mPacketsLost.WasPassed() && s.mPacketsReceived.WasPassed() &&
+          (s.mPacketsLost.Value() + s.mPacketsReceived.Value()) != 0) {
+        HistogramID id;
+        if (s.mIsRemote) {
+          id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_PACKETLOSS_RATE :
+                         WEBRTC_VIDEO_QUALITY_OUTBOUND_PACKETLOSS_RATE;
+        } else {
+          id = isAudio ? WEBRTC_AUDIO_QUALITY_INBOUND_PACKETLOSS_RATE :
+                         WEBRTC_VIDEO_QUALITY_INBOUND_PACKETLOSS_RATE;
+        }
+        // *1000 so we can read in 10's of a percent (permille)
+        Accumulate(id,
+                   (s.mPacketsLost.Value() * 1000) /
+                   (s.mPacketsLost.Value() + s.mPacketsReceived.Value()));
+      }
+      if (s.mJitter.WasPassed()) {
+        HistogramID id;
+        if (s.mIsRemote) {
+          id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_JITTER :
+                         WEBRTC_VIDEO_QUALITY_OUTBOUND_JITTER;
+        } else {
+          id = isAudio ? WEBRTC_AUDIO_QUALITY_INBOUND_JITTER :
+                         WEBRTC_VIDEO_QUALITY_INBOUND_JITTER;
+        }
+        Accumulate(id, s.mJitter.Value());
+      }
+      if (s.mRoundTripTime.WasPassed()) {
+        MOZ_ASSERT(s.mIsRemote);
+        HistogramID id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_RTT :
+                                   WEBRTC_VIDEO_QUALITY_OUTBOUND_RTT;
+        Accumulate(id, s.mRoundTripTime.Value());
+      }
+      if (lastReport && lastReport->mInboundRTPStreamStats.WasPassed() &&
+          s.mBytesReceived.WasPassed()) {
+        auto& laststats = lastReport->mInboundRTPStreamStats.Value();
+        auto i = FindId(laststats, s.mId.Value());
+        if (i != laststats.NoIndex) {
+          auto& lasts = laststats[i];
+          if (lasts.mBytesReceived.WasPassed()) {
+            auto delta_ms = int32_t(s.mTimestamp.Value() -
+                                    lasts.mTimestamp.Value());
+            // In theory we're called every second, so delta *should* be in that range.
+            // Small deltas could cause errors due to division
+            if (delta_ms > 500 && delta_ms < 60000) {
+              HistogramID id;
+              if (s.mIsRemote) {
+                id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_BANDWIDTH_KBITS :
+                               WEBRTC_VIDEO_QUALITY_OUTBOUND_BANDWIDTH_KBITS;
+              } else {
+                id = isAudio ? WEBRTC_AUDIO_QUALITY_INBOUND_BANDWIDTH_KBITS :
+                               WEBRTC_VIDEO_QUALITY_INBOUND_BANDWIDTH_KBITS;
+              }
+              // We could accumulate values until enough time has passed
+              // and then Accumulate() but this isn't that important.
+              Accumulate(id, ((s.mBytesReceived.Value() -
+                               lasts.mBytesReceived.Value()) * 8) / delta_ms);
+            }
+          }
+        }
+      }
+    }
+  }
+  // Steal and hang on to reports for the next second
+  mLastReports[report->mPcid] = std::move(report);
 }
 
 void
 PeerConnectionCtx::EverySecondTelemetryCallback_m(nsITimer* timer, void *closure) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(PeerConnectionCtx::isActive());
-  auto ctx = static_cast<PeerConnectionCtx*>(closure);
-  if (ctx->mPeerConnections.empty()) {
-    return;
-  }
-  nsresult rv;
-  nsCOMPtr<nsIEventTarget> stsThread =
-    do_GetService(NS_SOCKETTRANSPORTSERVICE_CONTRACTID, &rv);
-  if (NS_FAILED(rv)) {
-    return;
-  }
-  MOZ_ASSERT(stsThread);
 
-  nsAutoPtr<RTCStatsQueries> queries(new RTCStatsQueries);
-  for (auto p = ctx->mPeerConnections.begin();
-       p != ctx->mPeerConnections.end(); ++p) {
-    if (p->second->HasMedia()) {
-      if (!queries->append(nsAutoPtr<RTCStatsQuery>(new RTCStatsQuery(true)))) {
-        return;
-      }
-      if (NS_WARN_IF(NS_FAILED(p->second->BuildStatsQuery_m(nullptr, // all tracks
-                                                            queries->back())))) {
-        queries->popBack();
-      } else {
-        MOZ_ASSERT(queries->back()->report);
-      }
+  for (auto& idAndPc : GetInstance()->mPeerConnections) {
+    if (idAndPc.second->HasMedia()) {
+      idAndPc.second->GetStats(nullptr, true)->Then(
+          GetMainThreadSerialEventTarget(),
+          __func__,
+          [=] (UniquePtr<RTCStatsQuery>&& aQuery) {
+            if(PeerConnectionCtx::isActive()) {
+              PeerConnectionCtx::GetInstance()->DeliverStats(*aQuery);
+            }
+          },
+          [=] (nsresult aError) {}
+      );
     }
   }
-  if (!queries->empty()) {
-    rv = RUN_ON_THREAD(stsThread,
-                       WrapRunnableNM(&EverySecondTelemetryCallback_s, queries),
-                       NS_DISPATCH_NORMAL);
-    NS_ENSURE_SUCCESS_VOID(rv);
-  }
 }
 
 void
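
Two unit conversions in the telemetry code above are easy to misread: packet loss is accumulated in permille (hence the *1000), and bandwidth in kbit/s (bits divided by elapsed milliseconds). A standalone worked example with made-up numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Packet loss in permille: 5 lost out of 1000 total -> 5 (i.e. 0.5%).
      uint32_t lost = 5, received = 995;
      uint32_t permille = (lost * 1000) / (lost + received);  // == 5

      // Bandwidth in kbit/s: bytes*8 is bits; bits / elapsed-ms == kbit/s.
      uint64_t bytesNow = 250000, bytesLast = 125000;
      int32_t deltaMs = 1000;  // the code only trusts 500 < delta < 60000
      uint64_t kbps = ((bytesNow - bytesLast) * 8) / deltaMs;  // == 1000

      printf("%u permille, %llu kbit/s\n", permille, (unsigned long long)kbps);
      return 0;
    }
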
@@ -6,6 +6,7 @@
 #define peerconnectionctx_h___h__
 
 #include <string>
+#include <map>
 
 #include "WebrtcGlobalChild.h"
 
@@ -79,12 +80,11 @@ class PeerConnectionCtx {
 
   nsCOMPtr<nsITimer> mTelemetryTimer;
 
-public:
-  // TODO(jib): If we ever enable move semantics on std::map...
-  //std::map<nsString,nsAutoPtr<mozilla::dom::RTCStatsReportInternal>> mLastReports;
-  nsTArray<nsAutoPtr<mozilla::dom::RTCStatsReportInternal>> mLastReports;
-private:
+  void DeliverStats(RTCStatsQuery& aQuery);
+
+  std::map<nsString,std::unique_ptr<mozilla::dom::RTCStatsReportInternal>> mLastReports;
 
 // We cannot form offers/answers properly until the Gecko Media Plugin stuff
 // has been initted, which is a complicated mess of thread dispatches,
 // including sync dispatches to main. So, we need to be able to queue up
@@ -233,17 +233,14 @@ const nsIID nsISupportsWeakReference::COMTypeInfo<nsSupportsWeakReference, void>
 namespace mozilla {
 
 RTCStatsQuery::RTCStatsQuery(bool internal) :
   failed(false),
   internalStats(internal),
   grabAllLevels(false),
   now(0.0) {
 }
 
 RTCStatsQuery::~RTCStatsQuery() {
-  MOZ_ASSERT(NS_IsMainThread());
 }
 
 NS_IMPL_ISUPPORTS0(PeerConnectionImpl)
 
 already_AddRefed<PeerConnectionImpl>
@@ -1667,22 +1664,19 @@ NS_IMETHODIMP
 PeerConnectionImpl::GetStats(MediaStreamTrack *aSelector) {
   PC_AUTO_ENTER_API_CALL(true);
 
   if (!mMedia) {
     // Since we zero this out before the d'tor, we should check.
     return NS_ERROR_UNEXPECTED;
   }
+  GetStats(aSelector, false)->Then(
+      GetMainThreadSerialEventTarget(),
+      __func__,
+      [handle = mHandle] (UniquePtr<RTCStatsQuery>&& aQuery) {
+        DeliverStatsReportToPCObserver_m(
+            handle, NS_OK, nsAutoPtr<RTCStatsQuery>(aQuery.release()));
+      },
+      [handle = mHandle] (nsresult aError) {
+        DeliverStatsReportToPCObserver_m(
+            handle, aError, nsAutoPtr<RTCStatsQuery>());
+      }
+  );
 
-  nsAutoPtr<RTCStatsQuery> query(new RTCStatsQuery(false));
-
-  nsresult rv = BuildStatsQuery_m(aSelector, query.get());
-
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  RUN_ON_THREAD(mSTSThread,
-                WrapRunnableNM(&PeerConnectionImpl::GetStatsForPCObserver_s,
-                               mHandle,
-                               query),
-                NS_DISPATCH_NORMAL);
   return NS_OK;
 }
 
@@ -2883,6 +2877,40 @@ PeerConnectionImpl::UpdateDefaultCandidate(const std::string& defaultAddr,
                                  transportId);
 }
 
+RefPtr<RTCStatsQueryPromise>
+PeerConnectionImpl::GetStats(dom::MediaStreamTrack* aSelector,
+                             bool aInternalStats)
+{
+  UniquePtr<RTCStatsQuery> query(new RTCStatsQuery(aInternalStats));
+  nsresult rv = BuildStatsQuery_m(aSelector, query.get());
+  if (NS_FAILED(rv)) {
+    return RTCStatsQueryPromise::CreateAndReject(rv, __func__);
+  }
+
+  nsTArray<RefPtr<MediaPipeline>> pipelines;
+  // Gather up pipelines from mMedia so they may be inspected on STS
+  mMedia->GetTransmitPipelinesMatching(aSelector, &pipelines);
+  mMedia->GetReceivePipelinesMatching(aSelector, &pipelines);
+  if (!pipelines.Length()) {
+    CSFLogError(LOGTAG,
+                "%s: Found no pipelines matching selector.",
+                __FUNCTION__);
+  }
+
+  return InvokeAsync(
+      mSTSThread,
+      __func__,
+      [transportHandler = mMedia->mTransportHandler,
+       pipelines,
+       aQuery = std::move(query)] () mutable
+      {
+        return PeerConnectionImpl::ExecuteStatsQuery_s(std::move(aQuery),
+                                                       pipelines,
+                                                       transportHandler);
+      }
+  );
+}
+
 nsresult
 PeerConnectionImpl::BuildStatsQuery_m(
     mozilla::dom::MediaStreamTrack *aSelector,
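
The InvokeAsync call above is what moves the query onto the STS thread: the lambda runs on the given target, and the promise it returns there is the one the caller's Then() observes. A minimal sketch of the idiom under the same Gecko-tree assumptions as before (IntPromise and ComputeOnTarget are illustrative):

    #include "mozilla/MozPromise.h"
    #include "nsThreadUtils.h"

    using namespace mozilla;
    using IntPromise = MozPromise<int, nsresult, true>;

    RefPtr<IntPromise> ComputeOnTarget(nsISerialEventTarget* aTarget) {
      // InvokeAsync dispatches the lambda to aTarget and chains the
      // promise the lambda returns into the promise handed to the caller.
      return InvokeAsync(aTarget, __func__, []() {
        int result = 42;  // work that must happen on aTarget
        return IntPromise::CreateAndResolve(result, __func__);
      });
    }
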
@@ -2892,30 +2920,18 @@ PeerConnectionImpl::BuildStatsQuery_m(
     return NS_ERROR_UNEXPECTED;
   }
 
-  if (!mThread) {
-    CSFLogError(LOGTAG, "Could not build stats query, no MainThread");
-    return NS_ERROR_UNEXPECTED;
-  }
-
   nsresult rv = GetTimeSinceEpoch(&(query->now));
   if (NS_FAILED(rv)) {
     CSFLogError(LOGTAG, "Could not build stats query, could not get timestamp");
     return rv;
   }
 
-  query->media = mMedia;
-  if (!query->media) {
-    CSFLogError(LOGTAG, "Could not build stats query, no ice_ctx");
-    return NS_ERROR_UNEXPECTED;
-  }
-
   // We do not use the pcHandle here, since that's risky to expose to content.
   query->report = new RTCStatsReportInternalConstruct(
       NS_ConvertASCIItoUTF16(mName.c_str()),
       query->now);
 
   query->iceStartTime = mIceStartTime;
   query->failed = isFailed(mIceConnectionState);
   query->report->mIceRestarts.Construct(mIceRestartCount);
   query->report->mIceRollbacks.Construct(mIceRollbackCount);
 
@@ -2941,14 +2957,6 @@ PeerConnectionImpl::BuildStatsQuery_m(
     }
   }
 
-  // Gather up pipelines from mMedia so they may be inspected on STS
-  mMedia->GetTransmitPipelinesMatching(aSelector, &query->pipelines);
-  mMedia->GetReceivePipelinesMatching(aSelector, &query->pipelines);
-  if (!query->pipelines.Length()) {
-    CSFLogError(LOGTAG,
-                "%s: Found no pipelines matching selector.",
-                __FUNCTION__);
-  }
   if (aSelector) {
     query->transportId = mMedia->GetTransportIdMatching(*aSelector);
   }
@@ -2957,24 +2965,25 @@ PeerConnectionImpl::BuildStatsQuery_m(
     query->grabAllLevels = true;
   }
 
-  return rv;
+  return NS_OK;
 }
 
-nsresult
-PeerConnectionImpl::ExecuteStatsQuery_s(RTCStatsQuery *query) {
-
-  ASSERT_ON_THREAD(query->media->GetSTSThread());
-
+RefPtr<RTCStatsQueryPromise>
+PeerConnectionImpl::ExecuteStatsQuery_s(
+    UniquePtr<RTCStatsQuery>&& query,
+    const nsTArray<RefPtr<MediaPipeline>>& aPipelines,
+    const RefPtr<MediaTransportHandler>& aTransportHandler)
+{
   // Gather stats from pipelines provided (can't touch mMedia + stream on STS)
-  for (size_t p = 0; p < query->pipelines.Length(); ++p) {
-    MOZ_ASSERT(query->pipelines[p]);
-    MOZ_ASSERT(query->pipelines[p]->Conduit());
-    if (!query->pipelines[p] || !query->pipelines[p]->Conduit()) {
+  for (size_t p = 0; p < aPipelines.Length(); ++p) {
+    MOZ_ASSERT(aPipelines[p]);
+    MOZ_ASSERT(aPipelines[p]->Conduit());
+    if (!aPipelines[p] || !aPipelines[p]->Conduit()) {
       // continue if we don't have a valid conduit
       continue;
     }
-    const MediaPipeline& mp = *query->pipelines[p];
+    const MediaPipeline& mp = *aPipelines[p];
     bool isAudio = (mp.Conduit()->type() == MediaSessionConduit::AUDIO);
     nsString kind = isAudio ?
       NS_LITERAL_STRING("audio") : NS_LITERAL_STRING("video");
@@ -3186,37 +3195,7 @@ PeerConnectionImpl::ExecuteStatsQuery_s(RTCStatsQuery *query) {
     }
   }
 
-  if (query->media->mTransportHandler) {
-    if (query->grabAllLevels) {
-      query->media->mTransportHandler->GetAllIceStats(query->now,
-                                                      query->report);
-    } else {
-      query->media->mTransportHandler->GetIceStats(query->transportId,
-                                                   query->now,
-                                                   query->report);
-    }
-  }
-
-  return NS_OK;
-}
-
-void PeerConnectionImpl::GetStatsForPCObserver_s(
-    const std::string& pcHandle, // The Runnable holds the memory
-    nsAutoPtr<RTCStatsQuery> query) {
-
-  MOZ_ASSERT(query);
-  MOZ_ASSERT(query->media);
-  ASSERT_ON_THREAD(query->media->GetSTSThread());
-
-  nsresult rv = PeerConnectionImpl::ExecuteStatsQuery_s(query.get());
-
-  NS_DispatchToMainThread(
-      WrapRunnableNM(
-          &PeerConnectionImpl::DeliverStatsReportToPCObserver_m,
-          pcHandle,
-          rv,
-          query),
-      NS_DISPATCH_NORMAL);
+  return aTransportHandler->GetIceStats(std::move(query));
 }
 
 void PeerConnectionImpl::DeliverStatsReportToPCObserver_m(
@@ -86,7 +86,7 @@ typedef NS_ConvertUTF8toUTF16 PCObserverString;
 #if defined(__cplusplus) && __cplusplus >= 201103L
 typedef struct Timecard Timecard;
 #else
-#include "timecard.h"
+#include "signaling/src/common/time_profiling/timecard.h"
 #endif
 
 // To preserve blame, convert nsresult to ErrorResult with wrappers. These macros
@@ -129,26 +129,22 @@ class PCUuidGenerator : public mozilla::JsepUuidGenerator {
 class RTCStatsQuery {
   public:
     explicit RTCStatsQuery(bool internalStats);
+    RTCStatsQuery(RTCStatsQuery&& aOrig) = default;
     ~RTCStatsQuery();
 
     nsAutoPtr<mozilla::dom::RTCStatsReportInternal> report;
     std::string error;
     // A timestamp to help with telemetry.
    mozilla::TimeStamp iceStartTime;
     // Just for convenience, maybe integrate into the report later
     bool failed;
 
   private:
     friend class PeerConnectionImpl;
     std::string pcName;
     bool internalStats;
-    nsTArray<RefPtr<mozilla::MediaPipeline>> pipelines;
     std::string transportId;
-    RefPtr<PeerConnectionMedia> media;
     bool grabAllLevels;
     DOMHighResTimeStamp now;
 };
 
+typedef MozPromise<UniquePtr<RTCStatsQuery>, nsresult, true>
+  RTCStatsQueryPromise;
+
 // Enter an API call and check that the state is OK,
 // the PC isn't closed, etc.
 #define PC_AUTO_ENTER_API_CALL(assert_ice_ready) \
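
The third template argument in the typedef above (true) marks RTCStatsQueryPromise exclusive: only one Then() may be attached, which is what lets the move-only UniquePtr<RTCStatsQuery> be handed to the consumer by rvalue reference rather than copied. A small sketch of the distinction, with illustrative types:

    #include "mozilla/MozPromise.h"
    #include "mozilla/UniquePtr.h"
    #include "nsThreadUtils.h"

    using namespace mozilla;

    struct BigReport { /* expensive or impossible to copy */ };

    // IsExclusive == true: a single consumer, resolve value is moved.
    using ReportPromise = MozPromise<UniquePtr<BigReport>, nsresult, true>;

    void Demo() {
      ReportPromise::CreateAndResolve(MakeUnique<BigReport>(), __func__)
          ->Then(GetMainThreadSerialEventTarget(), __func__,
                 [](UniquePtr<BigReport>&& aReport) {
                   UniquePtr<BigReport> mine = std::move(aReport);  // take it
                 },
                 [](nsresult) {});
    }
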
@@ -544,11 +540,8 @@ public:
   // initialize telemetry for when calls start
   void startCallTelem();
 
-  nsresult BuildStatsQuery_m(
-      mozilla::dom::MediaStreamTrack *aSelector,
-      RTCStatsQuery *query);
-
-  static nsresult ExecuteStatsQuery_s(RTCStatsQuery *query);
+  RefPtr<RTCStatsQueryPromise> GetStats(
+      dom::MediaStreamTrack* aSelector, bool aInternalStats);
 
   // for monitoring changes in track ownership
   // PeerConnectionMedia can't do it because it doesn't know about principals
@@ -566,6 +559,14 @@ private:
   virtual ~PeerConnectionImpl();
   PeerConnectionImpl(const PeerConnectionImpl&rhs);
   PeerConnectionImpl& operator=(PeerConnectionImpl);
+  nsresult BuildStatsQuery_m(
+      mozilla::dom::MediaStreamTrack *aSelector,
+      RTCStatsQuery *query);
+  static RefPtr<RTCStatsQueryPromise> ExecuteStatsQuery_s(
+      UniquePtr<RTCStatsQuery>&& query,
+      const nsTArray<RefPtr<MediaPipeline>>& aPipelines,
+      const RefPtr<MediaTransportHandler>& aTransportHandler);
+
   nsresult CalculateFingerprint(const std::string& algorithm,
                                 std::vector<uint8_t>* fingerprint) const;
   nsresult ConfigureJsepSessionCodecs();
@@ -616,10 +617,6 @@ private:
       dom::MediaStreamTrack* aSendTrack,
       ErrorResult& aRv);
 
-  static void GetStatsForPCObserver_s(
-      const std::string& pcHandle,
-      nsAutoPtr<RTCStatsQuery> query);
-
   // Sends an RTCStatsReport to JS. Must run on main thread.
   static void DeliverStatsReportToPCObserver_m(
       const std::string& pcHandle,
@@ -681,7 +678,7 @@ private:
   std::string mName;
 
   // The target to run stuff on
-  nsCOMPtr<nsIEventTarget> mSTSThread;
+  nsCOMPtr<nsISerialEventTarget> mSTSThread;
 
   // DataConnection that's used to get all the DataChannels
   RefPtr<mozilla::DataChannelConnection> mDataConnection;
@@ -239,22 +239,23 @@ static PeerConnectionCtx* GetPeerConnectionCtx()
 static void
 OnStatsReport_m(WebrtcGlobalChild* aThisChild,
                 const int aRequestId,
-                nsAutoPtr<RTCStatsQueries> aQueryList)
+                nsTArray<UniquePtr<RTCStatsQuery>>&& aQueryList)
 {
   MOZ_ASSERT(NS_IsMainThread());
-  MOZ_ASSERT(aQueryList);
 
   if (aThisChild) {
     Stats stats;
 
     // Copy stats generated for the currently active PeerConnections
-    for (auto&& query : *aQueryList) {
-      stats.AppendElement(*(query->report));
+    for (auto& query : aQueryList) {
+      if (query) {
+        stats.AppendElement(*query->report);
+      }
     }
     // Reports saved for closed/destroyed PeerConnections
     auto ctx = PeerConnectionCtx::GetInstance();
     if (ctx) {
-      for (auto&& pc : ctx->mStatsForClosedPeerConnections) {
+      for (auto& pc : ctx->mStatsForClosedPeerConnections) {
         stats.AppendElement(pc);
       }
     }
@@ -273,8 +274,11 @@ OnStatsReport_m(WebrtcGlobalChild* aThisChild,
       return;
     }
 
-    for (auto&& query : *aQueryList) {
-      request->mResult.mReports.Value().AppendElement(*(query->report), fallible);
+    for (auto& query : aQueryList) {
+      if (query) {
+        request->mResult.mReports.Value().AppendElement(
+            *(query->report), fallible);
+      }
     }
 
     // Reports saved for closed/destroyed PeerConnections
@@ -289,29 +293,6 @@ OnStatsReport_m(WebrtcGlobalChild* aThisChild,
   StatsRequest::Delete(aRequestId);
 }
 
-static void
-GetAllStats_s(WebrtcGlobalChild* aThisChild,
-              const int aRequestId,
-              nsAutoPtr<RTCStatsQueries> aQueryList)
-{
-  MOZ_ASSERT(aQueryList);
-  // The call to PeerConnetionImpl must happen from a runnable
-  // dispatched on the STS thread.
-
-  // Get stats from active connections.
-  for (auto&& query : *aQueryList) {
-    PeerConnectionImpl::ExecuteStatsQuery_s(query);
-  }
-
-  // After the RTCStatsQueries have been filled in, control must return
-  // to the main thread before their eventual destruction.
-  NS_DispatchToMainThread(WrapRunnableNM(&OnStatsReport_m,
-                                         aThisChild,
-                                         aRequestId,
-                                         aQueryList),
-                          NS_DISPATCH_NORMAL);
-}
-
 static void OnGetLogging_m(WebrtcGlobalChild* aThisChild,
                            const int aRequestId,
                            nsAutoPtr<std::deque<std::string>> aLogList)
@@ -376,65 +357,49 @@ static void GetLogging_s(WebrtcGlobalChild* aThisChild,
                  NS_DISPATCH_NORMAL);
 }
 
-static nsresult
-BuildStatsQueryList(
-  const std::map<const std::string, PeerConnectionImpl *>& aPeerConnections,
-  const nsAString& aPcIdFilter,
-  RTCStatsQueries* queries)
-{
-  nsresult rv;
-
-  for (auto&& pc : aPeerConnections) {
-    MOZ_ASSERT(pc.second);
-    if (aPcIdFilter.IsEmpty() ||
-        aPcIdFilter.EqualsASCII(pc.second->GetIdAsAscii().c_str())) {
-      if (pc.second->HasMedia()) {
-        if (!queries->append(nsAutoPtr<RTCStatsQuery>(new RTCStatsQuery(true)))) {
-          return NS_ERROR_OUT_OF_MEMORY;
-        }
-        rv = pc.second->BuildStatsQuery_m(nullptr, queries->back()); // all tracks
-        if (NS_WARN_IF(NS_FAILED(rv))) {
-          return rv;
-        }
-        MOZ_ASSERT(queries->back()->report);
-      }
-    }
-  }
-
-  return NS_OK;
-}
-
-static nsresult
+static void
 RunStatsQuery(
   const std::map<const std::string, PeerConnectionImpl *>& aPeerConnections,
   const nsAString& aPcIdFilter,
   WebrtcGlobalChild* aThisChild,
   const int aRequestId)
 {
-  nsAutoPtr<RTCStatsQueries> queries(new RTCStatsQueries);
-  nsresult rv = BuildStatsQueryList(aPeerConnections, aPcIdFilter, queries);
+  nsTArray<RefPtr<RTCStatsQueryPromise>> promises;
 
-  if (NS_FAILED(rv)) {
-    return rv;
+  for (auto& idAndPc : aPeerConnections) {
+    MOZ_ASSERT(idAndPc.second);
+    PeerConnectionImpl& pc = *idAndPc.second;
+    if (aPcIdFilter.IsEmpty() ||
+        aPcIdFilter.EqualsASCII(pc.GetIdAsAscii().c_str())) {
+      if (pc.HasMedia()) {
+        promises.AppendElement(
+            pc.GetStats(nullptr, true)->Then(
+                GetMainThreadSerialEventTarget(),
+                __func__,
+                [=] (UniquePtr<RTCStatsQuery>&& aQuery) {
+                  return RTCStatsQueryPromise::CreateAndResolve(
+                      std::move(aQuery), __func__);
+                },
+                [=] (nsresult aError) {
+                  // Ignore errors! Just resolve with a nullptr.
+                  return RTCStatsQueryPromise::CreateAndResolve(
+                      UniquePtr<RTCStatsQuery>(), __func__);
+                }
+            )
+        );
+      }
+    }
   }
 
-  nsCOMPtr<nsIEventTarget> stsThread =
-    do_GetService(NS_SOCKETTRANSPORTSERVICE_CONTRACTID, &rv);
-
-  if (NS_FAILED(rv)) {
-    return rv;
-  }
-  if (!stsThread) {
-    return NS_ERROR_FAILURE;
-  }
-
-  rv = RUN_ON_THREAD(stsThread,
-                     WrapRunnableNM(&GetAllStats_s,
-                                    aThisChild,
-                                    aRequestId,
-                                    queries),
-                     NS_DISPATCH_NORMAL);
-  return rv;
+  RTCStatsQueryPromise::All(GetMainThreadSerialEventTarget(), promises)->Then(
+      GetMainThreadSerialEventTarget(),
+      __func__,
+      [aThisChild, aRequestId] (
+          nsTArray<UniquePtr<RTCStatsQuery>>&& aQueries) {
+        OnStatsReport_m(aThisChild, aRequestId, std::move(aQueries));
+      },
+      [=] (nsresult) {MOZ_CRASH();}
+  );
 }
 
 void ClearClosedStats()
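
One subtlety in RunStatsQuery above: MozPromise::All rejects as soon as any member promise rejects, so each per-connection promise is first mapped so that failure resolves with a null query, which OnStatsReport_m then skips. A sketch of that error-swallowing shape; QueryPromise and the helper functions are illustrative, not from the patch:

    #include "mozilla/MozPromise.h"
    #include "mozilla/UniquePtr.h"
    #include "nsTArray.h"
    #include "nsThreadUtils.h"

    using namespace mozilla;
    using QueryPromise = MozPromise<UniquePtr<int>, nsresult, true>;

    // Map rejection to resolution-with-nullptr so one failure cannot
    // reject the aggregate All() promise.
    RefPtr<QueryPromise> Tolerant(RefPtr<QueryPromise> aInner) {
      return aInner->Then(
          GetMainThreadSerialEventTarget(), __func__,
          [](UniquePtr<int>&& aVal) {
            return QueryPromise::CreateAndResolve(std::move(aVal), __func__);
          },
          [](nsresult) {
            return QueryPromise::CreateAndResolve(UniquePtr<int>(), __func__);
          });
    }

    void JoinAll(nsTArray<RefPtr<QueryPromise>>& aPromises) {
      QueryPromise::All(GetMainThreadSerialEventTarget(), aPromises)->Then(
          GetMainThreadSerialEventTarget(), __func__,
          [](nsTArray<UniquePtr<int>>&& aResults) {
            for (auto& r : aResults) {
              if (r) { /* use *r; null means that query failed */ }
            }
          },
          [](nsresult) { /* unreachable given Tolerant() above */ });
    }
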
@@ -516,23 +481,17 @@ WebrtcGlobalInformation::GetAllStats(
   // No content resident PeerConnectionCtx instances.
   // Check this process.
   PeerConnectionCtx* ctx = GetPeerConnectionCtx();
-  nsresult rv;
 
   if (ctx) {
-    rv = RunStatsQuery(ctx->mGetPeerConnections(),
-                       filter, nullptr, request->mRequestId);
-
-    if (NS_FAILED(rv)) {
-      StatsRequest::Delete(request->mRequestId);
-    }
+    RunStatsQuery(ctx->mGetPeerConnections(),
+                  filter, nullptr, request->mRequestId);
   } else {
     // Just send back an empty report.
-    rv = NS_OK;
     request->Complete();
     StatsRequest::Delete(request->mRequestId);
   }
 
-  aRv = rv;
+  aRv = NS_OK;
 }
 
 static nsresult
@@ -721,7 +680,6 @@ WebrtcGlobalParent::RecvGetStatsResult(const int& aRequestId,
                                        nsTArray<RTCStatsReportInternal>&& Stats)
 {
   MOZ_ASSERT(NS_IsMainThread());
-  nsresult rv = NS_OK;
 
   StatsRequest* request = StatsRequest::Get(aRequestId);
 
@@ -730,7 +688,7 @@ WebrtcGlobalParent::RecvGetStatsResult(const int& aRequestId,
     return IPC_FAIL_NO_REASON(this);
   }
 
-  for (auto&& s : Stats) {
+  for (auto& s : Stats) {
     request->mResult.mReports.Value().AppendElement(s, fallible);
   }
 
@@ -747,17 +705,14 @@ WebrtcGlobalParent::RecvGetStatsResult(const int& aRequestId,
   PeerConnectionCtx* ctx = GetPeerConnectionCtx();
 
   if (ctx) {
-    rv = RunStatsQuery(ctx->mGetPeerConnections(),
-                       request->mPcIdFilter, nullptr, aRequestId);
+    RunStatsQuery(ctx->mGetPeerConnections(),
+                  request->mPcIdFilter, nullptr, aRequestId);
   } else {
     // No instance in the process, return the collections as is
     request->Complete();
     StatsRequest::Delete(aRequestId);
   }
 
-  if (NS_FAILED(rv)) {
-    return IPC_FAIL_NO_REASON(this);
-  }
   return IPC_OK();
 }
 
@@ -844,11 +799,7 @@ WebrtcGlobalChild::RecvGetStatsRequest(const int& aRequestId,
   PeerConnectionCtx* ctx = GetPeerConnectionCtx();
 
   if (ctx) {
-    nsresult rv = RunStatsQuery(ctx->mGetPeerConnections(),
-                                aPcIdFilter, this, aRequestId);
-    if (NS_FAILED(rv)) {
-      return IPC_FAIL_NO_REASON(this);
-    }
+    RunStatsQuery(ctx->mGetPeerConnections(), aPcIdFilter, this, aRequestId);
     return IPC_OK();
   }
 
@@ -1000,12 +951,11 @@ static uint32_t GetCandidateIpAndTransportMask(const RTCIceCandidateStats *cand)
 
 static void StoreLongTermICEStatisticsImpl_m(
     nsresult result,
-    nsAutoPtr<RTCStatsQuery> query) {
+    RTCStatsQuery* query) {
 
   using namespace Telemetry;
 
   if (NS_FAILED(result) ||
      !query->error.empty() ||
      !query->report->mIceCandidateStats.WasPassed()) {
     return;
   }
@@ -1197,47 +1147,6 @@ static void StoreLongTermICEStatisticsImpl_m(
   }
 }
 
-static void GetStatsForLongTermStorage_s(
-    nsAutoPtr<RTCStatsQuery> query) {
-
-  MOZ_ASSERT(query);
-
-  nsresult rv = PeerConnectionImpl::ExecuteStatsQuery_s(query.get());
-
-  // Check whether packets were dropped due to rate limiting during
-  // this call. (These calls must be made on STS)
-  unsigned char rate_limit_bit_pattern = 0;
-  if (!mozilla::nr_socket_short_term_violation_time().IsNull() &&
-      !query->iceStartTime.IsNull() &&
-      mozilla::nr_socket_short_term_violation_time() >= query->iceStartTime) {
-    rate_limit_bit_pattern |= 1;
-  }
-  if (!mozilla::nr_socket_long_term_violation_time().IsNull() &&
-      !query->iceStartTime.IsNull() &&
-      mozilla::nr_socket_long_term_violation_time() >= query->iceStartTime) {
-    rate_limit_bit_pattern |= 2;
-  }
-
-  if (query->failed) {
-    Telemetry::Accumulate(
-        Telemetry::WEBRTC_STUN_RATE_LIMIT_EXCEEDED_BY_TYPE_GIVEN_FAILURE,
-        rate_limit_bit_pattern);
-  } else {
-    Telemetry::Accumulate(
-        Telemetry::WEBRTC_STUN_RATE_LIMIT_EXCEEDED_BY_TYPE_GIVEN_SUCCESS,
-        rate_limit_bit_pattern);
-  }
-
-  // Even if Telemetry::Accumulate is threadsafe, we still need to send the
-  // query back to main, since that is where it must be destroyed.
-  NS_DispatchToMainThread(
-      WrapRunnableNM(
-          &StoreLongTermICEStatisticsImpl_m,
-          rv,
-          query),
-      NS_DISPATCH_NORMAL);
-}
-
 void WebrtcGlobalInformation::StoreLongTermICEStatistics(
     PeerConnectionImpl& aPc) {
   Telemetry::Accumulate(Telemetry::WEBRTC_ICE_FINAL_CONNECTION_STATE,
@@ -1249,16 +1158,16 @@ void WebrtcGlobalInformation::StoreLongTermICEStatistics(
     return;
   }
 
-  nsAutoPtr<RTCStatsQuery> query(new RTCStatsQuery(true));
-
-  nsresult rv = aPc.BuildStatsQuery_m(nullptr, query.get());
-
-  NS_ENSURE_SUCCESS_VOID(rv);
-
-  RUN_ON_THREAD(aPc.GetSTSThread(),
-                WrapRunnableNM(&GetStatsForLongTermStorage_s,
-                               query),
-                NS_DISPATCH_NORMAL);
+  aPc.GetStats(nullptr, true)->Then(
+      GetMainThreadSerialEventTarget(),
+      __func__,
+      [=] (UniquePtr<RTCStatsQuery>&& aQuery) {
+        StoreLongTermICEStatisticsImpl_m(NS_OK, aQuery.get());
+      },
+      [=] (nsresult aError) {
+        StoreLongTermICEStatisticsImpl_m(aError, nullptr);
+      }
+  );
 }
 
 } // namespace dom
@@ -604,6 +604,7 @@ NS_IMPL_ISUPPORTS(nsSocketTransportService,
                   nsISocketTransportService,
                   nsIRoutedSocketTransportService,
                   nsIEventTarget,
+                  nsISerialEventTarget,
                   nsIThreadObserver,
                   nsIRunnable,
                   nsPISocketTransportService,
@@ -83,7 +83,7 @@ private:
 //-----------------------------------------------------------------------------
 
 class nsSocketTransportService final : public nsPISocketTransportService
-                                     , public nsIEventTarget
+                                     , public nsISerialEventTarget
                                      , public nsIThreadObserver
                                      , public nsIRunnable
                                      , public nsIObserver
@@ -32,6 +32,8 @@
 
 #include "DataChannelLog.h"
 
+#define DATACHANNEL_LOG(args) LOG(args)
+
 #include "nsServiceManagerUtils.h"
 #include "nsIObserverService.h"
 #include "nsIObserver.h"
@@ -54,7 +56,6 @@
 #include "mediapacket.h"
 #endif
 
-#define DATACHANNEL_LOG(args) LOG(args)
 #include "DataChannel.h"
 #include "DataChannelProtocol.h"
 