Bug 1332664: Cherry-pick upstream webrtc.org patch to not depend on 'experimental' includes for libvpx r=rillian

This commit is contained in:
Randell Jesup 2017-01-27 21:10:58 -05:00
parent 3035683048
commit db244f7210
5 changed files with 201 additions and 18 deletions

View File

@@ -5,7 +5,7 @@ import subprocess
import sys
LONG_OPTIONS = ["shard=", "shards="]
BASE_COMMAND = "./configure --enable-internal-stats --enable-experimental"
BASE_COMMAND = "./configure --enable-internal-stats"
def RunCommand(command):
run = subprocess.Popen(command, shell=True)

View File

@@ -41,6 +41,9 @@ def apply_patches():
# Avoid c/asm name collision for loopfilter_sse2
os.system("patch -p1 < rename_duplicate_files.patch")
os.system("mv libvpx/vpx_dsp/x86/loopfilter_sse2.c libvpx/vpx_dsp/x86/loopfilter_intrin_sse2.c")
# Cherry-pick upstream patch for avoiding --enable-experimental
# from https://codereview.webrtc.org/2654633002
os.system("patch -p1 < vp9_svc.patch")
def update_readme(commit):

179
media/libvpx/vp9_svc.patch Normal file
View File

@@ -0,0 +1,179 @@
# HG changeset patch
# Parent 30581ce4c9566cf4fb4ff2798bddff5ce21741f5
Bug 1332664: Cherry-pick upstream webrtc.org patch to not depend on 'experimental' includes for libvpx r=rillian
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -74,16 +74,17 @@ VP9EncoderImpl::VP9EncoderImpl()
frames_since_kf_(0),
num_temporal_layers_(0),
num_spatial_layers_(0),
num_cores_(0),
frames_encoded_(0),
// Use two spatial when screensharing with flexible mode.
spatial_layer_(new ScreenshareLayersVP9(2)) {
memset(&codec_, 0, sizeof(codec_));
+ memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
srand(seed);
}
VP9EncoderImpl::~VP9EncoderImpl() {
Release();
}
@@ -138,24 +139,24 @@ bool VP9EncoderImpl::SetSvcRates() {
codec_.spatialLayers[i].target_bitrate_bps /
total_bitrate_bps);
}
} else {
float rate_ratio[VPX_MAX_LAYERS] = {0};
float total = 0;
for (i = 0; i < num_spatial_layers_; ++i) {
- if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
- svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
+ if (svc_params_.scaling_factor_num[i] <= 0 ||
+ svc_params_.scaling_factor_den[i] <= 0) {
LOG(LS_ERROR) << "Scaling factors not specified!";
return false;
}
rate_ratio[i] =
- static_cast<float>(svc_internal_.svc_params.scaling_factor_num[i]) /
- svc_internal_.svc_params.scaling_factor_den[i];
+ static_cast<float>(svc_params_.scaling_factor_num[i]) /
+ svc_params_.scaling_factor_den[i];
total += rate_ratio[i];
}
for (i = 0; i < num_spatial_layers_; ++i) {
config_->ss_target_bitrate[i] = static_cast<unsigned int>(
config_->rc_target_bitrate * rate_ratio[i] / total);
if (num_temporal_layers_ == 1) {
config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
@@ -385,32 +386,32 @@ int VP9EncoderImpl::NumberOfThreads(int
return 1;
}
}
int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
// Set QP-min/max per spatial and temporal layer.
int tot_num_layers = num_spatial_layers_ * num_temporal_layers_;
for (int i = 0; i < tot_num_layers; ++i) {
- svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
- svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
+ svc_params_.max_quantizers[i] = config_->rc_max_quantizer;
+ svc_params_.min_quantizers[i] = config_->rc_min_quantizer;
}
config_->ss_number_layers = num_spatial_layers_;
if (ExplicitlyConfiguredSpatialLayers()) {
for (int i = 0; i < num_spatial_layers_; ++i) {
const auto& layer = codec_.spatialLayers[i];
- svc_internal_.svc_params.scaling_factor_num[i] = layer.scaling_factor_num;
- svc_internal_.svc_params.scaling_factor_den[i] = layer.scaling_factor_den;
+ svc_params_.scaling_factor_num[i] = layer.scaling_factor_num;
+ svc_params_.scaling_factor_den[i] = layer.scaling_factor_den;
}
} else {
int scaling_factor_num = 256;
for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
// 1:2 scaling in each dimension.
- svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
- svc_internal_.svc_params.scaling_factor_den[i] = 256;
+ svc_params_.scaling_factor_num[i] = scaling_factor_num;
+ svc_params_.scaling_factor_den[i] = 256;
if (codec_.mode != kScreensharing)
scaling_factor_num /= 2;
}
}
if (!SetSvcRates()) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -424,17 +425,17 @@ int VP9EncoderImpl::InitAndSetControlSet
vpx_codec_control(encoder_, VP9E_SET_AQ_MODE,
inst->codecSpecific.VP9.adaptiveQpMode ? 3 : 0);
vpx_codec_control(
encoder_, VP9E_SET_SVC,
(num_temporal_layers_ > 1 || num_spatial_layers_ > 1) ? 1 : 0);
if (num_temporal_layers_ > 1 || num_spatial_layers_ > 1) {
vpx_codec_control(encoder_, VP9E_SET_SVC_PARAMETERS,
- &svc_internal_.svc_params);
+ &svc_params_);
}
// Register callback for getting each spatial layer.
vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
VP9EncoderImpl::EncoderOutputCodedPacketCallback,
reinterpret_cast<void*>(this)};
vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK,
reinterpret_cast<void*>(&cbp));
@@ -680,21 +681,21 @@ void VP9EncoderImpl::PopulateCodecSpecif
static_cast<uint8_t>(frames_since_kf_ % gof_.num_frames_in_gof);
vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
}
if (vp9_info->ss_data_available) {
vp9_info->spatial_layer_resolution_present = true;
for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) {
vp9_info->width[i] = codec_.width *
- svc_internal_.svc_params.scaling_factor_num[i] /
- svc_internal_.svc_params.scaling_factor_den[i];
+ svc_params_.scaling_factor_num[i] /
+ svc_params_.scaling_factor_den[i];
vp9_info->height[i] = codec_.height *
- svc_internal_.svc_params.scaling_factor_num[i] /
- svc_internal_.svc_params.scaling_factor_den[i];
+ svc_params_.scaling_factor_num[i] /
+ svc_params_.scaling_factor_den[i];
}
if (!vp9_info->flexible_mode) {
vp9_info->gof.CopyGofInfoVP9(gof_);
}
}
}
int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -12,17 +12,17 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
#include <vector>
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
-#include "vpx/svc_context.h"
+#include "vpx/vp8cx.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_encoder.h"
namespace webrtc {
class ScreenshareLayersVP9;
class VP9EncoderImpl : public VP9Encoder {
@@ -111,17 +111,17 @@ class VP9EncoderImpl : public VP9Encoder
bool inited_;
int64_t timestamp_;
uint16_t picture_id_;
int cpu_speed_;
uint32_t rc_max_intra_target_;
vpx_codec_ctx_t* encoder_;
vpx_codec_enc_cfg_t* config_;
vpx_image_t* raw_;
- SvcInternal_t svc_internal_;
+ vpx_svc_extra_cfg_t svc_params_;
const VideoFrame* input_image_;
GofInfoVP9 gof_; // Contains each frame's temporal information for
// non-flexible mode.
uint8_t tl0_pic_idx_; // Only used in non-flexible mode.
size_t frames_since_kf_;
uint8_t num_temporal_layers_;
uint8_t num_spatial_layers_;
uint8_t num_cores_;

View File

@@ -79,6 +79,7 @@ VP9EncoderImpl::VP9EncoderImpl()
// Use two spatial when screensharing with flexible mode.
spatial_layer_(new ScreenshareLayersVP9(2)) {
memset(&codec_, 0, sizeof(codec_));
memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
srand(seed);
}
@@ -143,14 +144,14 @@ bool VP9EncoderImpl::SetSvcRates() {
float total = 0;
for (i = 0; i < num_spatial_layers_; ++i) {
if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
if (svc_params_.scaling_factor_num[i] <= 0 ||
svc_params_.scaling_factor_den[i] <= 0) {
LOG(LS_ERROR) << "Scaling factors not specified!";
return false;
}
rate_ratio[i] =
static_cast<float>(svc_internal_.svc_params.scaling_factor_num[i]) /
svc_internal_.svc_params.scaling_factor_den[i];
static_cast<float>(svc_params_.scaling_factor_num[i]) /
svc_params_.scaling_factor_den[i];
total += rate_ratio[i];
}
@@ -390,22 +391,22 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
// Set QP-min/max per spatial and temporal layer.
int tot_num_layers = num_spatial_layers_ * num_temporal_layers_;
for (int i = 0; i < tot_num_layers; ++i) {
svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
svc_params_.max_quantizers[i] = config_->rc_max_quantizer;
svc_params_.min_quantizers[i] = config_->rc_min_quantizer;
}
config_->ss_number_layers = num_spatial_layers_;
if (ExplicitlyConfiguredSpatialLayers()) {
for (int i = 0; i < num_spatial_layers_; ++i) {
const auto& layer = codec_.spatialLayers[i];
svc_internal_.svc_params.scaling_factor_num[i] = layer.scaling_factor_num;
svc_internal_.svc_params.scaling_factor_den[i] = layer.scaling_factor_den;
svc_params_.scaling_factor_num[i] = layer.scaling_factor_num;
svc_params_.scaling_factor_den[i] = layer.scaling_factor_den;
}
} else {
int scaling_factor_num = 256;
for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
// 1:2 scaling in each dimension.
svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
svc_internal_.svc_params.scaling_factor_den[i] = 256;
svc_params_.scaling_factor_num[i] = scaling_factor_num;
svc_params_.scaling_factor_den[i] = 256;
if (codec_.mode != kScreensharing)
scaling_factor_num /= 2;
}
@@ -429,7 +430,7 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
(num_temporal_layers_ > 1 || num_spatial_layers_ > 1) ? 1 : 0);
if (num_temporal_layers_ > 1 || num_spatial_layers_ > 1) {
vpx_codec_control(encoder_, VP9E_SET_SVC_PARAMETERS,
&svc_internal_.svc_params);
&svc_params_);
}
// Register callback for getting each spatial layer.
vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
@@ -685,11 +686,11 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
vp9_info->spatial_layer_resolution_present = true;
for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) {
vp9_info->width[i] = codec_.width *
svc_internal_.svc_params.scaling_factor_num[i] /
svc_internal_.svc_params.scaling_factor_den[i];
svc_params_.scaling_factor_num[i] /
svc_params_.scaling_factor_den[i];
vp9_info->height[i] = codec_.height *
svc_internal_.svc_params.scaling_factor_num[i] /
svc_internal_.svc_params.scaling_factor_den[i];
svc_params_.scaling_factor_num[i] /
svc_params_.scaling_factor_den[i];
}
if (!vp9_info->flexible_mode) {
vp9_info->gof.CopyGofInfoVP9(gof_);

View File

@@ -17,7 +17,7 @@
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_encoder.h"
@@ -116,7 +116,7 @@ class VP9EncoderImpl : public VP9Encoder {
vpx_codec_ctx_t* encoder_;
vpx_codec_enc_cfg_t* config_;
vpx_image_t* raw_;
SvcInternal_t svc_internal_;
vpx_svc_extra_cfg_t svc_params_;
const VideoFrame* input_image_;
GofInfoVP9 gof_; // Contains each frame's temporal information for
// non-flexible mode.