Merge remote-tracking branch 'qatar/master'

* qatar/master:
  libopus: Remap channels using libopus' internal remapping.
  Opus decoder using libopus
  avcodec: document the use of AVCodecContext.delay for audio decoding
  vc1dec: add flush function for WMV9 and VC-1 decoders
  http: Increase buffer sizes to cope with longer URIs
  nutenc: const correctness for ff_put_v_trace/put_s_trace function arguments
  h264_refs: Fix debug tprintf argument types
  golomb: const correctness for get_ue()/get_se() function arguments
  get_bits: const correctness for get_bits_trace()/get_xbits_trace() arguments

Conflicts:
	Changelog
	libavcodec/Makefile
	libavcodec/version.h
	libavformat/http.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
This commit is contained in:
Michael Niedermayer 2012-09-28 13:36:22 +02:00
commit 2acb5cd907
11 changed files with 91 additions and 78 deletions

View File

@@ -813,6 +813,8 @@ following image formats are supported:
@item Musepack SV7 @tab @tab X
@item Musepack SV8 @tab @tab X
@item Nellymoser Asao @tab X @tab X
@item Opus @tab @tab E
@tab supported through external library libopus
@item PCM A-law @tab X @tab X
@item PCM mu-law @tab X @tab X
@item PCM 16-bit little-endian planar @tab @tab X

View File

@@ -657,7 +657,7 @@ OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o \
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopus_dec.o vorbis_data.o
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopusdec.o vorbis_data.o
OBJS-$(CONFIG_LIBSCHROEDINGER_DECODER) += libschroedingerdec.o \
libschroedinger.o
OBJS-$(CONFIG_LIBSCHROEDINGER_ENCODER) += libschroedingerenc.o \

View File

@@ -1608,12 +1608,15 @@ typedef struct AVCodecContext {
* encoded input.
*
* Audio:
* Number of "priming" samples added to the beginning of the stream
* during encoding. The decoded output will be delayed by this many
* samples relative to the input to the encoder. Note that this field is
* purely informational and does not directly affect the pts output by
* the encoder, which should always be based on the actual presentation
* time, including any delay.
* For encoding, this is the number of "priming" samples added to the
* beginning of the stream. The decoded output will be delayed by this
* many samples relative to the input to the encoder. Note that this
* field is purely informational and does not directly affect the pts
* output by the encoder, which should always be based on the actual
* presentation time, including any delay.
* For decoding, this is the number of samples the decoder needs to
* output before the decoder's output is valid. When seeking, you should
* start decoding this many samples prior to your desired seek point.
*
* - encoding: Set by libavcodec.
* - decoding: Set by libavcodec.

View File

@@ -521,7 +521,7 @@ static inline void print_bin(int bits, int n)
av_log(NULL, AV_LOG_DEBUG, " ");
}
static inline int get_bits_trace(GetBitContext *s, int n, char *file,
static inline int get_bits_trace(GetBitContext *s, int n, const char *file,
const char *func, int line)
{
int r = get_bits(s, n);
@@ -532,7 +532,7 @@ static inline int get_bits_trace(GetBitContext *s, int n, char *file,
return r;
}
static inline int get_vlc_trace(GetBitContext *s, VLC_TYPE (*table)[2],
int bits, int max_depth, char *file,
int bits, int max_depth, const char *file,
const char *func, int line)
{
int show = show_bits(s, 24);
@@ -547,7 +547,7 @@ static inline int get_vlc_trace(GetBitContext *s, VLC_TYPE (*table)[2],
bits2, len, r, pos, file, func, line);
return r;
}
static inline int get_xbits_trace(GetBitContext *s, int n, char *file,
static inline int get_xbits_trace(GetBitContext *s, int n, const char *file,
const char *func, int line)
{
int show = show_bits(s, n);

View File

@@ -374,7 +374,9 @@ static inline int get_sr_golomb_shorten(GetBitContext* gb, int k)
#ifdef TRACE
static inline int get_ue(GetBitContext *s, char *file, const char *func, int line){
static inline int get_ue(GetBitContext *s, const char *file, const char *func,
int line)
{
int show= show_bits(s, 24);
int pos= get_bits_count(s);
int i= get_ue_golomb(s);
@@ -388,7 +390,9 @@ static inline int get_ue(GetBitContext *s, char *file, const char *func, int lin
return i;
}
static inline int get_se(GetBitContext *s, char *file, const char *func, int line){
static inline int get_se(GetBitContext *s, const char *file, const char *func,
int line)
{
int show= show_bits(s, 24);
int pos= get_bits_count(s);
int i= get_se_golomb(s);

View File

@@ -147,11 +147,11 @@ int ff_h264_fill_default_ref_list(H264Context *h){
}
#ifdef TRACE
for (i=0; i<h->ref_count[0]; i++) {
tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]);
tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].f.data[0]);
}
if(h->slice_type_nos==AV_PICTURE_TYPE_B){
for (i=0; i<h->ref_count[1]; i++) {
tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]);
tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].f.data[0]);
}
}
#endif

View File

@@ -21,11 +21,14 @@
#include <opus.h>
#include <opus_multistream.h>
#include "libavutil/common.h"
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "internal.h"
#include "vorbis.h"
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "mathops.h"
struct libopus_context {
OpusMSDecoder *dec;
@@ -36,7 +39,7 @@ struct libopus_context {
#endif
};
static int ff_opus_error_to_averror(int err)
static int opus_error_to_averror(int err)
{
switch (err) {
case OPUS_BAD_ARG: return AVERROR(EINVAL);
@@ -50,40 +53,24 @@ static int ff_opus_error_to_averror(int err)
}
}
static inline void reorder(uint8_t *data, unsigned channels, unsigned bps,
unsigned samples, const uint8_t *map)
{
uint8_t tmp[8 * 4];
unsigned i;
av_assert1(channels * bps <= sizeof(tmp));
for (; samples > 0; samples--) {
for (i = 0; i < channels; i++)
memcpy(tmp + bps * i, data + bps * map[i], bps);
memcpy(data, tmp, bps * channels);
data += bps * channels;
}
}
#define OPUS_HEAD_SIZE 19
static av_cold int libopus_dec_init(AVCodecContext *avc)
static av_cold int libopus_decode_init(AVCodecContext *avc)
{
struct libopus_context *opus = avc->priv_data;
int ret, channel_map = 0, gain_db = 0, nb_streams, nb_coupled;
uint8_t mapping_stereo[] = { 0, 1 }, *mapping;
uint8_t mapping_arr[8] = { 0, 1 }, *mapping;
avc->sample_rate = 48000;
avc->sample_fmt = avc->request_sample_fmt == AV_SAMPLE_FMT_FLT ?
AV_SAMPLE_FMT_FLT : AV_SAMPLE_FMT_S16;
avc->sample_rate = 48000;
avc->sample_fmt = avc->request_sample_fmt == AV_SAMPLE_FMT_FLT ?
AV_SAMPLE_FMT_FLT : AV_SAMPLE_FMT_S16;
avc->channel_layout = avc->channels > 8 ? 0 :
ff_vorbis_channel_layouts[avc->channels - 1];
if (avc->extradata_size >= OPUS_HEAD_SIZE) {
opus->pre_skip = AV_RL16(avc->extradata + 10);
gain_db = AV_RL16(avc->extradata + 16);
channel_map = AV_RL8 (avc->extradata + 18);
gain_db -= (gain_db & 0x8000) << 1; /* signed */
gain_db = sign_extend(AV_RL16(avc->extradata + 16), 16);
channel_map = AV_RL8 (avc->extradata + 18);
}
if (avc->extradata_size >= OPUS_HEAD_SIZE + 2 + avc->channels) {
nb_streams = avc->extradata[OPUS_HEAD_SIZE + 0];
@@ -99,16 +86,26 @@ static av_cold int libopus_dec_init(AVCodecContext *avc)
}
nb_streams = 1;
nb_coupled = avc->channels > 1;
mapping = mapping_stereo;
mapping = mapping_arr;
}
opus->dec = opus_multistream_decoder_create(
avc->sample_rate, avc->channels,
nb_streams, nb_coupled, mapping, &ret);
if (avc->channels > 2 && avc->channels <= 8) {
const uint8_t *vorbis_offset = ff_vorbis_channel_layout_offsets[avc->channels - 1];
int ch;
/* Remap channels from vorbis order to ffmpeg order */
for (ch = 0; ch < avc->channels; ch++)
mapping_arr[ch] = mapping[vorbis_offset[ch]];
mapping = mapping_arr;
}
opus->dec = opus_multistream_decoder_create(avc->sample_rate, avc->channels,
nb_streams, nb_coupled,
mapping, &ret);
if (!opus->dec) {
av_log(avc, AV_LOG_ERROR, "Unable to create decoder: %s\n",
opus_strerror(ret));
return ff_opus_error_to_averror(ret);
return opus_error_to_averror(ret);
}
#ifdef OPUS_SET_GAIN
@@ -127,12 +124,13 @@ static av_cold int libopus_dec_init(AVCodecContext *avc)
#endif
avc->internal->skip_samples = opus->pre_skip;
avc->delay = 3840; /* Decoder delay (in samples) at 48kHz */
avcodec_get_frame_defaults(&opus->frame);
avc->coded_frame = &opus->frame;
return 0;
}
static av_cold int libopus_dec_close(AVCodecContext *avc)
static av_cold int libopus_decode_close(AVCodecContext *avc)
{
struct libopus_context *opus = avc->priv_data;
@@ -140,10 +138,10 @@ static av_cold int libopus_dec_close(AVCodecContext *avc)
return 0;
}
#define MAX_FRAME_SIZE (960*6)
#define MAX_FRAME_SIZE (960 * 6)
static int libopus_dec_decode(AVCodecContext *avc, void *frame,
int *got_frame_ptr, AVPacket *pkt)
static int libopus_decode(AVCodecContext *avc, void *frame,
int *got_frame_ptr, AVPacket *pkt)
{
struct libopus_context *opus = avc->priv_data;
int ret, nb_samples;
@@ -155,25 +153,19 @@ static int libopus_dec_decode(AVCodecContext *avc, void *frame,
return ret;
}
nb_samples = avc->sample_fmt == AV_SAMPLE_FMT_S16 ?
opus_multistream_decode (opus->dec, pkt->data, pkt->size,
(void *)opus->frame.data[0],
opus->frame.nb_samples, 0) :
opus_multistream_decode_float(opus->dec, pkt->data, pkt->size,
(void *)opus->frame.data[0],
opus->frame.nb_samples, 0);
if (avc->sample_fmt == AV_SAMPLE_FMT_S16)
nb_samples = opus_multistream_decode(opus->dec, pkt->data, pkt->size,
(opus_int16 *)opus->frame.data[0],
opus->frame.nb_samples, 0);
else
nb_samples = opus_multistream_decode_float(opus->dec, pkt->data, pkt->size,
(float *)opus->frame.data[0],
opus->frame.nb_samples, 0);
if (nb_samples < 0) {
av_log(avc, AV_LOG_ERROR, "Decoding error: %s\n",
opus_strerror(nb_samples));
return ff_opus_error_to_averror(nb_samples);
}
if (avc->channels > 3 && avc->channels <= 8) {
const uint8_t *m = ff_vorbis_channel_layout_offsets[avc->channels - 1];
if (avc->sample_fmt == AV_SAMPLE_FMT_S16)
reorder(opus->frame.data[0], avc->channels, 2, nb_samples, m);
else
reorder(opus->frame.data[0], avc->channels, 4, nb_samples, m);
return opus_error_to_averror(nb_samples);
}
#ifndef OPUS_SET_GAIN
@@ -197,7 +189,7 @@ static int libopus_dec_decode(AVCodecContext *avc, void *frame,
return pkt->size;
}
static void libopus_dec_flush(AVCodecContext *avc)
static void libopus_flush(AVCodecContext *avc)
{
struct libopus_context *opus = avc->priv_data;
@@ -212,10 +204,13 @@ AVCodec ff_libopus_decoder = {
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_OPUS,
.priv_data_size = sizeof(struct libopus_context),
.init = libopus_dec_init,
.close = libopus_dec_close,
.decode = libopus_dec_decode,
.flush = libopus_dec_flush,
.init = libopus_decode_init,
.close = libopus_decode_close,
.decode = libopus_decode,
.flush = libopus_flush,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("libopus Opus"),
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
};

View File

@@ -5704,6 +5704,7 @@ AVCodec ff_vc1_decoder = {
.init = vc1_decode_init,
.close = ff_vc1_decode_end,
.decode = vc1_decode_frame,
.flush = ff_mpeg_flush,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
.long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
.pix_fmts = ff_hwaccel_pixfmt_list_420,
@@ -5719,6 +5720,7 @@ AVCodec ff_wmv3_decoder = {
.init = vc1_decode_init,
.close = ff_vc1_decode_end,
.decode = vc1_decode_frame,
.flush = ff_mpeg_flush,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
.pix_fmts = ff_hwaccel_pixfmt_list_420,

View File

@@ -27,7 +27,7 @@
*/
#define LIBAVCODEC_VERSION_MAJOR 54
#define LIBAVCODEC_VERSION_MINOR 60
#define LIBAVCODEC_VERSION_MINOR 61
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \

View File

@@ -32,8 +32,11 @@
/* XXX: POST protocol is not completely implemented because ffmpeg uses
only a subset of it. */
/* used for protocol handling */
#define BUFFER_SIZE 4096
/* The IO buffer size is unrelated to the max URL size in itself, but needs
* to be large enough to fit the full request headers (including long
* path names).
*/
#define BUFFER_SIZE MAX_URL_SIZE
#define MAX_REDIRECTS 8
typedef struct {
@@ -101,8 +104,8 @@ static int http_open_cnx(URLContext *h)
const char *path, *proxy_path, *lower_proto = "tcp", *local_path;
char hostname[1024], hoststr[1024], proto[10];
char auth[1024], proxyauth[1024] = "";
char path1[1024];
char buf[1024], urlbuf[1024];
char path1[MAX_URL_SIZE];
char buf[1024], urlbuf[MAX_URL_SIZE];
int port, use_proxy, err, location_changed = 0, redirects = 0, attempts = 0;
HTTPAuthType cur_auth_type, cur_proxy_auth_type;
HTTPContext *s = h->priv_data;
@@ -352,7 +355,7 @@ static inline int has_header(const char *str, const char *header)
static int http_read_header(URLContext *h, int *new_location)
{
HTTPContext *s = h->priv_data;
char line[1024];
char line[MAX_URL_SIZE];
int err = 0;
s->chunksize = -1;

View File

@@ -266,13 +266,17 @@ static void put_s(AVIOContext *bc, int64_t val){
}
#ifdef TRACE
static inline void ff_put_v_trace(AVIOContext *bc, uint64_t v, char *file, char *func, int line){
static inline void ff_put_v_trace(AVIOContext *bc, uint64_t v, const char *file,
const char *func, int line)
{
av_log(NULL, AV_LOG_DEBUG, "ff_put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
ff_put_v(bc, v);
}
static inline void put_s_trace(AVIOContext *bc, int64_t v, char *file, char *func, int line){
static inline void put_s_trace(AVIOContext *bc, int64_t v, const char *file,
const char *func, int line)
{
av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
put_s(bc, v);