Imported Upstream version 3.1.2

Author: Sebastian Ramacher, 2016-08-10 19:46:26 +02:00
parent c1a7bf0ae2
commit 9c01f11db3
35 changed files with 356 additions and 110 deletions

View File

@ -4,6 +4,37 @@ releases are sorted from youngest to oldest.
version <next>:
version 3.1.2:
- cmdutils: remove the current working directory from the DLL search path on win32
- avcodec/rawdec: Fix palette handling with changing palettes
- avcodec/raw: Fix decoding of ilacetest.mov
- avformat/mov: Enable mp3 parsing if a packet needs it
- avformat/hls: Use an array instead of stream offset for stream mapping
- avformat/hls: Sync starting segment across variants on live streams
- avformat/hls: Fix regression with ranged media segments
- avcodec/ffv1enc: Fix assertion failure with non-zero bits per sample
- avfilter/af_hdcd: fix gain not being adjusted in the "attenuate slowly" path
- avformat/oggdec: Fix integer overflow with invalid pts
- ffplay: Fix invalid array index
- avcodec/alacenc: allocate bigger packets (cherry picked from commit 82b84c71b009884c8d041361027718b19922c76d)
- libavcodec/dnxhd: Enable 12-bit DNxHR support.
- lavc/vaapi_encode_h26x: Fix a crash if "." is not the decimal separator.
- jni: Return ENOSYS on unsupported platforms
- lavu/hwcontext_vaapi: Fix compilation if VA_FOURCC_ABGR is not defined.
- avcodec/vp9_parser: Check the input frame sizes for being consistent
- avformat/flvdec: parse keyframes before the a/v stream is created; call add_keyframes_index() when a stream is created or a keyframe is parsed
- avformat/flvdec: splitting add_keyframes_index() out from parse_keyframes_index()
- libavformat/rtpdec_asf: zero initialize the AVIOContext struct
- libavutil/opt: Small bugfix in example.
- libx264: Increase x264 opts character limit to 4096
- avcodec/h264_parser: Set sps/pps_ref
- librtmp: Avoid an infinite loop when setting connection arguments
- avformat/oggparsevp8: fix pts calculation on pages ending with an invisible frame
- lavc/Makefile: Fix standalone compilation of the svq3 decoder.
- lavf/vplayerdec: Improve auto-detection.
- lavc/mediacodecdec_h264: properly convert extradata to annex-b
- Revert "configure: Enable GCC vectorization on ≥4.9 on x86"
version 3.1.1:
- doc/APIchanges: document the lavu/lavf field moves
- avformat/avformat: Move new field to the end of AVStream

View File

@ -1 +1 @@
3.1.1
3.1.2

View File

@ -1 +1 @@
3.1.1
3.1.2

View File

@ -107,6 +107,15 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
}
}
void init_dynload(void)
{
#ifdef _WIN32
/* Calling SetDllDirectory with the empty string (but not NULL) removes the
* current working directory from the DLL search path as a security precaution. */
SetDllDirectory("");
#endif
}
static void (*program_exit)(int ret);
void register_exit(void (*cb)(int ret))

View File

@ -61,6 +61,11 @@ void register_exit(void (*cb)(int ret));
*/
void exit_program(int ret) av_noreturn;
/**
* Initialize dynamic library loading
*/
void init_dynload(void);
/**
* Initialize the cmdutils option system, in particular
* allocate the *_opts contexts.

configure (vendored)
View File

@ -6125,11 +6125,7 @@ elif enabled ccc; then
add_cflags -msg_disable nonstandcast
add_cflags -msg_disable unsupieee
elif enabled gcc; then
case $gcc_basever in
4.9*) enabled x86 || check_optflags -fno-tree-vectorize ;;
4.*) check_optflags -fno-tree-vectorize ;;
*) enabled x86 || check_optflags -fno-tree-vectorize ;;
esac
check_optflags -fno-tree-vectorize
check_cflags -Werror=format-security
check_cflags -Werror=implicit-function-declaration
check_cflags -Werror=missing-prototypes

View File

@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER = 3.1.1
PROJECT_NUMBER = 3.1.2
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55

View File

@ -4303,6 +4303,8 @@ int main(int argc, char **argv)
int ret;
int64_t ti;
init_dynload();
register_exit(ffmpeg_cleanup);
setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

View File

@ -2936,7 +2936,7 @@ static int read_thread(void *arg)
AVStream *st = ic->streams[i];
enum AVMediaType type = st->codecpar->codec_type;
st->discard = AVDISCARD_ALL;
if (wanted_stream_spec[type] && st_index[type] == -1)
if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
st_index[type] = i;
}
@ -3776,6 +3776,8 @@ int main(int argc, char **argv)
char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
char alsa_bufsize[] = "SDL_AUDIO_ALSA_SET_BUFFER_SIZE=1";
init_dynload();
av_log_set_flags(AV_LOG_SKIP_REPEATED);
parse_loglevel(argc, argv, options);

View File

@ -3241,6 +3241,8 @@ int main(int argc, char **argv)
char *w_name = NULL, *w_args = NULL;
int ret, i;
init_dynload();
av_log_set_flags(AV_LOG_SKIP_REPEATED);
register_exit(ffprobe_cleanup);

View File

@ -3980,6 +3980,7 @@ int main(int argc, char **argv)
int cfg_parsed;
int ret = EXIT_FAILURE;
init_dynload();
config.filename = av_strdup("/etc/ffserver.conf");

View File

@ -528,7 +528,8 @@ OBJS-$(CONFIG_SUNRAST_ENCODER) += sunrastenc.o
OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o svq13.o h263data.o
OBJS-$(CONFIG_SVQ1_ENCODER) += svq1enc.o svq1.o h263data.o \
h263.o ituh263enc.o
OBJS-$(CONFIG_SVQ3_DECODER) += svq3.o svq13.o mpegutils.o h264_parse.o h264data.o
OBJS-$(CONFIG_SVQ3_DECODER) += svq3.o svq13.o mpegutils.o \
h264_parse.o h264data.o h264_ps.o h2645_parse.o
OBJS-$(CONFIG_TEXT_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_TEXT_ENCODER) += srtenc.o ass_split.o
OBJS-$(CONFIG_TAK_DECODER) += takdec.o tak.o takdsp.o

View File

@ -623,7 +623,7 @@ static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
else
max_frame_size = s->max_coded_frame_size;
if ((ret = ff_alloc_packet2(avctx, avpkt, 2 * max_frame_size, 0)) < 0)
if ((ret = ff_alloc_packet2(avctx, avpkt, 4 * max_frame_size, 0)) < 0)
return ret;
/* use verbatim mode for compression_level 0 */

View File

@ -118,11 +118,6 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, uint32_t cid, int bitdepth)
av_log(ctx->avctx, AV_LOG_ERROR, "bit depth mismatches %d %d\n", ff_dnxhd_cid_table[index].bit_depth, bitdepth);
return AVERROR_INVALIDDATA;
}
if (bitdepth > 10) {
avpriv_request_sample(ctx->avctx, "DNXHR 12-bit");
if (ctx->avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL)
return AVERROR_PATCHWELCOME;
}
ctx->cid_table = &ff_dnxhd_cid_table[index];
av_log(ctx->avctx, AV_LOG_VERBOSE, "Profile cid %d.\n", cid);
@ -133,7 +128,7 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, uint32_t cid, int bitdepth)
init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257,
ctx->cid_table->ac_bits, 1, 1,
ctx->cid_table->ac_codes, 2, 2, 0);
init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, bitdepth + 4,
init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, bitdepth > 8 ? 14 : 12,
ctx->cid_table->dc_bits, 1, 1,
ctx->cid_table->dc_codes, 1, 1, 0);
init_vlc(&ctx->run_vlc, DNXHD_VLC_BITS, 62,

View File

@ -781,14 +781,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
s->colorspace = 1;
s->transparency = 1;
s->chroma_planes = 1;
if (!avctx->bits_per_raw_sample)
s->bits_per_raw_sample = 8;
s->bits_per_raw_sample = 8;
break;
case AV_PIX_FMT_0RGB32:
s->colorspace = 1;
s->chroma_planes = 1;
if (!avctx->bits_per_raw_sample)
s->bits_per_raw_sample = 8;
s->bits_per_raw_sample = 8;
break;
case AV_PIX_FMT_GBRP9:
if (!avctx->bits_per_raw_sample)

View File

@ -367,13 +367,26 @@ static inline int parse_nal_units(AVCodecParserContext *s,
"non-existing PPS %u referenced\n", pps_id);
goto fail;
}
p->ps.pps = (const PPS*)p->ps.pps_list[pps_id]->data;
av_buffer_unref(&p->ps.pps_ref);
av_buffer_unref(&p->ps.sps_ref);
p->ps.pps = NULL;
p->ps.sps = NULL;
p->ps.pps_ref = av_buffer_ref(p->ps.pps_list[pps_id]);
if (!p->ps.pps_ref)
goto fail;
p->ps.pps = (const PPS*)p->ps.pps_ref->data;
if (!p->ps.sps_list[p->ps.pps->sps_id]) {
av_log(avctx, AV_LOG_ERROR,
"non-existing SPS %u referenced\n", p->ps.pps->sps_id);
goto fail;
}
p->ps.sps = (SPS*)p->ps.sps_list[p->ps.pps->sps_id]->data;
p->ps.sps_ref = av_buffer_ref(p->ps.sps_list[p->ps.pps->sps_id]);
if (!p->ps.sps_ref)
goto fail;
p->ps.sps = (SPS*)p->ps.sps_ref->data;
sps = p->ps.sps;
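
The hunk above switches the parser from borrowing the PPS/SPS pointers to holding its own references. A minimal standalone sketch of that reference-counting idiom using the public AVBufferRef API follows; the helper name and the placeholder blob are made up for illustration, this is not the parser code itself.

#include <string.h>
#include <libavutil/buffer.h>

/* Keep our own reference to a shared, refcounted blob so it stays valid
 * even if the owner replaces or drops its copy later. */
static AVBufferRef *keep_reference(AVBufferRef *shared, AVBufferRef *current)
{
    av_buffer_unref(&current);          /* drop whatever we held before */
    current = av_buffer_ref(shared);    /* take a new reference */
    return current;                     /* NULL on allocation failure */
}

int main(void)
{
    AVBufferRef *owner = av_buffer_alloc(16);   /* stand-in for an SPS/PPS blob */
    AVBufferRef *mine  = NULL;

    if (!owner)
        return 1;
    memset(owner->data, 0, owner->size);

    mine = keep_reference(owner, mine);
    av_buffer_unref(&owner);            /* the owner lets go... */
    /* ...but mine->data stays valid until we unref it ourselves. */
    av_buffer_unref(&mine);
    return 0;
}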

View File

@ -20,19 +20,18 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <stdlib.h>
#include "config.h"
#include "libavutil/error.h"
#include "jni.h"
#if CONFIG_JNI
#include <errno.h>
#include <jni.h>
#include <pthread.h>
#include "libavutil/log.h"
#include "libavutil/error.h"
#include "ffjni.h"
void *java_vm;
@ -69,7 +68,7 @@ void *av_jni_get_java_vm(void *log_ctx)
int av_jni_set_java_vm(void *vm, void *log_ctx)
{
return 0;
return AVERROR(ENOSYS);
}
void *av_jni_get_java_vm(void *log_ctx)

View File

@ -777,8 +777,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
if(x4->x264opts){
const char *p= x4->x264opts;
while(p){
char param[256]={0}, val[256]={0};
if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
char param[4096]={0}, val[4096]={0};
if(sscanf(p, "%4095[^:=]=%4095[^:]", param, val) == 1){
OPT_STR(param, "1");
}else
OPT_STR(param, val);

View File

@ -65,6 +65,58 @@ static av_cold int mediacodec_decode_close(AVCodecContext *avctx)
return 0;
}
static int h264_ps_to_nalu(const uint8_t *src, int src_size, uint8_t **out, int *out_size)
{
int i;
int ret = 0;
uint8_t *p = NULL;
static const uint8_t nalu_header[] = { 0x00, 0x00, 0x00, 0x01 };
if (!out || !out_size) {
return AVERROR(EINVAL);
}
p = av_malloc(sizeof(nalu_header) + src_size);
if (!p) {
return AVERROR(ENOMEM);
}
*out = p;
*out_size = sizeof(nalu_header) + src_size;
memcpy(p, nalu_header, sizeof(nalu_header));
memcpy(p + sizeof(nalu_header), src, src_size);
/* Escape 0x00, 0x00, 0x0{0-3} pattern */
for (i = 4; i < *out_size; i++) {
if (i < *out_size - 3 &&
p[i + 0] == 0 &&
p[i + 1] == 0 &&
p[i + 2] <= 3) {
uint8_t *new;
*out_size += 1;
new = av_realloc(*out, *out_size);
if (!new) {
ret = AVERROR(ENOMEM);
goto done;
}
*out = p = new;
i = i + 3;
memmove(p + i, p + i - 1, *out_size - i);
p[i - 1] = 0x03;
}
}
done:
if (ret < 0) {
av_freep(out);
*out_size = 0;
}
return ret;
}
static av_cold int mediacodec_decode_init(AVCodecContext *avctx)
{
int i;
@ -112,24 +164,19 @@ static av_cold int mediacodec_decode_init(AVCodecContext *avctx)
}
if (pps && sps) {
static const uint8_t nal_headers[] = { 0x00, 0x00, 0x00, 0x01 };
uint8_t *data = NULL;
size_t data_size = sizeof(nal_headers) + FFMAX(sps->data_size, pps->data_size);
size_t data_size = 0;
data = av_mallocz(data_size);
if (!data) {
ret = AVERROR(ENOMEM);
if ((ret = h264_ps_to_nalu(sps->data, sps->data_size, &data, &data_size)) < 0) {
goto done;
}
ff_AMediaFormat_setBuffer(format, "csd-0", (void*)data, data_size);
av_freep(&data);
memcpy(data, nal_headers, sizeof(nal_headers));
memcpy(data + sizeof(nal_headers), sps->data, sps->data_size);
ff_AMediaFormat_setBuffer(format, "csd-0", (void*)data, sizeof(nal_headers) + sps->data_size);
memcpy(data + sizeof(nal_headers), pps->data, pps->data_size);
ff_AMediaFormat_setBuffer(format, "csd-1", (void*)data, sizeof(nal_headers) + pps->data_size);
if ((ret = h264_ps_to_nalu(pps->data, pps->data_size, &data, &data_size)) < 0) {
goto done;
}
ff_AMediaFormat_setBuffer(format, "csd-1", (void*)data, data_size);
av_freep(&data);
} else {
av_log(avctx, AV_LOG_ERROR, "Could not extract PPS/SPS from extradata");

View File

@ -31,6 +31,7 @@
const PixelFormatTag ff_raw_pix_fmt_tags[] = {
{ AV_PIX_FMT_YUV420P, MKTAG('I', '4', '2', '0') }, /* Planar formats */
{ AV_PIX_FMT_YUV420P, MKTAG('I', 'Y', 'U', 'V') },
{ AV_PIX_FMT_YUV420P, MKTAG('y', 'v', '1', '2') },
{ AV_PIX_FMT_YUV420P, MKTAG('Y', 'V', '1', '2') },
{ AV_PIX_FMT_YUV410P, MKTAG('Y', 'U', 'V', '9') },
{ AV_PIX_FMT_YUV410P, MKTAG('Y', 'V', 'U', '9') },

View File

@ -365,20 +365,29 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
NULL);
if (pal) {
av_buffer_unref(&context->palette);
int ret;
if (!context->palette)
context->palette = av_buffer_alloc(AVPALETTE_SIZE);
if (!context->palette) {
av_buffer_unref(&frame->buf[0]);
return AVERROR(ENOMEM);
}
if (!context->palette) {
av_buffer_unref(&frame->buf[0]);
return AVERROR(ENOMEM);
}
ret = av_buffer_make_writable(&context->palette);
if (ret < 0) {
av_buffer_unref(&frame->buf[0]);
return ret;
}
if (pal) {
memcpy(context->palette->data, pal, AVPALETTE_SIZE);
frame->palette_has_changed = 1;
} else if (context->is_nut_pal8) {
int vid_size = avctx->width * avctx->height;
if (avpkt->size - vid_size) {
int pal_size = avpkt->size - vid_size;
if (avpkt->size > vid_size && pal_size <= AVPALETTE_SIZE) {
pal = avpkt->data + vid_size;
memcpy(context->palette->data, pal, avpkt->size - vid_size);
memcpy(context->palette->data, pal, pal_size);
frame->palette_has_changed = 1;
}
}
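
A minimal sketch of the copy-on-write idiom the palette fix relies on: allocate the buffer once, then call av_buffer_make_writable() before every in-place update so frames still holding references to the old palette are unaffected. It uses the public libavutil buffer API; the local PAL_SIZE define and fill values are arbitrary stand-ins.

#include <string.h>
#include <libavutil/buffer.h>

#define PAL_SIZE 1024   /* AVPALETTE_SIZE: 256 entries * 4 bytes */

int main(void)
{
    AVBufferRef *palette = av_buffer_alloc(PAL_SIZE);
    AVBufferRef *frame_ref;

    if (!palette)
        return 1;
    memset(palette->data, 0, PAL_SIZE);

    /* A decoded frame takes a reference to the current palette. */
    frame_ref = av_buffer_ref(palette);
    if (!frame_ref)
        return 1;

    /* A new packet carries a new palette: make our copy writable first so
     * the frame that still references the old data keeps seeing it. */
    if (av_buffer_make_writable(&palette) < 0)
        return 1;
    memset(palette->data, 0xff, PAL_SIZE);

    /* frame_ref->data is still all zeroes; palette->data holds the new one. */
    av_buffer_unref(&frame_ref);
    av_buffer_unref(&palette);
    return 0;
}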

View File

@ -967,10 +967,10 @@ static const AVCodecDefault vaapi_encode_h264_defaults[] = {
{ "b", "0" },
{ "bf", "2" },
{ "g", "120" },
{ "i_qfactor", "1.0" },
{ "i_qoffset", "0.0" },
{ "b_qfactor", "1.2" },
{ "b_qoffset", "0.0" },
{ "i_qfactor", "1" },
{ "i_qoffset", "0" },
{ "b_qfactor", "6/5" },
{ "b_qoffset", "0" },
{ NULL },
};

View File

@ -1338,10 +1338,10 @@ static const AVCodecDefault vaapi_encode_h265_defaults[] = {
{ "b", "0" },
{ "bf", "2" },
{ "g", "120" },
{ "i_qfactor", "1.0" },
{ "i_qoffset", "0.0" },
{ "b_qfactor", "1.2" },
{ "b_qoffset", "0.0" },
{ "i_qfactor", "1" },
{ "i_qoffset", "0" },
{ "b_qfactor", "6/5" },
{ "b_qoffset", "0" },
{ NULL },
};
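
Both defaults tables (H.264 above and H.265 here) drop locale-sensitive decimal literals in favour of integers and rationals. A small standalone sketch of the underlying pitfall: strtod() honours LC_NUMERIC, so "1.2" parses differently under a comma-decimal locale. The exact parsing path inside libavcodec is not shown here, and the de_DE.UTF-8 locale is an assumption that must be installed on the test system.

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *s = "1.2";
    char *end;

    double with_c = strtod(s, &end);            /* C locale: 1.2 */
    printf("C locale:     %f (consumed %d chars)\n", with_c, (int)(end - s));

    /* Assumed locale name; setlocale() returns NULL if it is not installed. */
    if (setlocale(LC_NUMERIC, "de_DE.UTF-8")) {
        double with_de = strtod(s, &end);       /* comma locale: parsing stops at '.' -> 1.0 */
        printf("de_DE locale: %f (consumed %d chars)\n", with_de, (int)(end - s));
    }

    /* A ratio such as "6/5" or a plain integer avoids the decimal separator
     * entirely, which is what the defaults above switch to. */
    return 0;
}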

View File

@ -28,6 +28,7 @@
typedef struct VP9ParseContext {
int n_frames; // 1-8
int size[8];
int marker_size;
int64_t pts;
} VP9ParseContext;
@ -88,6 +89,21 @@ static int parse(AVCodecParserContext *ctx,
return 0;
}
if (s->n_frames > 0) {
int i;
int size_sum = 0;
for (i = 0; i < s->n_frames ;i++)
size_sum += s->size[i];
size_sum += s->marker_size;
if (size_sum != size) {
av_log(avctx, AV_LOG_ERROR, "Inconsistent input frame sizes %d %d\n",
size_sum, size);
s->n_frames = 0;
}
}
if (s->n_frames > 0) {
*out_data = data;
*out_size = s->size[--s->n_frames];
@ -131,6 +147,7 @@ static int parse(AVCodecParserContext *ctx,
data += sz; \
size -= sz; \
} \
s->marker_size = size; \
parse_frame(ctx, *out_data, *out_size); \
return s->n_frames > 0 ? *out_size : full_size
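
The consistency check added above is simple arithmetic: the frame sizes recorded from the superframe index, plus the index (marker) bytes themselves, must add up to the packet size. A tiny sketch with made-up numbers; the helper name and sizes are hypothetical, not real VP9 data.

#include <stdio.h>

/* Returns 1 if the recorded frame sizes plus the superframe index account
 * for the whole packet, 0 otherwise. */
static int sizes_consistent(const int *frame_sizes, int n_frames,
                            int marker_size, int packet_size)
{
    int i, sum = marker_size;

    for (i = 0; i < n_frames; i++)
        sum += frame_sizes[i];
    return sum == packet_size;
}

int main(void)
{
    const int sizes[] = { 2000, 150 };  /* e.g. one visible + one alt-ref frame */
    int marker = 1 + 2 * 2 + 1;         /* assumed index: marker byte, two 2-byte sizes, marker byte */

    printf("ok=%d\n", sizes_consistent(sizes, 2, marker, 2000 + 150 + marker));
    printf("ok=%d\n", sizes_consistent(sizes, 2, marker, 4096)); /* truncated/corrupt input */
    return 0;
}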

View File

@ -949,6 +949,7 @@ static int hdcd_envelope(int32_t *samples, int count, int stride, int gain, int
int len = FFMIN(count, target_gain - gain);
/* attenuate slowly */
for (i = 0; i < len; i++) {
++gain;
APPLY_GAIN(*samples, gain);
samples += stride;
}

View File

@ -61,6 +61,11 @@ typedef struct FLVContext {
int broken_sizes;
int sum_flv_tag_size;
int last_keyframe_stream_index;
int keyframe_count;
int64_t *keyframe_times;
int64_t *keyframe_filepositions;
} FLVContext;
static int probe(AVProbeData *p, int live)
@ -92,8 +97,38 @@ static int live_flv_probe(AVProbeData *p)
return probe(p, 1);
}
static void add_keyframes_index(AVFormatContext *s)
{
FLVContext *flv = s->priv_data;
AVStream *stream = NULL;
unsigned int i = 0;
if (flv->last_keyframe_stream_index < 0) {
av_log(s, AV_LOG_DEBUG, "keyframe stream hasn't been created\n");
return;
}
av_assert0(flv->last_keyframe_stream_index <= s->nb_streams);
stream = s->streams[flv->last_keyframe_stream_index];
if (stream->nb_index_entries == 0) {
for (i = 0; i < flv->keyframe_count; i++) {
av_add_index_entry(stream, flv->keyframe_filepositions[i],
flv->keyframe_times[i] * 1000, 0, 0, AVINDEX_KEYFRAME);
}
} else
av_log(s, AV_LOG_WARNING, "Skipping duplicate index\n");
if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
av_freep(&flv->keyframe_times);
av_freep(&flv->keyframe_filepositions);
flv->keyframe_count = 0;
}
}
static AVStream *create_stream(AVFormatContext *s, int codec_type)
{
FLVContext *flv = s->priv_data;
AVStream *st = avformat_new_stream(s, NULL);
if (!st)
return NULL;
@ -104,6 +139,8 @@ static AVStream *create_stream(AVFormatContext *s, int codec_type)
s->ctx_flags &= ~AVFMTCTX_NOHEADER;
avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
flv->last_keyframe_stream_index = s->nb_streams - 1;
add_keyframes_index(s);
return st;
}
@ -305,8 +342,7 @@ static int amf_get_string(AVIOContext *ioc, char *buffer, int buffsize)
return length;
}
static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc,
AVStream *vstream, int64_t max_pos)
static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc, int64_t max_pos)
{
FLVContext *flv = s->priv_data;
unsigned int timeslen = 0, fileposlen = 0, i;
@ -316,10 +352,12 @@ static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc,
int ret = AVERROR(ENOSYS);
int64_t initial_pos = avio_tell(ioc);
if (vstream->nb_index_entries>0) {
av_log(s, AV_LOG_WARNING, "Skipping duplicate index\n");
if (flv->keyframe_count > 0) {
av_log(s, AV_LOG_DEBUG, "keyframes have been parsed\n");
return 0;
}
av_assert0(!flv->keyframe_times);
av_assert0(!flv->keyframe_filepositions);
if (s->flags & AVFMT_FLAG_IGNIDX)
return 0;
@ -368,15 +406,16 @@ static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc,
}
if (timeslen == fileposlen && fileposlen>1 && max_pos <= filepositions[0]) {
for (i = 0; i < fileposlen; i++) {
av_add_index_entry(vstream, filepositions[i], times[i] * 1000,
0, 0, AVINDEX_KEYFRAME);
if (i < 2) {
flv->validate_index[i].pos = filepositions[i];
flv->validate_index[i].dts = times[i] * 1000;
flv->validate_count = i + 1;
}
for (i = 0; i < FFMIN(2,fileposlen); i++) {
flv->validate_index[i].pos = filepositions[i];
flv->validate_index[i].dts = times[i] * 1000;
flv->validate_count = i + 1;
}
flv->keyframe_times = times;
flv->keyframe_filepositions = filepositions;
flv->keyframe_count = timeslen;
times = NULL;
filepositions = NULL;
} else {
invalid:
av_log(s, AV_LOG_WARNING, "Invalid keyframes object, skipping.\n");
@ -418,13 +457,14 @@ static int amf_parse_object(AVFormatContext *s, AVStream *astream,
}
break;
case AMF_DATA_TYPE_OBJECT:
if ((vstream || astream) && key &&
if (key &&
ioc->seekable &&
!strcmp(KEYFRAMES_TAG, key) && depth == 1)
if (parse_keyframes_index(s, ioc, vstream ? vstream : astream,
if (parse_keyframes_index(s, ioc,
max_pos) < 0)
av_log(s, AV_LOG_ERROR, "Keyframe index parsing failed\n");
else
add_keyframes_index(s);
while (avio_tell(ioc) < max_pos - 2 &&
amf_get_string(ioc, str_val, sizeof(str_val)) > 0)
if (amf_parse_object(s, astream, vstream, str_val, max_pos,
@ -574,6 +614,7 @@ static int amf_parse_object(AVFormatContext *s, AVStream *astream,
static int flv_read_metabody(AVFormatContext *s, int64_t next_pos)
{
FLVContext *flv = s->priv_data;
AMFDataType type;
AVStream *stream, *astream, *vstream;
AVStream av_unused *dstream;
@ -612,10 +653,14 @@ static int flv_read_metabody(AVFormatContext *s, int64_t next_pos)
// the lookup every time it is called.
for (i = 0; i < s->nb_streams; i++) {
stream = s->streams[i];
if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
vstream = stream;
else if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
flv->last_keyframe_stream_index = i;
} else if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
astream = stream;
if (flv->last_keyframe_stream_index == -1)
flv->last_keyframe_stream_index = i;
}
else if (stream->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
dstream = stream;
}
@ -643,6 +688,7 @@ static int flv_read_header(AVFormatContext *s)
s->start_time = 0;
flv->sum_flv_tag_size = 0;
flv->last_keyframe_stream_index = -1;
return 0;
}
@ -653,6 +699,8 @@ static int flv_read_close(AVFormatContext *s)
FLVContext *flv = s->priv_data;
for (i=0; i<FLV_STREAM_TYPE_NB; i++)
av_freep(&flv->new_extradata[i]);
av_freep(&flv->keyframe_times);
av_freep(&flv->keyframe_filepositions);
return 0;
}
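
The flvdec reorganisation above is essentially a deferred-flush pattern: keyframe times and file positions found in the metadata are parked in two arrays and only pushed into the stream's index once the stream actually exists. A generic sketch of that pattern in plain C; the names and fixed-size arrays are illustrative, not the lavf API.

#include <stdio.h>

#define MAX_KEYFRAMES 16

/* Parked metadata, filled while parsing; flushed when the consumer appears. */
static long long kf_times[MAX_KEYFRAMES];
static long long kf_positions[MAX_KEYFRAMES];
static int kf_count;

static void park_keyframe(long long time_ms, long long pos)
{
    if (kf_count < MAX_KEYFRAMES) {
        kf_times[kf_count] = time_ms;
        kf_positions[kf_count] = pos;
        kf_count++;
    }
}

static void flush_keyframes_to_stream(void)
{
    int i;
    for (i = 0; i < kf_count; i++)
        printf("index entry: t=%lldms pos=%lld\n", kf_times[i], kf_positions[i]);
    kf_count = 0;   /* the real code also frees its arrays here */
}

int main(void)
{
    park_keyframe(0, 1024);       /* seen before any stream was created */
    park_keyframe(2000, 90210);
    /* ...later, once the stream is created or a keyframe tag is parsed: */
    flush_keyframes_to_stream();
    return 0;
}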

View File

@ -98,7 +98,11 @@ struct playlist {
int index;
AVFormatContext *ctx;
AVPacket pkt;
int stream_offset;
/* main demuxer streams associated with this playlist
* indexed by the subdemuxer stream indexes */
AVStream **main_streams;
int n_main_streams;
int finished;
enum PlaylistType type;
@ -239,6 +243,7 @@ static void free_playlist_list(HLSContext *c)
struct playlist *pls = c->playlists[i];
free_segment_list(pls);
free_init_section_list(pls);
av_freep(&pls->main_streams);
av_freep(&pls->renditions);
av_freep(&pls->id3_buf);
av_dict_free(&pls->id3_initial);
@ -590,7 +595,7 @@ static void update_options(char **dest, const char *name, void *src)
}
static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
AVDictionary *opts, AVDictionary *opts2)
AVDictionary *opts, AVDictionary *opts2, int *is_http)
{
HLSContext *c = s->priv_data;
AVDictionary *tmp = NULL;
@ -631,6 +636,9 @@ static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
av_dict_free(&tmp);
if (is_http)
*is_http = av_strstart(proto_name, "http", NULL);
return ret;
}
@ -1072,6 +1080,7 @@ static int open_input(HLSContext *c, struct playlist *pls, struct segment *seg)
{
AVDictionary *opts = NULL;
int ret;
int is_http = 0;
// broker prior HTTP options that should be consistent across requests
av_dict_set(&opts, "user-agent", c->user_agent, 0);
@ -1091,13 +1100,13 @@ static int open_input(HLSContext *c, struct playlist *pls, struct segment *seg)
seg->url, seg->url_offset, pls->index);
if (seg->key_type == KEY_NONE) {
ret = open_url(pls->parent, &pls->input, seg->url, c->avio_opts, opts);
ret = open_url(pls->parent, &pls->input, seg->url, c->avio_opts, opts, &is_http);
} else if (seg->key_type == KEY_AES_128) {
AVDictionary *opts2 = NULL;
char iv[33], key[33], url[MAX_URL_SIZE];
if (strcmp(seg->key, pls->key_url)) {
AVIOContext *pb;
if (open_url(pls->parent, &pb, seg->key, c->avio_opts, opts) == 0) {
if (open_url(pls->parent, &pb, seg->key, c->avio_opts, opts, NULL) == 0) {
ret = avio_read(pb, pls->key, sizeof(pls->key));
if (ret != sizeof(pls->key)) {
av_log(NULL, AV_LOG_ERROR, "Unable to read key file %s\n",
@ -1122,7 +1131,7 @@ static int open_input(HLSContext *c, struct playlist *pls, struct segment *seg)
av_dict_set(&opts2, "key", key, 0);
av_dict_set(&opts2, "iv", iv, 0);
ret = open_url(pls->parent, &pls->input, url, opts2, opts);
ret = open_url(pls->parent, &pls->input, url, opts2, opts, &is_http);
av_dict_free(&opts2);
@ -1140,8 +1149,15 @@ static int open_input(HLSContext *c, struct playlist *pls, struct segment *seg)
/* Seek to the requested position. If this was a HTTP request, the offset
* should already be where we want it to be, but this allows e.g. local testing
* without a HTTP server. */
if (ret == 0 && seg->key_type == KEY_NONE && seg->url_offset) {
* without a HTTP server.
*
* This is not done for HTTP at all, as avio_seek() does internal bookkeeping
* of the file offset, which gets out of sync with the actual offset when the
* "offset" AVOption is used with the http protocol, causing the seek not to be
* the no-op it should be. A wrong offset received from the server will not be
* noticed without the call, though.
*/
if (ret == 0 && !is_http && seg->key_type == KEY_NONE && seg->url_offset) {
int64_t seekret = avio_seek(pls->input, seg->url_offset, SEEK_SET);
if (seekret < 0) {
av_log(pls->parent, AV_LOG_ERROR, "Unable to seek to offset %"PRId64" of HLS segment '%s'\n", seg->url_offset, seg->url);
@ -1237,13 +1253,13 @@ restart:
/* Check that the playlist is still needed before opening a new
* segment. */
if (v->ctx && v->ctx->nb_streams &&
v->parent->nb_streams >= v->stream_offset + v->ctx->nb_streams) {
if (v->ctx && v->ctx->nb_streams) {
v->needed = 0;
for (i = v->stream_offset; i < v->stream_offset + v->ctx->nb_streams;
i++) {
if (v->parent->streams[i]->discard < AVDISCARD_ALL)
for (i = 0; i < v->n_main_streams; i++) {
if (v->main_streams[i]->discard < AVDISCARD_ALL) {
v->needed = 1;
break;
}
}
}
if (!v->needed) {
@ -1381,8 +1397,8 @@ static void add_metadata_from_renditions(AVFormatContext *s, struct playlist *pl
int rend_idx = 0;
int i;
for (i = 0; i < pls->ctx->nb_streams; i++) {
AVStream *st = s->streams[pls->stream_offset + i];
for (i = 0; i < pls->n_main_streams; i++) {
AVStream *st = pls->main_streams[i];
if (st->codecpar->codec_type != type)
continue;
@ -1508,7 +1524,8 @@ static int hls_read_header(AVFormatContext *s)
{
void *u = (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb;
HLSContext *c = s->priv_data;
int ret = 0, i, j, stream_offset = 0;
int ret = 0, i, j;
int highest_cur_seq_no = 0;
c->ctx = s;
c->interrupt_callback = &s->interrupt_callback;
@ -1583,6 +1600,17 @@ static int hls_read_header(AVFormatContext *s)
add_renditions_to_variant(c, var, AVMEDIA_TYPE_SUBTITLE, var->subtitles_group);
}
/* Select the starting segments */
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
if (pls->n_segments == 0)
continue;
pls->cur_seq_no = select_cur_seq_no(c, pls);
highest_cur_seq_no = FFMAX(highest_cur_seq_no, pls->cur_seq_no);
}
/* Open the demuxer for each playlist */
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
@ -1599,7 +1627,18 @@ static int hls_read_header(AVFormatContext *s)
pls->index = i;
pls->needed = 1;
pls->parent = s;
pls->cur_seq_no = select_cur_seq_no(c, pls);
/*
* If this is a live stream and this playlist looks like it is one segment
* behind, try to sync it up so that every substream starts at the same
* time position (so e.g. avformat_find_stream_info() will see packets from
* all active streams within the first few seconds). This is not very generic,
* though, as the sequence numbers are technically independent.
*/
if (!pls->finished && pls->cur_seq_no == highest_cur_seq_no - 1 &&
highest_cur_seq_no < pls->start_seq_no + pls->n_segments) {
pls->cur_seq_no = highest_cur_seq_no;
}
pls->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
if (!pls->read_buffer){
@ -1625,7 +1664,6 @@ static int hls_read_header(AVFormatContext *s)
}
pls->ctx->pb = &pls->pb;
pls->ctx->io_open = nested_io_open;
pls->stream_offset = stream_offset;
if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0)
goto fail;
@ -1665,13 +1703,13 @@ static int hls_read_header(AVFormatContext *s)
avpriv_set_pts_info(st, 33, 1, MPEG_TIME_BASE);
else
avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
dynarray_add(&pls->main_streams, &pls->n_main_streams, st);
}
add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_AUDIO);
add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_VIDEO);
add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_SUBTITLE);
stream_offset += pls->ctx->nb_streams;
}
/* Create a program for each variant */
@ -1689,10 +1727,10 @@ static int hls_read_header(AVFormatContext *s)
int is_shared = playlist_in_multiple_variants(c, pls);
int k;
for (k = 0; k < pls->ctx->nb_streams; k++) {
struct AVStream *st = s->streams[pls->stream_offset + k];
for (k = 0; k < pls->n_main_streams; k++) {
struct AVStream *st = pls->main_streams[k];
av_program_add_stream_index(s, i, pls->stream_offset + k);
av_program_add_stream_index(s, i, st->index);
/* Set variant_bitrate for streams unique to this variant */
if (!is_shared && v->bandwidth)
@ -1871,8 +1909,17 @@ static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
/* If we got a packet, return it */
if (minplaylist >= 0) {
struct playlist *pls = c->playlists[minplaylist];
if (pls->pkt.stream_index >= pls->n_main_streams) {
av_log(s, AV_LOG_ERROR, "stream index inconsistency: index %d, %d main streams, %d subdemuxer streams\n",
pls->pkt.stream_index, pls->n_main_streams, pls->ctx->nb_streams);
av_packet_unref(&pls->pkt);
reset_packet(&pls->pkt);
return AVERROR_BUG;
}
*pkt = pls->pkt;
pkt->stream_index += pls->stream_offset;
pkt->stream_index = pls->main_streams[pls->pkt.stream_index]->index;
reset_packet(&c->playlists[minplaylist]->pkt);
if (pkt->dts != AV_NOPTS_VALUE)
@ -1904,6 +1951,8 @@ static int hls_read_seek(AVFormatContext *s, int stream_index,
HLSContext *c = s->priv_data;
struct playlist *seek_pls = NULL;
int i, seq_no;
int j;
int stream_subdemuxer_index;
int64_t first_timestamp, seek_timestamp, duration;
if ((flags & AVSEEK_FLAG_BYTE) ||
@ -1927,10 +1976,12 @@ static int hls_read_seek(AVFormatContext *s, int stream_index,
/* find the playlist with the specified stream */
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
if (stream_index >= pls->stream_offset &&
stream_index - pls->stream_offset < pls->ctx->nb_streams) {
seek_pls = pls;
break;
for (j = 0; j < pls->n_main_streams; j++) {
if (pls->main_streams[j] == s->streams[stream_index]) {
seek_pls = pls;
stream_subdemuxer_index = j;
break;
}
}
}
/* check if the timestamp is valid for the playlist with the
@ -1940,7 +1991,7 @@ static int hls_read_seek(AVFormatContext *s, int stream_index,
/* set segment now so we do not need to search again below */
seek_pls->cur_seq_no = seq_no;
seek_pls->seek_stream_index = stream_index - seek_pls->stream_offset;
seek_pls->seek_stream_index = stream_subdemuxer_index;
for (i = 0; i < c->n_playlists; i++) {
/* Reset reading */

View File

@ -193,6 +193,8 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
if (sep)
p = sep + 1;
else
break;
}
}
if (ctx->playpath) {

View File

@ -43,6 +43,7 @@
#include "libavutil/sha.h"
#include "libavutil/timecode.h"
#include "libavcodec/ac3tab.h"
#include "libavcodec/mpegaudiodecheader.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
@ -5222,6 +5223,10 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
return ret;
}
#endif
if (st->codecpar->codec_id == AV_CODEC_ID_MP3 && !st->need_parsing && pkt->size > 4) {
if (ff_mpa_check_header(AV_RB32(pkt->data)) < 0)
st->need_parsing = AVSTREAM_PARSE_FULL;
}
}
pkt->stream_index = sc->ffindex;
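
For reference, a hedged re-statement of the kind of 32-bit sanity check ff_mpa_check_header() performs on those first four bytes (sync bits plus a few reserved-value checks); the function name and test values here are for illustration only, not the libavcodec implementation.

#include <stdint.h>
#include <stdio.h>

/* Reject values that cannot be a valid MPEG audio frame header:
 * 11 sync bits set, layer not reserved, bitrate index != 15, sample-rate index != 3. */
static int looks_like_mpa_header(uint32_t h)
{
    if ((h & 0xffe00000) != 0xffe00000) return 0;   /* sync */
    if (((h >> 17) & 3) == 0)           return 0;   /* reserved layer */
    if (((h >> 12) & 0xf) == 0xf)       return 0;   /* bad bitrate index */
    if (((h >> 10) & 3) == 3)           return 0;   /* bad sample-rate index */
    return 1;
}

int main(void)
{
    printf("%d\n", looks_like_mpa_header(0xfffb9064));  /* typical MPEG-1 Layer III header -> 1 */
    printf("%d\n", looks_like_mpa_header(0x00000000));  /* not a frame header -> 0 */
    return 0;
}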

View File

@ -162,6 +162,11 @@ ogg_gptopts (AVFormatContext * s, int i, uint64_t gp, int64_t *dts)
if (dts)
*dts = pts;
}
if (pts > INT64_MAX && pts != AV_NOPTS_VALUE) {
// The return type is unsigned, so we cannot return a negative pts
av_log(s, AV_LOG_ERROR, "invalid pts %"PRId64"\n", pts);
pts = AV_NOPTS_VALUE;
}
return pts;
}

View File

@ -82,7 +82,11 @@ static uint64_t vp8_gptopts(AVFormatContext *s, int idx,
struct ogg *ogg = s->priv_data;
struct ogg_stream *os = ogg->streams + idx;
uint64_t pts = (granule >> 32);
int invcnt = !((granule >> 30) & 3);
// If page granule is that of an invisible vp8 frame, its pts will be
// that of the end of the next visible frame. We subtract 1 for those
// to prevent messing up pts calculations.
uint64_t pts = (granule >> 32) - invcnt;
uint32_t dist = (granule >> 3) & 0x07ffffff;
if (!dist)
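
Based only on the shifts visible above, the VP8-in-Ogg granule packs the pts in the top 32 bits, an "invisible frame" indicator in bits 30-31, and a keyframe distance in bits 3-29. A small sketch decoding a made-up granule value the same way:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Made-up granule: pts 1000, bits 30-31 zero (page ends on an invisible
     * frame), distance 5 since the last keyframe. */
    uint64_t granule = ((uint64_t)1000 << 32) | (5 << 3);

    uint64_t pts  = granule >> 32;
    int invisible = !((granule >> 30) & 3);
    uint32_t dist = (granule >> 3) & 0x07ffffff;

    if (invisible)
        pts -= 1;   /* same adjustment as the fix above */

    printf("pts=%" PRIu64 " dist=%" PRIu32 "\n", pts, dist);   /* pts=999 dist=5 */
    return 0;
}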

View File

@ -101,7 +101,7 @@ int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
{
int ret = 0;
if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) {
AVIOContext pb;
AVIOContext pb = { 0 };
RTSPState *rt = s->priv_data;
AVDictionary *opts = NULL;
int len = strlen(p) * 6 / 8;

View File

@ -36,8 +36,8 @@ static int vplayer_probe(AVProbeData *p)
char c;
const unsigned char *ptr = p->buf;
if ((sscanf(ptr, "%*d:%*d:%*d.%*d%c", &c) == 1 ||
sscanf(ptr, "%*d:%*d:%*d%c", &c) == 1) && strchr(": =", c))
if ((sscanf(ptr, "%*3d:%*2d:%*2d.%*2d%c", &c) == 1 ||
sscanf(ptr, "%*3d:%*2d:%*2d%c", &c) == 1) && strchr(": =", c))
return AVPROBE_SCORE_MAX;
return 0;
}

View File

@ -115,8 +115,10 @@ static struct {
MAP(BGRX, RGB32, BGR0),
MAP(RGBA, RGB32, RGBA),
MAP(RGBX, RGB32, RGB0),
#ifdef VA_FOURCC_ABGR
MAP(ABGR, RGB32, ABGR),
MAP(XBGR, RGB32, 0BGR),
#endif
MAP(ARGB, RGB32, ARGB),
MAP(XRGB, RGB32, 0RGB),
};

View File

@ -58,7 +58,7 @@
* The following example illustrates an AVOptions-enabled struct:
* @code
* typedef struct test_struct {
* AVClass *class;
* const AVClass *class;
* int int_opt;
* char *str_opt;
* uint8_t *bin_opt;
@ -96,7 +96,7 @@
* @code
* test_struct *alloc_test_struct(void)
* {
* test_struct *ret = av_malloc(sizeof(*ret));
* test_struct *ret = av_mallocz(sizeof(*ret));
* ret->class = &test_class;
* av_opt_set_defaults(ret);
* return ret;