/*
 * muxing functions for use within Libav
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "libavcodec/internal.h"
#include "libavcodec/bytestream.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/pixdesc.h"
#include "metadata.h"
#include "id3v2.h"
#include "libavutil/avstring.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#include "riff.h"
#include "audiointerleave.h"
#include "url.h"
#include <stdarg.h>

#if CONFIG_NETWORK
#include "network.h"
#endif

#undef NDEBUG
#include <assert.h>

/**
 * @file
 * muxing functions for use within Libav
 */

static int validate_codec_tag(AVFormatContext *s, AVStream *st)
{
    const AVCodecTag *avctag;
    int n;
    enum AVCodecID id = AV_CODEC_ID_NONE;
    unsigned int tag  = 0;

    /**
     * Check that tag + id is in the table
     * If neither is in the table -> OK
     * If tag is in the table with another id -> FAIL
     * If id is in the table with another tag -> FAIL unless strict < normal
     */
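    /* Illustrative example (assuming the output format's tag table maps the
     * fourcc 'H264' to AV_CODEC_ID_H264): forcing codec_tag 'H264' on a
     * stream whose codec_id is AV_CODEC_ID_MPEG4 matches a table entry with a
     * different id, so the combination is rejected. */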
    for (n = 0; s->oformat->codec_tag[n]; n++) {
        avctag = s->oformat->codec_tag[n];
        while (avctag->id != AV_CODEC_ID_NONE) {
            if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codecpar->codec_tag)) {
                id = avctag->id;
                if (id == st->codecpar->codec_id)
                    return 1;
            }
            if (avctag->id == st->codecpar->codec_id)
                tag = avctag->tag;
            avctag++;
        }
    }
    if (id != AV_CODEC_ID_NONE)
        return 0;
    if (tag && (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
        return 0;
    return 1;
}

static int init_muxer(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0, i;
    AVStream *st;
    AVDictionary *tmp = NULL;
    AVCodecParameters *par = NULL;
    AVOutputFormat *of = s->oformat;
    const AVCodecDescriptor *desc;

    if (options)
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;

#if FF_API_LAVF_BITEXACT
FF_DISABLE_DEPRECATION_WARNINGS
    if (s->nb_streams && s->streams[0]->codec->flags & AV_CODEC_FLAG_BITEXACT)
        s->flags |= AVFMT_FLAG_BITEXACT;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    // some sanity checks
    if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
        av_log(s, AV_LOG_ERROR, "no streams\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for (i = 0; i < s->nb_streams; i++) {
        st  = s->streams[i];
        par = st->codecpar;

#if FF_API_LAVF_CODEC_TB
FF_DISABLE_DEPRECATION_WARNINGS
        if (!st->time_base.num && st->codec->time_base.num) {
            av_log(s, AV_LOG_WARNING, "Using AVStream.codec.time_base as a "
                   "timebase hint to the muxer is deprecated. Set "
                   "AVStream.time_base instead.\n");
            avpriv_set_pts_info(st, 64, st->codec->time_base.num, st->codec->time_base.den);
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
        if (st->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN &&
            st->codec->codec_type    != AVMEDIA_TYPE_UNKNOWN) {
            av_log(s, AV_LOG_WARNING, "Using AVStream.codec to pass codec "
                   "parameters to muxers is deprecated, use AVStream.codecpar "
                   "instead.\n");
            ret = avcodec_parameters_from_context(st->codecpar, st->codec);
            if (ret < 0)
                goto fail;
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        if (!st->time_base.num) {
            /* fall back on the default timebase values */
            if (par->codec_type == AVMEDIA_TYPE_AUDIO && par->sample_rate)
                avpriv_set_pts_info(st, 64, 1, par->sample_rate);
            else
                avpriv_set_pts_info(st, 33, 1, 90000);
        }
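        /* The defaults above are 1/sample_rate for audio and the 90 kHz MPEG
         * clock otherwise (33 bits matching the width of an MPEG PTS field). */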

        switch (par->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (par->sample_rate <= 0) {
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if (!par->block_align)
                par->block_align = par->channels *
                                   av_get_bits_per_sample(par->codec_id) >> 3;
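            /* e.g. 16-bit PCM with 2 channels: 2 * 16 >> 3 = 4 bytes per
             * sample frame; av_get_bits_per_sample() returns 0 for codecs
             * without a fixed bits-per-sample, leaving block_align at 0. */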
            break;
        case AVMEDIA_TYPE_VIDEO:
            if ((par->width <= 0 || par->height <= 0) &&
                !(of->flags & AVFMT_NODIMENSIONS)) {
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }

            if (av_cmp_q(st->sample_aspect_ratio,
                         par->sample_aspect_ratio)) {
                if (st->sample_aspect_ratio.num != 0 &&
                    st->sample_aspect_ratio.den != 0 &&
                    par->sample_aspect_ratio.num != 0 &&
                    par->sample_aspect_ratio.den != 0) {
                    av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
                           "(%d/%d) and encoder layer (%d/%d)\n",
                           st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                           par->sample_aspect_ratio.num,
                           par->sample_aspect_ratio.den);
                    ret = AVERROR(EINVAL);
                    goto fail;
                }
            }
            break;
        }

        desc = avcodec_descriptor_get(par->codec_id);
        if (desc && desc->props & AV_CODEC_PROP_REORDER)
            st->internal->reorder = 1;
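        /* AV_CODEC_PROP_REORDER marks codecs (e.g. H.264 with B-frames) whose
         * coded order may differ from presentation order, so pts and dts can
         * legitimately diverge for this stream. */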

        if (of->codec_tag) {
            if (par->codec_tag &&
                par->codec_id == AV_CODEC_ID_RAWVIDEO &&
                !av_codec_get_tag(of->codec_tag, par->codec_id) &&
                !validate_codec_tag(s, st)) {
                // the current rawvideo encoding system ends up setting
                // the wrong codec_tag for avi, we override it here
                par->codec_tag = 0;
            }
            if (par->codec_tag) {
                if (!validate_codec_tag(s, st)) {
                    char tagbuf[32];
                    av_get_codec_tag_string(tagbuf, sizeof(tagbuf), par->codec_tag);
                    av_log(s, AV_LOG_ERROR,
                           "Tag %s/0x%08x incompatible with output codec id '%d'\n",
                           tagbuf, par->codec_tag, par->codec_id);
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
            } else
                par->codec_tag = av_codec_get_tag(of->codec_tag, par->codec_id);
        }

        if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT)
            s->internal->nb_interleaved_streams++;
    }

    if (!s->priv_data && of->priv_data_size > 0) {
        s->priv_data = av_mallocz(of->priv_data_size);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (of->priv_class) {
            *(const AVClass **)s->priv_data = of->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }

    /* set muxer identification string */
    if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
        av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
    }

    if (options) {
        av_dict_free(options);
        *options = tmp;
    }

    return 0;

fail:
    av_dict_free(&tmp);
    return ret;
}

int avformat_write_header(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0;

    if (ret = init_muxer(s, options))
        return ret;

    if (s->oformat->write_header) {
        ret = s->oformat->write_header(s);
        if (ret < 0)
            return ret;
    }
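
    /* In AUTO mode, leave timestamps untouched for muxers that can store
     * negative timestamps (AVFMT_TS_NEGATIVE) or write none at all
     * (AVFMT_NOTIMESTAMPS); otherwise shift them so that the output
     * timestamps start at a non-negative value. */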
    if (s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO) {
        if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
            s->avoid_negative_ts = 0;
        } else
            s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE;
    }

    return 0;
}
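
/* A minimal caller-side sketch of how this entry point is typically used
 * (a hedged example, not part of this file; "enc_par" and "enc_time_base"
 * are placeholders for values taken from the encoder, and error handling
 * is omitted):
 *
 *     AVFormatContext *oc = NULL;
 *     AVStream *st;
 *
 *     avformat_alloc_output_context2(&oc, NULL, NULL, "out.mkv");
 *     st = avformat_new_stream(oc, NULL);
 *     avcodec_parameters_copy(st->codecpar, enc_par);
 *     st->time_base = enc_time_base;      // a hint; the muxer may override it
 *     avio_open(&oc->pb, "out.mkv", AVIO_FLAG_WRITE);
 *     avformat_write_header(oc, NULL);
 *     // ... av_interleaved_write_frame(oc, &pkt) for each encoded packet ...
 *     av_write_trailer(oc);
 */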
|
|
|
|
|
2015-10-07 13:51:11 +00:00
|
|
|
#if FF_API_COMPUTE_PKT_FIELDS2
|
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 18:42:52 +00:00
FF_DISABLE_DEPRECATION_WARNINGS
//FIXME merge with compute_pkt_fields
static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
    int num, den, i;

    if (!s->internal->missing_ts_warning &&
        !(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
        (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE)) {
        av_log(s, AV_LOG_WARNING,
               "Timestamps are unset in a packet for stream %d. "
               "This is deprecated and will stop working in the future. "
               "Fix your code to set the timestamps properly\n", st->index);
        s->internal->missing_ts_warning = 1;
    }

    av_log(s, AV_LOG_TRACE, "compute_pkt_fields2: pts:%" PRId64 " dts:%" PRId64 " cur_dts:%" PRId64 " b:%d size:%d st:%d\n",
           pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);

    /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
     *  return AVERROR(EINVAL);*/

    /* duration field */
    if (pkt->duration == 0) {
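        /* No duration supplied by the caller: derive one from the stream's
         * frame rate.  Roughly, ff_compute_frame_duration() returns the frame
         * duration as the fraction num/den in seconds, which is then rescaled
         * into the stream time base, taking ticks_per_frame into account. */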
        ff_compute_frame_duration(s, &num, &den, st, NULL, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1,
                                       num * (int64_t)st->time_base.den * st->codec->ticks_per_frame,
                                       den * (int64_t)st->time_base.num);
        }
    }

    if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0)
        pkt->pts = pkt->dts;

    //calculate dts from pts
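    /* st->pts_buffer keeps the pts of the last delay + 1 packets sorted in
     * ascending order (the single bubble pass below restores the ordering
     * after the newest pts is inserted); with a reorder delay of `delay`
     * frames, the smallest entry is the dts of the current packet. */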
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        st->pts_buffer[0] = pkt->pts;
        for (i = 1; i < delay + 1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration;
        for (i = 0; i < delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);

        pkt->dts = st->pts_buffer[0];
    }

    if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
        ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
          st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
        av_log(s, AV_LOG_ERROR,
               "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
               st->index, st->cur_dts, pkt->dts);
        return AVERROR(EINVAL);
    }
    if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
        av_log(s, AV_LOG_ERROR,
               "pts %" PRId64 " < dts %" PRId64 " in stream %d\n",
               pkt->pts, pkt->dts, st->index);
        return AVERROR(EINVAL);
    }

    av_log(s, AV_LOG_TRACE, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n",
           pkt->pts, pkt->dts);
    st->cur_dts = pkt->dts;

    return 0;
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif

/*
 * FIXME: this function should NEVER get undefined pts/dts except when
 * AVFMT_NOTIMESTAMPS is set.
 * Those additional safety checks should be dropped once the correct checks
 * are set in the callers.
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;
    if (s->avoid_negative_ts > 0) {
        AVRational time_base = s->streams[pkt->stream_index]->time_base;
        int64_t offset = 0;
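
        /* The shift is latched once, from the first packet that carries a dts
         * (and, unless MAKE_ZERO is requested, only if that dts is negative);
         * it is then applied to every subsequent packet, rescaled from the
         * time base it was recorded in into the current stream's time base. */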
        if (s->internal->offset == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
            (pkt->dts < 0 || s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO)) {
            s->internal->offset          = -pkt->dts;
            s->internal->offset_timebase = time_base;
        }
        if (s->internal->offset != AV_NOPTS_VALUE)
            offset = av_rescale_q(s->internal->offset, s->internal->offset_timebase, time_base);

        if (pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;

        if (pkt->dts != AV_NOPTS_VALUE && pkt->dts < 0) {
            av_log(s, AV_LOG_WARNING,
                   "Packets poorly interleaved, failed to avoid negative "
                   "timestamp %"PRId64" in stream %d.\n"
                   "Try -max_interleave_delta 0 as a possible workaround.\n",
                   pkt->dts, pkt->stream_index);
        }
    }
    ret = s->oformat->write_packet(s, pkt);

    if (s->pb && ret >= 0) {
        if (s->flags & AVFMT_FLAG_FLUSH_PACKETS)
            avio_flush(s->pb);
        if (s->pb->error < 0)
            ret = s->pb->error;
    }

    return ret;
}

static int check_packet(AVFormatContext *s, AVPacket *pkt)
{
    if (!pkt)
        return 0;

    if (pkt->stream_index < 0 || pkt->stream_index >= s->nb_streams) {
        av_log(s, AV_LOG_ERROR, "Invalid packet stream index: %d\n",
               pkt->stream_index);
        return AVERROR(EINVAL);
    }

    if (s->streams[pkt->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_ATTACHMENT) {
        av_log(s, AV_LOG_ERROR, "Received a packet for an attachment stream.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static int prepare_input_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    ret = check_packet(s, pkt);
    if (ret < 0)
        return ret;

#if !FF_API_COMPUTE_PKT_FIELDS2
    /* sanitize the timestamps */
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        AVStream *st = s->streams[pkt->stream_index];

        /* when there is no reordering (so dts is equal to pts), but
         * only one of them is set, set the other as well */
        if (!st->internal->reorder) {
            if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            if (pkt->dts == AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE)
                pkt->dts = pkt->pts;
        }

        /* check that the timestamps are set */
        if (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE) {
            av_log(s, AV_LOG_ERROR,
                   "Timestamps are unset in a packet for stream %d\n", st->index);
            return AVERROR(EINVAL);
        }

        /* check that the dts are increasing (or at least non-decreasing,
         * if the format allows it) */
        if (st->cur_dts != AV_NOPTS_VALUE &&
            ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) ||
             st->cur_dts > pkt->dts)) {
            av_log(s, AV_LOG_ERROR,
                   "Application provided invalid, non monotonically increasing "
                   "dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
                   st->index, st->cur_dts, pkt->dts);
            return AVERROR(EINVAL);
        }

        if (pkt->pts < pkt->dts) {
            av_log(s, AV_LOG_ERROR, "pts %" PRId64 " < dts %" PRId64 " in stream %d\n",
                   pkt->pts, pkt->dts, st->index);
            return AVERROR(EINVAL);
        }
    }
#endif

    return 0;
}

int av_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    ret = prepare_input_packet(s, pkt);
    if (ret < 0)
        return ret;

    if (!pkt) {
        if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
            return s->oformat->write_packet(s, pkt);
        return 1;
    }

#if FF_API_COMPUTE_PKT_FIELDS2
    ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);

    if (ret < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return ret;
#endif

    ret = write_packet(s, pkt);

    if (ret >= 0)
        s->streams[pkt->stream_index]->nb_frames++;
    return ret;
}
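
/* Note: av_write_frame() hands the packet straight to the muxer with no
 * buffering, so the caller is responsible for correct interleaving;
 * av_interleaved_write_frame() below buffers packets and interleaves them by
 * dts.  The two must not be mixed on the same muxing context. */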

int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
                             int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
{
    int ret;
    AVPacketList **next_point, *this_pktl;

    this_pktl = av_mallocz(sizeof(AVPacketList));
    if (!this_pktl)
        return AVERROR(ENOMEM);

    if ((ret = av_packet_ref(&this_pktl->pkt, pkt)) < 0) {
        av_free(this_pktl);
        return ret;
    }

    if (s->streams[pkt->stream_index]->last_in_packet_buffer) {
        next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
    } else
        next_point = &s->internal->packet_buffer;
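
    /* In the common case the new packet simply goes to the tail of the queue.
     * Otherwise, scan forward from next_point (the stream's own last buffered
     * packet, or the head of the queue) for the first buffered packet that
     * compares later than the new one, and insert in front of it. */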
    if (*next_point) {
        if (compare(s, &s->internal->packet_buffer_end->pkt, pkt)) {
            while (!compare(s, &(*next_point)->pkt, pkt))
                next_point = &(*next_point)->next;
            goto next_non_null;
        } else {
            next_point = &(s->internal->packet_buffer_end->next);
        }
    }
    assert(!*next_point);

    s->internal->packet_buffer_end = this_pktl;
next_non_null:

    this_pktl->next = *next_point;

    s->streams[pkt->stream_index]->last_in_packet_buffer =
        *next_point = this_pktl;

    av_packet_unref(pkt);

    return 0;
}

static int interleave_compare_dts(AVFormatContext *s, AVPacket *next,
                                  AVPacket *pkt)
{
    AVStream *st  = s->streams[pkt->stream_index];
    AVStream *st2 = s->streams[next->stream_index];
    int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
                             st->time_base);

    if (comp == 0)
        return pkt->stream_index < next->stream_index;
    return comp > 0;
}

int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
                                 AVPacket *pkt, int flush)
{
    AVPacketList *pktl;
    int stream_count = 0;
    int i, ret;

    if (pkt) {
        if ((ret = ff_interleave_add_packet(s, pkt, interleave_compare_dts)) < 0)
            return ret;
    }

    if (s->max_interleave_delta > 0 && s->internal->packet_buffer && !flush) {
        AVPacket *top_pkt = &s->internal->packet_buffer->pkt;
        int64_t delta_dts = INT64_MIN;
        int64_t top_dts = av_rescale_q(top_pkt->dts,
                                       s->streams[top_pkt->stream_index]->time_base,
                                       AV_TIME_BASE_Q);
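
        /* Measure, in AV_TIME_BASE units, how far the newest buffered dts of
         * any stream has run ahead of the oldest buffered packet.  If that gap
         * exceeds max_interleave_delta, force the queue out even though some
         * streams may still have nothing buffered. */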
        for (i = 0; i < s->nb_streams; i++) {
            int64_t last_dts;
            const AVPacketList *last = s->streams[i]->last_in_packet_buffer;

            if (!last)
                continue;

            last_dts = av_rescale_q(last->pkt.dts,
                                    s->streams[i]->time_base,
                                    AV_TIME_BASE_Q);
            delta_dts = FFMAX(delta_dts, last_dts - top_dts);
            stream_count++;
        }

        if (delta_dts > s->max_interleave_delta) {
            av_log(s, AV_LOG_DEBUG,
                   "Delay between the first packet and last packet in the "
                   "muxing queue is %"PRId64" > %"PRId64": forcing output\n",
                   delta_dts, s->max_interleave_delta);
            flush = 1;
        }
    } else {
        for (i = 0; i < s->nb_streams; i++)
            stream_count += !!s->streams[i]->last_in_packet_buffer;
    }

    if (stream_count && (s->internal->nb_interleaved_streams == stream_count || flush)) {
        pktl = s->internal->packet_buffer;
        *out = pktl->pkt;

        s->internal->packet_buffer = pktl->next;
        if (!s->internal->packet_buffer)
            s->internal->packet_buffer_end = NULL;

        if (s->streams[out->stream_index]->last_in_packet_buffer == pktl)
            s->streams[out->stream_index]->last_in_packet_buffer = NULL;
        av_freep(&pktl);
        return 1;
    } else {
        av_init_packet(out);
        return 0;
    }
}

/**
 * Interleave an AVPacket correctly so it can be muxed.
 * @param s   media file handle
 * @param out the interleaved packet will be output here
 * @param in  the input packet
 * @param flush 1 if no further packets are available as input and all
 *              remaining packets should be output
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 if an error occurred
 */
static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush)
{
    if (s->oformat->interleave_packet) {
        int ret = s->oformat->interleave_packet(s, out, in, flush);
        if (in)
            av_packet_unref(in);
        return ret;
    } else
        return ff_interleave_packet_per_dts(s, out, in, flush);
}

int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    int ret, flush = 0;

    ret = prepare_input_packet(s, pkt);
    if (ret < 0)
        goto fail;

    if (pkt) {
#if FF_API_COMPUTE_PKT_FIELDS2
        AVStream *st = s->streams[pkt->stream_index];

        av_log(s, AV_LOG_TRACE, "av_interleaved_write_frame size:%d dts:%" PRId64 " pts:%" PRId64 "\n",
               pkt->size, pkt->dts, pkt->pts);
        if ((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
            goto fail;
#endif

        if (pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
    } else {
        av_log(s, AV_LOG_TRACE, "av_interleaved_write_frame FLUSH\n");
        flush = 1;
    }

    for (;; ) {
        AVPacket opkt;
        int ret = interleave_packet(s, &opkt, pkt, flush);
        if (pkt) {
            memset(pkt, 0, sizeof(*pkt));
            av_init_packet(pkt);
            pkt = NULL;
        }
        if (ret <= 0) //FIXME cleanup needed for ret<0 ?
            return ret;

        ret = write_packet(s, &opkt);
        if (ret >= 0)
            s->streams[opkt.stream_index]->nb_frames++;

        av_packet_unref(&opkt);

        if (ret < 0)
            return ret;
    }
fail:
    av_packet_unref(pkt);
    return ret;
}

int av_write_trailer(AVFormatContext *s)
{
    int ret, i;

    for (;; ) {
        AVPacket pkt;
        ret = interleave_packet(s, &pkt, NULL, 1);
        if (ret < 0) //FIXME cleanup needed for ret<0 ?
            goto fail;
        if (!ret)
            break;

        ret = write_packet(s, &pkt);
        if (ret >= 0)
            s->streams[pkt.stream_index]->nb_frames++;

        av_packet_unref(&pkt);

        if (ret < 0)
            goto fail;
    }

    if (s->oformat->write_trailer)
        ret = s->oformat->write_trailer(s);

    if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
        avio_flush(s->pb);

fail:
    for (i = 0; i < s->nb_streams; i++) {
        av_freep(&s->streams[i]->priv_data);
        av_freep(&s->streams[i]->index_entries);
    }
    if (s->oformat->priv_class)
        av_opt_free(s->priv_data);
    av_freep(&s->priv_data);
    return ret;
}
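
/* ff_write_chained() forwards a packet produced for one muxing context to a
 * stream of another: its pts and dts are rescaled from the source stream's
 * time base into the destination stream's time base before the packet is
 * passed on to av_write_frame().  It is intended for muxers that internally
 * drive another muxer. */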
int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
                     AVFormatContext *src)
{
    AVPacket local_pkt;

    local_pkt = *pkt;
    local_pkt.stream_index = dst_stream;
    if (pkt->pts != AV_NOPTS_VALUE)
        local_pkt.pts = av_rescale_q(pkt->pts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    if (pkt->dts != AV_NOPTS_VALUE)
        local_pkt.dts = av_rescale_q(pkt->dts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    return av_write_frame(dst, &local_pkt);
}