Fix building against FFmpeg 5.0 (#13611)

Co-authored-by: Maxime Gauduin <alucryd@archlinux.org>

parent 03edf4f6a9
commit bbfcecfab0
@@ -96,11 +96,8 @@ static unsigned sw_sws_threads;
 static video_buffer_t *video_buffer;
 static tpool_t *tpool;
 
-/* If libavutil is at least version 55 or higher,
- * and if libavcodec is at least version 57.80.100 or higher,
- * enable hardware acceleration */
-#define ENABLE_HW_ACCEL ((LIBAVUTIL_VERSION_MAJOR > 55) && ENABLE_HW_ACCEL_CHECK2())
-#define ENABLE_HW_ACCEL_CHECK2() ((LIBAVCODEC_VERSION_MAJOR == 57 && LIBAVCODEC_VERSION_MINOR >= 80 && LIBAVCODEC_VERSION_MICRO >= 100) || (LIBAVCODEC_VERSION_MAJOR > 57))
+#define FFMPEG3 ((LIBAVUTIL_VERSION_INT < (56, 6, 100)) || \
+      (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 10, 100)))
 
 #if ENABLE_HW_ACCEL
 static enum AVHWDeviceType hw_decoder;
@@ -240,7 +237,9 @@ void CORE_PREFIX(retro_init)(void)
 {
    reset_triggered = false;
 
+#if FFMPEG3
    av_register_all();
+#endif
 
    if (CORE_PREFIX(environ_cb)(RETRO_ENVIRONMENT_GET_INPUT_BITMASKS, NULL))
       libretro_supports_bitmasks = true;
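The two hunks above introduce a FFMPEG3 version gate and use it to keep av_register_all(), which FFmpeg 4.0 deprecated and FFmpeg 5.0 removed, out of modern builds. As a rough, stand-alone illustration of the same idea (the macro name USE_LEGACY_REGISTER and the helper function are mine, not the core's):

#include <libavformat/avformat.h>
#include <libavutil/version.h>

/* True only on old libavformat, where av_register_all() still exists. */
#define USE_LEGACY_REGISTER (LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(58, 9, 100))

static void init_ffmpeg_once(void)
{
#if USE_LEGACY_REGISTER
   /* Deprecated in FFmpeg 4.0, removed in 5.0. */
   av_register_all();
#endif
   /* Nothing to do on modern FFmpeg: codecs and muxers are always registered. */
}

The gate has to be a compile-time check, because on FFmpeg 5.0 the symbol is not merely deprecated, it is gone.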
@@ -462,10 +461,12 @@ static void check_variables(bool firststart)
       hw_decoder = AV_HWDEVICE_TYPE_DRM;
    else if (string_is_equal(hw_var.value, "dxva2"))
       hw_decoder = AV_HWDEVICE_TYPE_DXVA2;
+#if !FFMPEG3
    else if (string_is_equal(hw_var.value, "mediacodec"))
       hw_decoder = AV_HWDEVICE_TYPE_MEDIACODEC;
    else if (string_is_equal(hw_var.value, "opencl"))
       hw_decoder = AV_HWDEVICE_TYPE_OPENCL;
+#endif
    else if (string_is_equal(hw_var.value, "qsv"))
       hw_decoder = AV_HWDEVICE_TYPE_QSV;
    else if (string_is_equal(hw_var.value, "vaapi"))
@@ -988,8 +989,9 @@ static enum AVPixelFormat init_hw_decoder(struct AVCodecContext *ctx,
 {
    int ret = 0;
    enum AVPixelFormat decoder_pix_fmt = AV_PIX_FMT_NONE;
-   struct AVCodec *codec = avcodec_find_decoder(fctx->streams[video_stream_index]->codec->codec_id);
+   const AVCodec *codec = avcodec_find_decoder(fctx->streams[video_stream_index]->codecpar->codec_id);
 
+#if !FFMPEG3
    for (int i = 0;; i++)
    {
       const AVCodecHWConfig *config = avcodec_get_hw_config(codec, i);
@@ -1002,12 +1004,15 @@ static enum AVPixelFormat init_hw_decoder(struct AVCodecContext *ctx,
       if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
          config->device_type == type)
       {
+         enum AVPixelFormat device_pix_fmt = config->pix_fmt;
+#else
+         enum AVPixelFormat device_pix_fmt =
+            pix_fmts ? ctx->get_format(ctx, pix_fmts) : decoder_pix_fmt;
+#endif
         log_cb(RETRO_LOG_INFO, "[FFMPEG] Selected HW decoder %s.\n",
               av_hwdevice_get_type_name(type));
         log_cb(RETRO_LOG_INFO, "[FFMPEG] Selected HW pixel format %s.\n",
-              av_get_pix_fmt_name(config->pix_fmt));
-
-         enum AVPixelFormat device_pix_fmt = config->pix_fmt;
+              av_get_pix_fmt_name(device_pix_fmt));
 
         if (pix_fmts != NULL)
         {
@@ -1019,16 +1024,17 @@ static enum AVPixelFormat init_hw_decoder(struct AVCodecContext *ctx,
               goto exit;
            }
            log_cb(RETRO_LOG_ERROR, "[FFMPEG] Codec %s does not support device pixel format %s.\n",
-                 codec->name, av_get_pix_fmt_name(config->pix_fmt));
+                 codec->name, av_get_pix_fmt_name(device_pix_fmt));
         }
         else
         {
            decoder_pix_fmt = device_pix_fmt;
            goto exit;
         }
 
+#if !FFMPEG3
      }
   }
+#endif
 
 exit:
    if (decoder_pix_fmt != AV_PIX_FMT_NONE)
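For builds where FFMPEG3 is false, init_hw_decoder() walks the decoder's hardware configurations with avcodec_get_hw_config(). A minimal, self-contained sketch of that enumeration pattern, assuming the caller supplies the codec and device type (the helper name and printf are mine, not the core's):

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
#include <libavutil/pixdesc.h>
#include <stdio.h>

static enum AVPixelFormat find_hw_pix_fmt(const AVCodec *codec,
      enum AVHWDeviceType type)
{
   int i;
   for (i = 0;; i++)
   {
      const AVCodecHWConfig *config = avcodec_get_hw_config(codec, i);
      if (!config)
         return AV_PIX_FMT_NONE; /* decoder has no config for this device type */
      if ((config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) &&
            config->device_type == type)
      {
         printf("HW pixel format: %s\n", av_get_pix_fmt_name(config->pix_fmt));
         return config->pix_fmt;
      }
   }
}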
@@ -1070,7 +1076,6 @@ static enum AVPixelFormat auto_hw_decoder(AVCodecContext *ctx,
 }
 #endif
 
-
 static enum AVPixelFormat select_decoder(AVCodecContext *ctx,
       const enum AVPixelFormat *pix_fmts)
 {
@@ -1098,7 +1103,7 @@ static enum AVPixelFormat select_decoder(AVCodecContext *ctx,
       ctx->thread_count = sw_decoder_threads;
       log_cb(RETRO_LOG_INFO, "[FFMPEG] Configured software decoding threads: %d\n", sw_decoder_threads);
 
-      format = fctx->streams[video_stream_index]->codec->pix_fmt;
+      format = fctx->streams[video_stream_index]->codecpar->format;
 
 #if ENABLE_HW_ACCEL
    hw_decoding_enabled = false;
@@ -1132,14 +1137,15 @@ static bool open_codec(AVCodecContext **ctx, enum AVMediaType type, unsigned ind
 {
    int ret = 0;
 
-   AVCodec *codec = avcodec_find_decoder(fctx->streams[index]->codec->codec_id);
+   const AVCodec *codec = avcodec_find_decoder(fctx->streams[index]->codecpar->codec_id);
    if (!codec)
    {
       log_cb(RETRO_LOG_ERROR, "[FFMPEG] Couldn't find suitable decoder\n");
       return false;
    }
 
-   *ctx = fctx->streams[index]->codec;
+   *ctx = avcodec_alloc_context3(codec);
+   avcodec_parameters_to_context((*ctx), fctx->streams[index]->codecpar);
 
    if (type == AVMEDIA_TYPE_VIDEO)
    {
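The open_codec() change above is the heart of the FFmpeg 5.0 port: AVStream::codec is gone, so a decoder context has to be allocated with avcodec_alloc_context3() and populated from AVStream::codecpar. A hedged, stand-alone sketch of that pattern (the function name and error handling are mine):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static AVCodecContext *open_stream_decoder(AVFormatContext *fmt, unsigned stream_index)
{
   const AVCodecParameters *par = fmt->streams[stream_index]->codecpar;
   const AVCodec *codec         = avcodec_find_decoder(par->codec_id);
   AVCodecContext *ctx          = NULL;

   if (!codec)
      return NULL;
   ctx = avcodec_alloc_context3(codec);
   if (!ctx)
      return NULL;
   /* Copy stream parameters into the fresh context, then open it. */
   if (avcodec_parameters_to_context(ctx, par) < 0 ||
         avcodec_open2(ctx, codec, NULL) < 0)
   {
      avcodec_free_context(&ctx);
      return NULL;
   }
   return ctx;
}

Because the context is now owned by the caller rather than the stream, it must eventually be released with avcodec_free_context() instead of being left to libavformat.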
@@ -1230,7 +1236,7 @@ static bool open_codecs(void)
 
    for (i = 0; i < fctx->nb_streams; i++)
    {
-      enum AVMediaType type = fctx->streams[i]->codec->codec_type;
+      enum AVMediaType type = fctx->streams[i]->codecpar->codec_type;
       switch (type)
       {
          case AVMEDIA_TYPE_AUDIO:
@@ -1245,7 +1251,7 @@ static bool open_codecs(void)
 
         case AVMEDIA_TYPE_VIDEO:
            if (!vctx
-                  && !codec_is_image(fctx->streams[i]->codec->codec_id))
+                  && !codec_is_image(fctx->streams[i]->codecpar->codec_id))
            {
               if (!open_codec(&vctx, type, i))
                  return false;
@@ -1255,7 +1261,7 @@ static bool open_codecs(void)
         case AVMEDIA_TYPE_SUBTITLE:
 #ifdef HAVE_SSA
            if (subtitle_streams_num < MAX_STREAMS
-                  && codec_id_is_ass(fctx->streams[i]->codec->codec_id))
+                  && codec_id_is_ass(fctx->streams[i]->codecpar->codec_id))
            {
               int size;
               AVCodecContext **s = &sctx[subtitle_streams_num];
@@ -1280,9 +1286,9 @@ static bool open_codecs(void)
 
         case AVMEDIA_TYPE_ATTACHMENT:
         {
-            AVCodecContext *ctx = fctx->streams[i]->codec;
-            if (codec_id_is_ttf(ctx->codec_id))
-               append_attachment(ctx->extradata, ctx->extradata_size);
+            AVCodecParameters *params = fctx->streams[i]->codecpar;
+            if (codec_id_is_ttf(params->codec_id))
+               append_attachment(params->extradata, params->extradata_size);
         }
         break;
 
@@ -1464,12 +1470,12 @@ static void sws_worker_thread(void *arg)
 
    ctx->sws = sws_getCachedContext(ctx->sws,
          media.width, media.height, (enum AVPixelFormat)tmp_frame->format,
-         media.width, media.height, PIX_FMT_RGB32,
+         media.width, media.height, AV_PIX_FMT_RGB32,
          SWS_POINT, NULL, NULL, NULL);
 
    set_colorspace(ctx->sws, media.width, media.height,
-         av_frame_get_colorspace(tmp_frame),
-         av_frame_get_color_range(tmp_frame));
+         tmp_frame->colorspace,
+         tmp_frame->color_range);
 
    if ((ret = sws_scale(ctx->sws, (const uint8_t *const*)tmp_frame->data,
          tmp_frame->linesize, 0, media.height,
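av_frame_get_colorspace() and av_frame_get_color_range() were removed along with the other AVFrame accessor functions, so the hunk above reads the public struct fields directly. A tiny illustrative helper, not taken from the core:

#include <libavutil/frame.h>

static void read_frame_colorimetry(const AVFrame *frame,
      enum AVColorSpace *space, enum AVColorRange *range)
{
   *space = frame->colorspace;   /* previously av_frame_get_colorspace(frame)  */
   *range = frame->color_range;  /* previously av_frame_get_color_range(frame) */
}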
@@ -1620,7 +1626,7 @@ static int16_t *decode_audio(AVCodecContext *ctx, AVPacket *pkt,
 #endif
          break;
       }
-
+
       required_buffer = frame->nb_samples * sizeof(int16_t) * 2;
       if (required_buffer > *buffer_cap)
       {
@@ -1637,7 +1643,7 @@ static int16_t *decode_audio(AVCodecContext *ctx, AVPacket *pkt,
       pts = frame->best_effort_timestamp;
       slock_lock(fifo_lock);
 
-      while (!decode_thread_dead &&
+      while (!decode_thread_dead &&
             FIFO_WRITE_AVAIL(audio_decode_fifo) < required_buffer)
       {
         if (!main_sleeping)
@@ -1740,7 +1746,7 @@ static void decode_thread(void *data)
 
    if (video_stream_index >= 0)
    {
-      frame_size = avpicture_get_size(PIX_FMT_RGB32, media.width, media.height);
+      frame_size = av_image_get_buffer_size(AV_PIX_FMT_RGB32, media.width, media.height, 1);
       video_buffer = video_buffer_create(4, frame_size, media.width, media.height);
       tpool = tpool_create(sw_sws_threads);
       log_cb(RETRO_LOG_INFO, "[FFMPEG] Configured worker threads: %d\n", sw_sws_threads);
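avpicture_get_size() maps onto av_image_get_buffer_size(); the extra align argument of 1 reproduces the old, unpadded size. A minimal sketch (the helper name is an assumption, the width and height are whatever the caller has):

#include <libavutil/imgutils.h>
#include <libavutil/pixfmt.h>

static int rgb32_frame_bytes(int width, int height)
{
   /* align = 1 gives the tightly packed size avpicture_get_size() returned. */
   return av_image_get_buffer_size(AV_PIX_FMT_RGB32, width, height, 1);
}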
@@ -1813,7 +1819,7 @@ static void decode_thread(void *data)
       if (!packet_buffer_empty(video_packet_buffer))
          next_video_end = video_timebase * packet_buffer_peek_end_pts(video_packet_buffer);
 
-      /*
+      /*
        * Decode audio packet if:
        * 1. it's the start of file or it's audio only media
        * 2. there is a video packet for in the buffer
@@ -1835,7 +1841,7 @@ static void decode_thread(void *data)
         av_packet_unref(pkt);
       }
 
-      /*
+      /*
       * Decode video packet if:
       * 1. we already decoded an audio packet
       * 2. there is no audio stream to play
@@ -1865,7 +1871,7 @@ static void decode_thread(void *data)
            av_packet_free(&pkt);
            break;
         }
-
+
        // Read the next frame and stage it in case of audio or video frame.
        if (av_read_frame(fctx, pkt) < 0)
           eof = true;
@@ -1878,7 +1884,7 @@ static void decode_thread(void *data)
        /**
         * Decode subtitle packets right away, since SSA/ASS can operate this way.
         * If we ever support other subtitles, we need to handle this with a
-        * buffer too
+        * buffer too
        **/
        AVSubtitle sub;
        int finished = 0;
@@ -2,6 +2,7 @@
 extern "C" {
 #endif
 #include <libavformat/avformat.h>
+#include <libavutil/imgutils.h>
 #ifdef __cplusplus
 }
 #endif
@@ -69,17 +70,18 @@ video_buffer_t *video_buffer_create(
       b->buffer[i].pts = 0;
       b->buffer[i].sws = sws_alloc_context();
       b->buffer[i].source = av_frame_alloc();
-#if LIBAVUTIL_VERSION_MAJOR > 55
+#if ENABLE_HW_ACCEL
       b->buffer[i].hw_source = av_frame_alloc();
 #endif
       b->buffer[i].target = av_frame_alloc();
 
-      avpicture_alloc((AVPicture*)b->buffer[i].target,
-            PIX_FMT_RGB32, width, height);
+      AVFrame* frame = b->buffer[i].target;
+      av_image_alloc(frame->data, frame->linesize,
+            width, height, AV_PIX_FMT_RGB32, 1);
 
       if (!b->buffer[i].sws ||
             !b->buffer[i].source ||
-#if LIBAVUTIL_VERSION_MAJOR > 55
+#if ENABLE_HW_ACCEL
             !b->buffer[i].hw_source ||
 #endif
             !b->buffer[i].target)
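video_buffer_create() now allocates the RGB target planes with av_image_alloc() instead of avpicture_alloc(). A self-contained sketch of that allocation, including the matching free that av_image_alloc() expects (an av_freep() of data[0] before the frame itself is released); the helper names here are mine, not the core's:

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

static AVFrame *alloc_rgb32_frame(int width, int height)
{
   AVFrame *frame = av_frame_alloc();
   if (!frame)
      return NULL;
   /* Allocates one buffer and fills frame->data/linesize to point into it. */
   if (av_image_alloc(frame->data, frame->linesize,
         width, height, AV_PIX_FMT_RGB32, 1) < 0)
   {
      av_frame_free(&frame);
      return NULL;
   }
   frame->width  = width;
   frame->height = height;
   frame->format = AV_PIX_FMT_RGB32;
   return frame;
}

static void free_rgb32_frame(AVFrame **frame)
{
   if (!frame || !*frame)
      return;
   av_freep(&(*frame)->data[0]); /* the buffer handed out by av_image_alloc() */
   av_frame_free(frame);
}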
@@ -106,11 +108,11 @@ void video_buffer_destroy(video_buffer_t *video_buffer)
 {
    for (i = 0; i < video_buffer->capacity; i++)
    {
-#if LIBAVUTIL_VERSION_MAJOR > 55
+#if ENABLE_HW_ACCEL
       av_frame_free(&video_buffer->buffer[i].hw_source);
 #endif
       av_frame_free(&video_buffer->buffer[i].source);
-      avpicture_free((AVPicture*)video_buffer->buffer[i].target);
+      av_freep((AVFrame*)video_buffer->buffer[i].target);
       av_frame_free(&video_buffer->buffer[i].target);
       sws_freeContext(video_buffer->buffer[i].sws);
    }
@@ -31,22 +31,24 @@ extern "C" {
 
 RETRO_BEGIN_DECLS
 
-#ifndef PIX_FMT_RGB32
-#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32
-#endif
+/* If libavutil is at least version 55,
+ * and if libavcodec is at least version 57.80.100,
+ * enable hardware acceleration */
+#define ENABLE_HW_ACCEL ((LIBAVUTIL_VERSION_MAJOR >= 55) && \
+      (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 80, 100)))
 
 /**
  * video_decoder_context
- *
+ *
  * Context object for the sws worker threads.
- *
+ *
  */
 struct video_decoder_context
 {
    int64_t pts;
    struct SwsContext *sws;
    AVFrame *source;
-#if LIBAVUTIL_VERSION_MAJOR > 55
+#if ENABLE_HW_ACCEL
    AVFrame *hw_source;
 #endif
    AVFrame *target;
@@ -60,15 +62,15 @@ typedef struct video_decoder_context video_decoder_context_t;
 
 /**
  * video_buffer
- *
- * The video buffer is a ring buffer, that can be used as a
+ *
+ * The video buffer is a ring buffer, that can be used as a
  * buffer for many workers while keeping the order.
- *
+ *
  * It is thread safe in a sensem that it is designed to work
  * with one work coordinator, that allocates work slots for
  * workers threads to work on and later collect the work
  * product in the same order, as the slots were allocated.
- *
+ *
  */
 struct video_buffer;
 typedef struct video_buffer video_buffer_t;
@@ -81,38 +83,38 @@ typedef struct video_buffer video_buffer_t;
  * @height : Height of the target frame.
  *
  * Create a video buffer.
- *
+ *
  * Returns: A video buffer.
 */
 video_buffer_t *video_buffer_create(size_t capacity, int frame_size, int width, int height);
 
-/**
+/**
 * video_buffer_destroy:
 * @video_buffer : video buffer.
- *
+ *
 * Destroys a video buffer.
- *
+ *
 * Does also free the buffer allocated with video_buffer_create().
 * User has to shut down any external worker threads that may have
 * a reference to this video buffer.
- *
+ *
 **/
 void video_buffer_destroy(video_buffer_t *video_buffer);
 
-/**
+/**
 * video_buffer_clear:
 * @video_buffer : video buffer.
- *
+ *
 * Clears a video buffer.
- *
+ *
 **/
 void video_buffer_clear(video_buffer_t *video_buffer);
 
-/**
+/**
 * video_buffer_get_open_slot:
 * @video_buffer : video buffer.
 * @context : sws context.
- *
+ *
 * Returns the next open context inside the ring buffer
 * and it's index. The status of the slot will be marked as
 * 'in progress' until slot is marked as finished with
@@ -121,21 +123,21 @@ void video_buffer_clear(video_buffer_t *video_buffer);
 **/
 void video_buffer_get_open_slot(video_buffer_t *video_buffer, video_decoder_context_t **context);
 
-/**
+/**
 * video_buffer_return_open_slot:
 * @video_buffer : video buffer.
 * @context : sws context.
- *
+ *
 * Marks the given sws context that is "in progress" as "open" again.
 *
 **/
 void video_buffer_return_open_slot(video_buffer_t *video_buffer, video_decoder_context_t *context);
 
-/**
+/**
 * video_buffer_open_slot:
 * @video_buffer : video buffer.
 * @context : sws context.
- *
+ *
 * Sets the status of the given context from "finished" to "open".
 * The slot is then available for producers to claim again with video_buffer_get_open_slot().
 **/
@@ -145,7 +147,7 @@ void video_buffer_open_slot(video_buffer_t *video_buffer, video_decoder_context_
 * video_buffer_get_finished_slot:
 * @video_buffer : video buffer.
 * @context : sws context.
- *
+ *
 * Returns a reference for the next context inside
 * the ring buffer. User needs to use video_buffer_open_slot()
 * to open the slot in the ringbuffer for the next
@@ -158,7 +160,7 @@ void video_buffer_get_finished_slot(video_buffer_t *video_buffer, video_decoder_
 * video_buffer_finish_slot:
 * @video_buffer : video buffer.
 * @context : sws context.
- *
+ *
 * Sets the status of the given context from "in progress" to "finished".
 * This is normally done by a producer. User can then retrieve the finished work
 * context by calling video_buffer_get_finished_slot().
@@ -168,9 +170,9 @@ void video_buffer_finish_slot(video_buffer_t *video_buffer, video_decoder_contex
 /**
 * video_buffer_wait_for_open_slot:
 * @video_buffer : video buffer.
- *
+ *
 * Blocks until open slot is available.
- *
+ *
 * Returns true if the buffer has a open slot available.
 */
 bool video_buffer_wait_for_open_slot(video_buffer_t *video_buffer);
@@ -180,7 +182,7 @@ bool video_buffer_wait_for_open_slot(video_buffer_t *video_buffer);
 * @video_buffer : video buffer.
 *
 * Blocks until finished slot is available.
- *
+ *
 * Returns true if the buffers next slot is finished and a
 * context available.
 */
@@ -190,7 +192,7 @@ bool video_buffer_wait_for_finished_slot(video_buffer_t *video_buffer);
 * bool video_buffer_has_open_slot(video_buffer_t *video_buffer)
 * :
 * @video_buffer : video buffer.
- *
+ *
 * Returns true if the buffer has a open slot available.
 */
 bool video_buffer_has_open_slot(video_buffer_t *video_buffer);
@@ -32,25 +32,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "libavcodec/avcodec.h"
 #include "libavutil/mathematics.h"
-#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT( 52, 23, 0 )
-# include "libavformat/avformat.h"
-static AVPacket null_packet = {AV_NOPTS_VALUE, AV_NOPTS_VALUE};
-# define av_init_packet(a) *(a) = null_packet
-#endif
-
-#if LIBAVCODEC_VERSION_MAJOR < 53
-# define avcodec_decode_audio3(a,b,c,d) avcodec_decode_audio2(a,b,c,(d)->data,(d)->size)
-#endif
-
-#if LIBAVCODEC_VERSION_MAJOR < 54
-#define AVSampleFormat SampleFormat
-#define AV_SAMPLE_FMT_NONE SAMPLE_FMT_NONE
-#define AV_SAMPLE_FMT_U8 SAMPLE_FMT_U8
-#define AV_SAMPLE_FMT_S16 SAMPLE_FMT_S16
-#define AV_SAMPLE_FMT_S32 SAMPLE_FMT_S32
-#define AV_SAMPLE_FMT_FLT SAMPLE_FMT_FLT
-#define AV_SAMPLE_FMT_DBL SAMPLE_FMT_DBL
-#endif
 
 /* Buffering requirements */
 #define INPUT_MIN_BUFFER_SIZE (4*1024)
@@ -33,15 +33,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define attribute_deprecated
 #include "libavcodec/avcodec.h"
 #include "libavutil/mathematics.h"
-#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT( 52, 23, 0 )
-# include "libavformat/avformat.h"
-static AVPacket null_packet = {AV_NOPTS_VALUE, AV_NOPTS_VALUE};
-# define av_init_packet(a) *(a) = null_packet
-#endif
-
-#if LIBAVCODEC_VERSION_MAJOR < 53
-# define avcodec_decode_video2(a,b,c,d) avcodec_decode_video(a,b,c,(d)->data,(d)->size)
-#endif
 
 /* Buffering requirements */
 #define INPUT_MIN_BUFFER_SIZE (800*1024)
@@ -76,7 +67,7 @@ typedef struct MMAL_COMPONENT_MODULE_T
    int width;
    int height;
    enum PixelFormat pix_fmt;
-   AVPicture layout;
+   AVFrame layout;
    unsigned int planes;
 
    int frame_size;
@@ -180,7 +171,7 @@ static MMAL_STATUS_T avcodec_output_port_set_format(MMAL_PORT_T *port)
    module->height = port->format->es->video.height;
 
    module->frame_size =
-      avpicture_fill(&module->layout, 0, module->pix_fmt, module->width, module->height);
+      av_image_fill_arrays(&module->layout->data, &module->layout->linesize, 0, module->pix_fmt, module->width, module->height, 1);
    if (module->frame_size < 0)
       return MMAL_EINVAL;
 
@@ -309,12 +300,13 @@ static MMAL_STATUS_T avcodec_send_picture(MMAL_COMPONENT_T *component, MMAL_PORT
    if (!out)
       return MMAL_EAGAIN;
 
-   size = avpicture_layout((AVPicture *)module->picture, module->pix_fmt,
-         module->width, module->height, out->data, out->alloc_size);
+   AVFrame* frame = module->picture;
+   size = av_image_copy_to_buffer(out->data, out->alloc_size, frame->data, frame->linesize, module->pix_fmt,
+         module->width, module->height, 1);
    if (size < 0)
    {
       mmal_queue_put_back(module->queue_out, out);
-      LOG_ERROR("avpicture_layout failed: %i, %i, %i, %i",module->pix_fmt,
+      LOG_ERROR("av_image_copy_to_buffer failed: %i, %i, %i, %i",module->pix_fmt,
            module->width, module->height, out->alloc_size );
       mmal_event_error_send(component, MMAL_EINVAL);
       return MMAL_EINVAL;
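avpicture_layout() likewise maps onto av_image_copy_to_buffer(), which copies an AVFrame's planes into one flat buffer, as the MMAL hunk above does. An illustrative wrapper of my own, not taken from this component:

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>

/* Returns the number of bytes written to dst, or a negative AVERROR. */
static int frame_to_buffer(const AVFrame *frame, enum AVPixelFormat fmt,
      uint8_t *dst, int dst_size)
{
   return av_image_copy_to_buffer(dst, dst_size,
         (const uint8_t * const *)frame->data, frame->linesize,
         fmt, frame->width, frame->height, 1);
}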
@@ -526,9 +518,7 @@ static struct {
    {MMAL_ENCODING_WMV1, CODEC_ID_WMV1},
    {MMAL_ENCODING_WVC1, CODEC_ID_VC1},
    {MMAL_ENCODING_VP6, CODEC_ID_VP6},
-#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 52, 68, 2 )
    {MMAL_ENCODING_VP8, CODEC_ID_VP8},
-#endif
    {MMAL_ENCODING_THEORA, CODEC_ID_THEORA},
 
    {MMAL_ENCODING_GIF, CODEC_ID_GIF},
@@ -70,67 +70,12 @@ extern "C" {
 #include "../../retroarch.h"
 #include "../../verbosity.h"
 
-#ifndef AV_CODEC_FLAG_QSCALE
-#define AV_CODEC_FLAG_QSCALE CODEC_FLAG_QSCALE
-#endif
-
-#ifndef AV_CODEC_FLAG_GLOBAL_HEADER
-#define AV_CODEC_FLAG_GLOBAL_HEADER CODEC_FLAG_GLOBAL_HEADER
-#endif
-
-#ifndef AV_INPUT_BUFFER_MIN_SIZE
-#define AV_INPUT_BUFFER_MIN_SIZE FF_MIN_BUFFER_SIZE
-#endif
-
-#ifndef PIX_FMT_RGB32
-#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32
-#endif
-
-#ifndef PIX_FMT_YUV444P
-#define PIX_FMT_YUV444P AV_PIX_FMT_YUV444P
-#endif
-
-#ifndef PIX_FMT_YUV420P
-#define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P
-#endif
-
-#ifndef PIX_FMT_BGR24
-#define PIX_FMT_BGR24 AV_PIX_FMT_BGR24
-#endif
-
-#ifndef PIX_FMT_RGB24
-#define PIX_FMT_RGB24 AV_PIX_FMT_RGB24
-#endif
-
-#ifndef PIX_FMT_RGB8
-#define PIX_FMT_RGB8 AV_PIX_FMT_RGB8
-#endif
-
-#ifndef PIX_FMT_RGB565
-#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565
-#endif
-
-#ifndef PIX_FMT_RGBA
-#define PIX_FMT_RGBA AV_PIX_FMT_RGBA
-#endif
-
-#ifndef PIX_FMT_NONE
-#define PIX_FMT_NONE AV_PIX_FMT_NONE
-#endif
-
-#ifndef PixelFormat
-#define PixelFormat AVPixelFormat
-#endif
-
-#if LIBAVUTIL_VERSION_INT <= AV_VERSION_INT(52, 9, 0)
-#define av_frame_alloc avcodec_alloc_frame
-#define av_frame_free avcodec_free_frame
-#endif
+#define FFMPEG3 (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 10, 100))
 
 struct ff_video_info
 {
    AVCodecContext *codec;
-   AVCodec *encoder;
+   const AVCodec *encoder;
 
    AVFrame *conv_frame;
    uint8_t *conv_frame_buf;
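Most of the removed block is old libav-era compatibility cruft; the functional change is the FFMPEG3 macro and the const-qualified encoder pointers, since avcodec_find_encoder(), avcodec_find_decoder() and friends return const AVCodec * as of libavcodec 59 (FFmpeg 5.0). A short sketch of the const-correct pattern; the fallback codec choice here is arbitrary and not from the commit:

#include <libavcodec/avcodec.h>

static const AVCodec *pick_video_encoder(const char *name)
{
   const AVCodec *codec = avcodec_find_encoder_by_name(name);
   if (!codec)
      codec = avcodec_find_encoder(AV_CODEC_ID_H264); /* arbitrary fallback */
   return codec;
}

Storing the result in a plain AVCodec * still compiles on FFmpeg 4.x but fails on 5.0, which is exactly the class of build break this commit addresses.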
@@ -140,9 +85,9 @@ struct ff_video_info
    size_t outbuf_size;
 
    /* Output pixel format. */
-   enum PixelFormat pix_fmt;
+   enum AVPixelFormat pix_fmt;
    /* Input pixel format. Only used by sws. */
-   enum PixelFormat in_pix_fmt;
+   enum AVPixelFormat in_pix_fmt;
 
    unsigned frame_drop_ratio;
    unsigned frame_drop_count;
@@ -160,7 +105,7 @@ struct ff_video_info
 struct ff_audio_info
 {
    AVCodecContext *codec;
-   AVCodec *encoder;
+   const AVCodec *encoder;
 
    uint8_t *buffer;
    size_t frames_in_buffer;
@@ -209,7 +154,7 @@ struct ff_config_param
    char vcodec[64];
    char acodec[64];
    char format[64];
-   enum PixelFormat out_pix_fmt;
+   enum AVPixelFormat out_pix_fmt;
    unsigned threads;
    unsigned frame_drop_ratio;
    unsigned sample_rate;
@@ -237,6 +182,8 @@ typedef struct ffmpeg
 
    struct record_params params;
 
+   AVPacket *pkt;
+
    scond_t *cond;
    slock_t *cond_lock;
    slock_t *lock;
@@ -341,7 +288,7 @@ static bool ffmpeg_init_audio(ffmpeg_t *handle, const char *audio_resampler)
    struct ff_config_param *params = &handle->config;
    struct ff_audio_info *audio = &handle->audio;
    struct record_params *param = &handle->params;
-   AVCodec *codec = avcodec_find_encoder_by_name(
+   const AVCodec *codec = avcodec_find_encoder_by_name(
          *params->acodec ? params->acodec : "flac");
    if (!codec)
    {
@@ -364,7 +311,7 @@ static bool ffmpeg_init_audio(ffmpeg_t *handle, const char *audio_resampler)
 
    if (params->sample_rate)
    {
-      audio->ratio = (double)params->sample_rate
+      audio->ratio = (double)params->sample_rate
         / param->samplerate;
      audio->codec->sample_rate = params->sample_rate;
      audio->codec->time_base = av_d2q(1.0 / params->sample_rate, 1000000);
@@ -410,10 +357,6 @@ static bool ffmpeg_init_audio(ffmpeg_t *handle, const char *audio_resampler)
         audio->codec->channels *
         audio->sample_size);
 
-#if 0
-   RARCH_LOG("[FFmpeg]: Audio frame size: %d.\n", audio->codec->frame_size);
-#endif
-
    if (!audio->buffer)
       return false;
 
@@ -432,7 +375,7 @@ static bool ffmpeg_init_video(ffmpeg_t *handle)
    struct ff_config_param *params = &handle->config;
    struct ff_video_info *video = &handle->video;
    struct record_params *param = &handle->params;
-   AVCodec *codec = NULL;
+   const AVCodec *codec = NULL;
 
    if (*params->vcodec)
       codec = avcodec_find_encoder_by_name(params->vcodec);
@@ -459,19 +402,19 @@ static bool ffmpeg_init_video(ffmpeg_t *handle)
     * and it's non-trivial to fix upstream as it's heavily geared towards YUV.
     * If we're dealing with strange formats or YUV, just use libswscale.
     */
-   if (params->out_pix_fmt != PIX_FMT_NONE)
+   if (params->out_pix_fmt != AV_PIX_FMT_NONE)
    {
       video->pix_fmt = params->out_pix_fmt;
-      if (video->pix_fmt != PIX_FMT_BGR24 && video->pix_fmt != PIX_FMT_RGB32)
+      if (video->pix_fmt != AV_PIX_FMT_BGR24 && video->pix_fmt != AV_PIX_FMT_RGB32)
         video->use_sws = true;
 
      switch (video->pix_fmt)
      {
-         case PIX_FMT_BGR24:
+         case AV_PIX_FMT_BGR24:
            video->scaler.out_fmt = SCALER_FMT_BGR24;
            break;
 
-         case PIX_FMT_RGB32:
+         case AV_PIX_FMT_RGB32:
            video->scaler.out_fmt = SCALER_FMT_ARGB8888;
            break;
 
@@ -481,7 +424,7 @@ static bool ffmpeg_init_video(ffmpeg_t *handle)
    }
    else /* Use BGR24 as default out format. */
    {
-      video->pix_fmt = PIX_FMT_BGR24;
+      video->pix_fmt = AV_PIX_FMT_BGR24;
       video->scaler.out_fmt = SCALER_FMT_BGR24;
    }
 
@@ -489,19 +432,19 @@ static bool ffmpeg_init_video(ffmpeg_t *handle)
    {
       case FFEMU_PIX_RGB565:
         video->scaler.in_fmt = SCALER_FMT_RGB565;
-         video->in_pix_fmt = PIX_FMT_RGB565;
+         video->in_pix_fmt = AV_PIX_FMT_RGB565;
         video->pix_size = 2;
         break;
 
      case FFEMU_PIX_BGR24:
         video->scaler.in_fmt = SCALER_FMT_BGR24;
-         video->in_pix_fmt = PIX_FMT_BGR24;
+         video->in_pix_fmt = AV_PIX_FMT_BGR24;
         video->pix_size = 3;
         break;
 
      case FFEMU_PIX_ARGB8888:
         video->scaler.in_fmt = SCALER_FMT_ARGB8888;
-         video->in_pix_fmt = PIX_FMT_RGB32;
+         video->in_pix_fmt = AV_PIX_FMT_RGB32;
         video->pix_size = 4;
         break;
 
@@ -550,13 +493,14 @@ static bool ffmpeg_init_video(ffmpeg_t *handle)
 
    video->frame_drop_ratio = params->frame_drop_ratio;
 
-   size = avpicture_get_size(video->pix_fmt, param->out_width,
-         param->out_height);
+   size = av_image_get_buffer_size(video->pix_fmt, param->out_width,
+         param->out_height, 1);
    video->conv_frame_buf = (uint8_t*)av_malloc(size);
    video->conv_frame = av_frame_alloc();
 
-   avpicture_fill((AVPicture*)video->conv_frame, video->conv_frame_buf,
-         video->pix_fmt, param->out_width, param->out_height);
+   AVFrame* frame = video->conv_frame;
+   av_image_fill_arrays(frame->data, frame->linesize, video->conv_frame_buf,
+         video->pix_fmt, param->out_width, param->out_height, 1);
 
    video->conv_frame->width = param->out_width;
    video->conv_frame->height = param->out_height;
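avpicture_fill() becomes av_image_fill_arrays(): the conversion buffer is still allocated separately with av_malloc(), then wrapped into the AVFrame's data/linesize arrays without a copy. A hedged sketch of that wrapping step (the helper name is mine):

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>

/* Point an AVFrame at an existing buffer; no pixel data is copied. */
static int wrap_buffer_in_frame(AVFrame *frame, uint8_t *buffer,
      enum AVPixelFormat fmt, int width, int height)
{
   int ret = av_image_fill_arrays(frame->data, frame->linesize,
         buffer, fmt, width, height, 1);
   if (ret < 0)
      return ret;
   frame->width  = width;
   frame->height = height;
   frame->format = fmt;
   return 0;
}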
@@ -581,7 +525,7 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
    params->frame_drop_ratio = 1;
    params->audio_enable = true;
    params->audio_global_quality = 75;
-   params->out_pix_fmt = PIX_FMT_YUV420P;
+   params->out_pix_fmt = AV_PIX_FMT_YUV420P;
 
    strlcpy(params->vcodec, "libx264", sizeof(params->vcodec));
    strlcpy(params->acodec, "aac", sizeof(params->acodec));
@@ -597,7 +541,7 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
    params->frame_drop_ratio = 1;
    params->audio_enable = true;
    params->audio_global_quality = 75;
-   params->out_pix_fmt = PIX_FMT_YUV420P;
+   params->out_pix_fmt = AV_PIX_FMT_YUV420P;
 
    strlcpy(params->vcodec, "libx264", sizeof(params->vcodec));
    strlcpy(params->acodec, "aac", sizeof(params->acodec));
@@ -613,7 +557,7 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
    params->frame_drop_ratio = 1;
    params->audio_enable = true;
    params->audio_global_quality = 100;
-   params->out_pix_fmt = PIX_FMT_YUV420P;
+   params->out_pix_fmt = AV_PIX_FMT_YUV420P;
 
    strlcpy(params->vcodec, "libx264", sizeof(params->vcodec));
    strlcpy(params->acodec, "aac", sizeof(params->acodec));
@@ -628,7 +572,7 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
    params->frame_drop_ratio = 1;
    params->audio_enable = true;
    params->audio_global_quality = 80;
-   params->out_pix_fmt = PIX_FMT_BGR24;
+   params->out_pix_fmt = AV_PIX_FMT_BGR24;
 
    strlcpy(params->vcodec, "libx264rgb", sizeof(params->vcodec));
    strlcpy(params->acodec, "flac", sizeof(params->acodec));
@@ -641,7 +585,7 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
    params->frame_drop_ratio = 1;
    params->audio_enable = true;
    params->audio_global_quality = 50;
-   params->out_pix_fmt = PIX_FMT_YUV420P;
+   params->out_pix_fmt = AV_PIX_FMT_YUV420P;
 
    strlcpy(params->vcodec, "libvpx", sizeof(params->vcodec));
    strlcpy(params->acodec, "libopus", sizeof(params->acodec));
@@ -655,7 +599,7 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
    params->frame_drop_ratio = 1;
    params->audio_enable = true;
    params->audio_global_quality = 75;
-   params->out_pix_fmt = PIX_FMT_YUV420P;
+   params->out_pix_fmt = AV_PIX_FMT_YUV420P;
 
    strlcpy(params->vcodec, "libvpx", sizeof(params->vcodec));
    strlcpy(params->acodec, "libopus", sizeof(params->acodec));
@@ -669,7 +613,7 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
    params->frame_drop_ratio = 4;
    params->audio_enable = false;
    params->audio_global_quality = 0;
-   params->out_pix_fmt = PIX_FMT_RGB8;
+   params->out_pix_fmt = AV_PIX_FMT_RGB8;
 
    strlcpy(params->vcodec, "gif", sizeof(params->vcodec));
    strlcpy(params->acodec, "", sizeof(params->acodec));
@@ -682,7 +626,7 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
    params->frame_drop_ratio = 1;
    params->audio_enable = false;
    params->audio_global_quality = 0;
-   params->out_pix_fmt = PIX_FMT_RGB24;
+   params->out_pix_fmt = AV_PIX_FMT_RGB24;
 
    strlcpy(params->vcodec, "apng", sizeof(params->vcodec));
    strlcpy(params->acodec, "", sizeof(params->acodec));
@@ -695,7 +639,7 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
    params->frame_drop_ratio = 1;
    params->audio_enable = true;
    params->audio_global_quality = 50;
-   params->out_pix_fmt = PIX_FMT_YUV420P;
+   params->out_pix_fmt = AV_PIX_FMT_YUV420P;
 
    strlcpy(params->vcodec, "libx264", sizeof(params->vcodec));
    strlcpy(params->acodec, "aac", sizeof(params->acodec));
@@ -761,7 +705,7 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
         video_stream_scale_factor : 1;
    else
       params->scale_factor = 1;
-   if ( streaming_mode == STREAMING_MODE_YOUTUBE ||
+   if ( streaming_mode == STREAMING_MODE_YOUTUBE ||
         streaming_mode == STREAMING_MODE_TWITCH ||
         streaming_mode == STREAMING_MODE_FACEBOOK)
       strlcpy(params->format, "flv", sizeof(params->format));
@@ -777,32 +721,13 @@ static bool ffmpeg_init_config_common(struct ff_config_param *params,
    return true;
 }
 
-/*
-static bool ffmpeg_init_config_recording(struct ff_config_param *params)
-{
-   return true;
-   params->threads = 0;
-   params->audio_global_quality = 100;
-
-   strlcpy(params->vcodec, "libx264rgb", sizeof(params->vcodec));
-   strlcpy(params->format, "matroska", sizeof(params->format));
-
-   av_dict_set(&params->video_opts, "video_preset", "slow", 0);
-   av_dict_set(&params->video_opts, "video_tune", "film", 0);
-   av_dict_set(&params->video_opts, "video_crf", "10", 0);
-   av_dict_set(&params->audio_opts, "audio_global_quality", "100", 0);
-
-   return true;
-}
-*/
-
 static bool ffmpeg_init_config(struct ff_config_param *params,
       const char *config)
 {
    struct config_file_entry entry;
    char pix_fmt[64] = {0};
 
-   params->out_pix_fmt = PIX_FMT_NONE;
+   params->out_pix_fmt = AV_PIX_FMT_NONE;
    params->scale_factor = 1;
    params->threads = 1;
    params->frame_drop_ratio = 1;
@@ -848,7 +773,7 @@ static bool ffmpeg_init_config(struct ff_config_param *params,
    if (config_get_array(params->conf, "pix_fmt", pix_fmt, sizeof(pix_fmt)))
    {
       params->out_pix_fmt = av_get_pix_fmt(pix_fmt);
-      if (params->out_pix_fmt == PIX_FMT_NONE)
+      if (params->out_pix_fmt == AV_PIX_FMT_NONE)
       {
          RARCH_ERR("[FFmpeg] Cannot find pix_fmt \"%s\".\n", pix_fmt);
          return false;
@@ -878,23 +803,34 @@
 static bool ffmpeg_init_muxer_pre(ffmpeg_t *handle)
 {
    ctx = avformat_alloc_context();
-   handle->muxer.ctx = ctx;
+#if !FFMPEG3
+   unsigned short int len = MIN(strlen(handle->params.filename) + 1, PATH_MAX_LENGTH);
+   ctx->url = av_malloc(len);
+   av_strlcpy(ctx->url, handle->params.filename, len);
+#else
    av_strlcpy(ctx->filename, handle->params.filename, sizeof(ctx->filename));
+#endif
 
    if (*handle->config.format)
       ctx->oformat = av_guess_format(handle->config.format, NULL, NULL);
    else
+#if !FFMPEG3
+      ctx->oformat = av_guess_format(NULL, ctx->url, NULL);
+#else
      ctx->oformat = av_guess_format(NULL, ctx->filename, NULL);
+#endif
 
    if (!ctx->oformat)
      return false;
 
+#if !FFMPEG3
+   if (avio_open(&ctx->pb, ctx->url, AVIO_FLAG_WRITE) < 0)
+#else
   if (avio_open(&ctx->pb, ctx->filename, AVIO_FLAG_WRITE) < 0)
+#endif
   {
      av_free(ctx);
      return false;
   }
 
+   handle->muxer.ctx = ctx;
   return true;
 }
 
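ffmpeg_init_muxer_pre() now has to populate AVFormatContext::url, a heap-allocated string, where older libavformat exposed the fixed-size filename field; that is what the FFMPEG3 branches above are about. A simplified sketch of the same split (the exact version cutoff used here is my assumption, not something stated by the commit):

#include <libavformat/avformat.h>
#include <libavutil/avstring.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

static int set_output_target(AVFormatContext *ctx, const char *path)
{
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 7, 100)
   /* Modern libavformat: url is a malloc'ed string owned by the context. */
   av_freep(&ctx->url);
   ctx->url = av_strdup(path);
   return ctx->url ? 0 : AVERROR(ENOMEM);
#else
   /* Old libavformat: filename is a fixed-size char array. */
   av_strlcpy(ctx->filename, path, sizeof(ctx->filename));
   return 0;
#endif
}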
@@ -903,8 +839,8 @@ static bool ffmpeg_init_muxer_post(ffmpeg_t *handle)
    AVStream *stream = avformat_new_stream(handle->muxer.ctx,
         handle->video.encoder);
 
-   stream->codec = handle->video.codec;
-   stream->time_base = stream->codec->time_base;
+   avcodec_parameters_from_context(stream->codecpar, handle->video.codec);
+   stream->time_base = handle->video.codec->time_base;
    handle->muxer.vstream = stream;
    handle->muxer.vstream->sample_aspect_ratio =
       handle->video.codec->sample_aspect_ratio;
@@ -913,8 +849,8 @@ static bool ffmpeg_init_muxer_post(ffmpeg_t *handle)
    {
       stream = avformat_new_stream(handle->muxer.ctx,
            handle->audio.encoder);
-      stream->codec = handle->audio.codec;
-      stream->time_base = stream->codec->time_base;
+      avcodec_parameters_from_context(stream->codecpar, handle->audio.codec);
+      stream->time_base = handle->audio.codec->time_base;
       handle->muxer.astream = stream;
    }
 
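On the muxing side the stream no longer borrows the encoder's AVCodecContext; its parameters are copied into AVStream::codecpar instead, which is what the two hunks above do. A minimal sketch of that setup (the helper name and the NULL codec argument to avformat_new_stream() are mine):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static AVStream *add_encoded_stream(AVFormatContext *muxer, AVCodecContext *enc)
{
   AVStream *stream = avformat_new_stream(muxer, NULL);
   if (!stream)
      return NULL;
   /* Copy codec id, extradata, dimensions, sample format, etc. */
   if (avcodec_parameters_from_context(stream->codecpar, enc) < 0)
      return NULL;
   stream->time_base = enc->time_base; /* muxer may adjust it in write_header */
   return stream;
}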
@@ -1038,8 +974,17 @@ static void ffmpeg_free(void *data)
    av_free(handle->audio.resample_out);
    av_free(handle->audio.fixed_conv);
    av_free(handle->audio.planar_buf);
+#if !FFMPEG3
+   av_free(handle->muxer.ctx->url);
+#endif
+   av_free(handle->muxer.ctx);
+   av_packet_free(&handle->pkt);
+
    free(handle);
 
+#if FFMPEG3
    avformat_network_deinit();
+#endif
 }
 
 static void *ffmpeg_new(const struct record_params *params)
@@ -1048,10 +993,13 @@ static void *ffmpeg_new(const struct record_params *params)
    if (!handle)
       return NULL;
 
+#if FFMPEG3
    av_register_all();
    avformat_network_init();
+#endif
 
    handle->params = *params;
+   handle->pkt = av_packet_alloc();
 
    switch (params->preset)
    {
@@ -1081,7 +1029,7 @@ static void *ffmpeg_new(const struct record_params *params)
    if (!ffmpeg_init_video(handle))
       goto error;
 
-   if (handle->config.audio_enable &&
+   if (handle->config.audio_enable &&
         !ffmpeg_init_audio(handle,
            params->audio_resampler))
       goto error;
@@ -1220,12 +1168,12 @@ static bool ffmpeg_push_audio(void *data,
 
 static bool encode_video(ffmpeg_t *handle, AVFrame *frame)
 {
-   AVPacket pkt;
+   AVPacket *pkt;
    int ret;
 
-   av_init_packet(&pkt);
-   pkt.data = handle->video.outbuf;
-   pkt.size = handle->video.outbuf_size;
+   pkt = handle->pkt;
+   pkt->data = handle->video.outbuf;
+   pkt->size = handle->video.outbuf_size;
 
    ret = avcodec_send_frame(handle->video.codec, frame);
    if (ret < 0)
@@ -1240,7 +1188,7 @@ static bool encode_video(ffmpeg_t *handle, AVFrame *frame)
 
    while (ret >= 0)
    {
-      ret = avcodec_receive_packet(handle->video.codec, &pkt);
+      ret = avcodec_receive_packet(handle->video.codec, pkt);
       if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
          break;
       else if (ret < 0)
@@ -1253,16 +1201,17 @@ static bool encode_video(ffmpeg_t *handle, AVFrame *frame)
         return false;
      }
 
-      pkt.pts = av_rescale_q(pkt.pts, handle->video.codec->time_base,
-            handle->muxer.vstream->time_base);
-
-      pkt.dts = av_rescale_q(pkt.dts,
+      pkt->pts = av_rescale_q(pkt->pts,
            handle->video.codec->time_base,
            handle->muxer.vstream->time_base);
 
-      pkt.stream_index = handle->muxer.vstream->index;
-
-      ret = av_interleaved_write_frame(handle->muxer.ctx, &pkt);
+      pkt->dts = av_rescale_q(pkt->dts,
+            handle->video.codec->time_base,
+            handle->muxer.vstream->time_base);
+
+      pkt->stream_index = handle->muxer.vstream->index;
+
+      ret = av_interleaved_write_frame(handle->muxer.ctx, pkt);
      if (ret < 0)
      {
 #ifdef __cplusplus
@@ -1272,6 +1221,8 @@ static bool encode_video(ffmpeg_t *handle, AVFrame *frame)
 #endif
         return false;
      }
+
+      av_packet_unref(pkt);
    }
    return true;
 }
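encode_video() now drives a single heap packet, allocated once with av_packet_alloc() (the handle->pkt field added earlier) and unreferenced after every write, because av_init_packet() and sized stack packets are gone in FFmpeg 5.0. A generic sketch of that send/receive loop; the write_cb callback stands in for av_interleaved_write_frame() and is my own device, not the core's:

#include <libavcodec/avcodec.h>

static int encode_and_write(AVCodecContext *enc, AVFrame *frame, AVPacket *pkt,
      int (*write_cb)(AVPacket *pkt, void *opaque), void *opaque)
{
   int ret = avcodec_send_frame(enc, frame); /* frame == NULL flushes the encoder */
   if (ret < 0)
      return ret;

   while (ret >= 0)
   {
      ret = avcodec_receive_packet(enc, pkt);
      if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
         return 0;        /* need more input, or fully flushed */
      else if (ret < 0)
         return ret;

      ret = write_cb(pkt, opaque);
      av_packet_unref(pkt); /* release the payload before reusing pkt */
      if (ret < 0)
         return ret;
   }
   return 0;
}

/* Typical setup and teardown around such a loop:
 *    AVPacket *pkt = av_packet_alloc();
 *    ...
 *    av_packet_free(&pkt);
 */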
@@ -1377,14 +1328,14 @@ static void planarize_audio(ffmpeg_t *handle)
 static bool encode_audio(ffmpeg_t *handle, bool dry)
 {
    AVFrame *frame;
-   AVPacket pkt;
+   AVPacket *pkt;
    int samples_size;
    int ret;
 
-   av_init_packet(&pkt);
+   pkt = handle->pkt;
 
-   pkt.data = handle->audio.outbuf;
-   pkt.size = handle->audio.outbuf_size;
+   pkt->data = handle->audio.outbuf;
+   pkt->size = handle->audio.outbuf_size;
 
    frame = av_frame_alloc();
 
@@ -1404,10 +1355,11 @@ static bool encode_audio(ffmpeg_t *handle, bool dry)
         handle->audio.frames_in_buffer,
         handle->audio.codec->sample_fmt, 0);
 
+   av_frame_get_buffer(frame, 0);
    avcodec_fill_audio_frame(frame,
         handle->audio.codec->channels,
         handle->audio.codec->sample_fmt,
-        handle->audio.is_planar
+        handle->audio.is_planar
         ? (uint8_t*)handle->audio.planar_buf :
         handle->audio.buffer,
         samples_size, 0);
@@ -1424,9 +1376,9 @@ static bool encode_audio(ffmpeg_t *handle, bool dry)
       return false;
    }
 
-   while (ret >= 0)
+   while (ret >= 0)
    {
-      ret = avcodec_receive_packet(handle->audio.codec, &pkt);
+      ret = avcodec_receive_packet(handle->audio.codec, pkt);
       if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
         break;
      else if (ret < 0)
@@ -1440,17 +1392,17 @@ static bool encode_audio(ffmpeg_t *handle, bool dry)
         return false;
      }
 
-      pkt.pts = av_rescale_q(pkt.pts,
+      pkt->pts = av_rescale_q(pkt->pts,
            handle->audio.codec->time_base,
            handle->muxer.astream->time_base);
 
-      pkt.dts = av_rescale_q(pkt.dts,
+      pkt->dts = av_rescale_q(pkt->dts,
           handle->audio.codec->time_base,
           handle->muxer.astream->time_base);
 
-      pkt.stream_index = handle->muxer.astream->index;
+      pkt->stream_index = handle->muxer.astream->index;
 
-      ret = av_interleaved_write_frame(handle->muxer.ctx, &pkt);
+      ret = av_interleaved_write_frame(handle->muxer.ctx, pkt);
      if (ret < 0)
      {
        av_frame_free(&frame);
@@ -1461,6 +1413,8 @@ static bool encode_audio(ffmpeg_t *handle, bool dry)
 #endif
        return false;
     }
+
+      av_packet_unref(pkt);
   }
 
   av_frame_free(&frame);
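encode_audio() gains an av_frame_get_buffer() call so the frame owns its sample buffers before they are filled. A stand-alone sketch of preparing such a frame; the sample format, rate and stereo layout are arbitrary example values, and it uses the channel_layout field that the FFmpeg 5.0 era API (which this commit targets) still provides:

#include <libavutil/channel_layout.h>
#include <libavutil/frame.h>

static AVFrame *alloc_s16_frame(int nb_samples, int sample_rate)
{
   AVFrame *frame = av_frame_alloc();
   if (!frame)
      return NULL;
   frame->format         = AV_SAMPLE_FMT_S16;   /* interleaved 16-bit samples */
   frame->nb_samples     = nb_samples;
   frame->sample_rate    = sample_rate;
   frame->channel_layout = AV_CH_LAYOUT_STEREO; /* assumption: stereo output  */
   if (av_frame_get_buffer(frame, 0) < 0)       /* allocates frame->data[]    */
      av_frame_free(&frame);
   return frame;
}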
@@ -1481,8 +1435,8 @@ static void ffmpeg_audio_resample(ffmpeg_t *handle,
       return;
 
    handle->audio.float_conv_frames = aud->frames;
-   /* To make sure we don't accidentially overflow. */
-   handle->audio.resample_out_frames = aud->frames
+   /* To make sure we don't accidentally overflow. */
+   handle->audio.resample_out_frames = aud->frames
         * handle->audio.ratio + 16;
    handle->audio.resample_out = (float*)
       av_realloc(handle->audio.resample_out,
@@ -1496,7 +1450,7 @@ static void ffmpeg_audio_resample(ffmpeg_t *handle,
         handle->audio.float_conv_frames);
    handle->audio.fixed_conv = (int16_t*)av_realloc(
         handle->audio.fixed_conv,
-         handle->audio.fixed_conv_frames *
+         handle->audio.fixed_conv_frames *
         handle->params.channels * sizeof(int16_t));
 
    if (!handle->audio.fixed_conv)
@@ -1506,7 +1460,7 @@ static void ffmpeg_audio_resample(ffmpeg_t *handle,
    if (handle->audio.use_float || handle->audio.resampler)
    {
       convert_s16_to_float(handle->audio.float_conv,
-            (const int16_t*)aud->data, aud->frames
+            (const int16_t*)aud->data, aud->frames
           * handle->params.channels, 1.0);
       aud->data = handle->audio.float_conv;
    }