mirror of https://github.com/xenia-project/FFmpeg.git (synced 2024-11-24 12:09:55 +00:00)
Merge commit 'bc4620e5d61a4dd9a1f654fadd281a172aab04be'
* commit 'bc4620e5d61a4dd9a1f654fadd281a172aab04be':
  Remove libmpeg2 #define remnants
  De-doxygenize some top-level files

Conflicts:
        ffmpeg.c
        ffmpeg.h
        ffmpeg_filter.c
        ffplay.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
This commit is contained in: commit b4ca1b159f
ffmpeg.c (11 changed lines)
@@ -975,7 +975,7 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
     }
 }
 
-/**
+/*
  * Get and encode new output from any of the filtergraphs, without causing
  * activity.
  *
@@ -2403,10 +2403,7 @@ static int transcode_init(void)
     return 0;
 }
 
-/**
- * @return 1 if there are still streams where more output is wanted,
- * 0 otherwise
- */
+/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
 static int need_output(void)
 {
     int i;
@@ -2685,8 +2682,8 @@ static void reset_eagain(void)
         output_streams[i]->unavailable = 0;
 }
 
-/**
- * @return
+/*
+ * Return
  * - 0 -- one packet was read and processed
  * - AVERROR(EAGAIN) -- no packets were available for selected file,
  *   this function should be called again
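For reference, a minimal standalone sketch of the calling convention documented in the comment above: 0 means one packet was read and processed, AVERROR(EAGAIN) means nothing was available and the function should be called again. The EXAMPLE_* names and the toy packet source are invented for this sketch; only AVERROR(EAGAIN) corresponds to the real libavutil macro.

#include <errno.h>
#include <stdio.h>

#define EXAMPLE_AVERROR(e) (-(e))          /* stand-in for libavutil's AVERROR() */

/* Hypothetical packet source used only for this sketch. */
static int example_read_packet(int *packets_left)
{
    if (*packets_left == 0)
        return EXAMPLE_AVERROR(EAGAIN);    /* nothing available right now */
    (*packets_left)--;
    return 0;                              /* one packet was read and processed */
}

int main(void)
{
    int packets_left = 3;

    for (int i = 0; i < 5; i++) {
        int ret = example_read_packet(&packets_left);
        if (ret == EXAMPLE_AVERROR(EAGAIN)) {
            printf("no packet available, call again later\n");
            continue;
        }
        printf("processed one packet\n");
    }
    return 0;
}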
ffmpeg.h (4 changed lines)
@@ -57,12 +57,12 @@
 
 /* select an input stream for an output stream */
 typedef struct StreamMap {
-    int disabled;          /** 1 is this mapping is disabled by a negative map */
+    int disabled;          /* 1 is this mapping is disabled by a negative map */
     int file_index;
     int stream_index;
     int sync_file_index;
     int sync_stream_index;
-    char *linklabel;       /** name of an output link, for mapping lavfi outputs */
+    char *linklabel;       /* name of an output link, for mapping lavfi outputs */
 } StreamMap;
 
 typedef struct {
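As an aside, a small self-contained sketch of how -map style options relate to the StreamMap fields above. The ExampleStreamMap type and the values are invented for illustration; in ffmpeg the real struct comes from ffmpeg.h and is filled in by the option parser.

#include <stdio.h>

/* Local copy of the fields shown in the hunk above so the sketch compiles
 * on its own; in ffmpeg the struct is declared in ffmpeg.h. */
typedef struct ExampleStreamMap {
    int disabled;            /* 1 if this mapping is disabled by a negative map */
    int file_index;
    int stream_index;
    int sync_file_index;
    int sync_stream_index;
    char *linklabel;         /* name of an output link, for mapping lavfi outputs */
} ExampleStreamMap;

int main(void)
{
    /* Roughly what "-map 0:1" and a negative map "-map -0:1" would yield
     * (values invented for the example). */
    ExampleStreamMap keep = { .disabled = 0, .file_index = 0, .stream_index = 1 };
    ExampleStreamMap drop = { .disabled = 1, .file_index = 0, .stream_index = 1 };

    printf("keep: file %d, stream %d\n", keep.file_index, keep.stream_index);
    printf("drop: disabled = %d\n", drop.disabled);
    return 0;
}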
@@ -129,10 +129,8 @@ static char *choose_pix_fmts(OutputStream *ost)
         return NULL;
 }
 
-/**
- * Define a function for building a string containing a list of
- * allowed formats,
- */
+/* Define a function for building a string containing a list of
+ * allowed formats. */
 #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator)\
 static char *choose_ ## var ## s(OutputStream *ost) \
 { \
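The DEF_CHOOSE_FORMAT macro touched in this hunk (defined in ffmpeg_filter.c) generates choose_*() helpers that build a string listing the formats an output stream accepts. Below is a minimal sketch of that pattern under simplified assumptions: the ExampleFormat type, example_get_name() and the fixed-size buffer are invented for the example and do not mirror the exact generated code.

#include <stdio.h>
#include <string.h>

typedef int ExampleFormat;                     /* invented stand-in format type */

static const char *example_get_name(ExampleFormat f)
{
    static const char *names[] = { "u8", "s16", "flt" };
    return names[f];
}

/* Sketch of the pattern the macro expands to: walk a sentinel-terminated
 * list of supported formats and join their names with a separator. */
static void build_format_list(const ExampleFormat *supported, ExampleFormat none,
                              const char *separator, char *out, size_t out_size)
{
    out[0] = '\0';
    for (int i = 0; supported[i] != none; i++) {
        if (i)
            strncat(out, separator, out_size - strlen(out) - 1);
        strncat(out, example_get_name(supported[i]), out_size - strlen(out) - 1);
    }
}

int main(void)
{
    const ExampleFormat fmts[] = { 0, 1, 2, -1 };   /* -1 plays the role of "none" */
    char buf[64];

    build_format_list(fmts, -1, ",", buf, sizeof(buf));
    printf("%s\n", buf);                            /* prints: u8,s16,flt */
    return 0;
}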
@@ -367,7 +367,7 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
 }
 
 /**
- * Parse a metadata specifier in arg.
+ * Parse a metadata specifier passed as 'arg' parameter.
  * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
  * @param index for type c/p, chapter/program index is written here
  * @param stream_spec for type s, the stream specifier is written here
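The comment above describes the metadata specifier grammar handled here: a leading type character (g/s/c/p), then an index for chapters and programs, or a stream specifier for streams. A minimal sketch of splitting such a specifier follows; split_meta_spec() and its behaviour are illustrative only and do not reproduce the real parse_meta_type().

#include <stdio.h>
#include <stdlib.h>

/* Illustrative splitter for specifiers such as "g", "c:2" or "s:a:0". */
static int split_meta_spec(const char *arg, char *type, int *index,
                           const char **stream_spec)
{
    *type = *arg ? *arg : 'g';              /* empty specifier -> global */
    switch (*type) {
    case 'g':
        return 0;
    case 'c':                               /* chapter */
    case 'p':                               /* program */
        if (arg[1] != ':')
            return -1;
        *index = atoi(arg + 2);
        return 0;
    case 's':                               /* stream: keep the raw stream specifier */
        *stream_spec = arg[1] == ':' ? arg + 2 : "";
        return 0;
    default:
        return -1;
    }
}

int main(void)
{
    char type;
    int index = 0;
    const char *spec = "";

    if (!split_meta_spec("s:a:0", &type, &index, &spec))
        printf("type=%c stream_spec=%s\n", type, spec);
    if (!split_meta_spec("c:2", &type, &index, &spec))
        printf("type=%c index=%d\n", type, index);
    return 0;
}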
@@ -541,10 +541,8 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
         return avcodec_find_decoder(st->codec->codec_id);
 }
 
-/**
- * Add all the streams from the given input file to the global
- * list of input streams.
- */
+/* Add all the streams from the given input file to the global
+ * list of input streams. */
 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
 {
     int i;
ffplay.c (16 changed lines)
@@ -100,8 +100,8 @@ typedef struct PacketQueue {
 #define SUBPICTURE_QUEUE_SIZE 4
 
 typedef struct VideoPicture {
-    double pts;             ///< presentation time stamp for this picture
-    int64_t pos;            ///< byte position in file
+    double pts;             // presentation timestamp for this picture
+    int64_t pos;            // byte position in file
     int skip;
     SDL_Overlay *bmp;
     int width, height; /* source height & width */
@@ -210,13 +210,13 @@ typedef struct VideoState {
     double frame_last_returned_time;
     double frame_last_filter_delay;
     int64_t frame_last_dropped_pos;
-    double video_clock;             ///< pts of last decoded frame / predicted pts of next decoded frame
+    double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
     int video_stream;
     AVStream *video_st;
     PacketQueue videoq;
-    double video_current_pts;       ///< current displayed pts (different from video_clock if frame fifos are used)
-    double video_current_pts_drift; ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
-    int64_t video_current_pos;      ///< current displayed file pos
+    double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
+    double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
+    int64_t video_current_pos;      // current displayed file pos
     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
     int pictq_size, pictq_rindex, pictq_windex;
     SDL_mutex *pictq_mutex;
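The video_current_pts_drift field above encodes a common clock trick: store pts minus the wall-clock time at which the pts was updated, then recover a running clock later as drift plus the current time. A standalone sketch, with invented names and CLOCK_MONOTONIC standing in for av_gettime():

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

/* Wall-clock time in seconds; stands in for av_gettime() in this sketch. */
static double now_seconds(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec + ts.tv_nsec / 1e9;
}

int main(void)
{
    double displayed_pts = 42.0;                   /* pts of the frame just shown */
    double pts_drift = displayed_pts - now_seconds();

    /* ... later, without decoding anything new, the running video clock is ... */
    double running_pts = pts_drift + now_seconds();

    printf("running video clock: %.3f\n", running_pts);
    return 0;
}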
@@ -230,8 +230,8 @@ typedef struct VideoState {
     int step;
 
 #if CONFIG_AVFILTER
-    AVFilterContext *in_video_filter;   ///< the first filter in the video chain
-    AVFilterContext *out_video_filter;  ///< the last filter in the video chain
+    AVFilterContext *in_video_filter;   // the first filter in the video chain
+    AVFilterContext *out_video_filter;  // the last filter in the video chain
     int use_dr1;
    FrameBuffer *buffer_pool;
 #endif
@@ -97,5 +97,8 @@
 #ifndef FF_API_AVCODEC_RESAMPLE
 #define FF_API_AVCODEC_RESAMPLE (LIBAVCODEC_VERSION_MAJOR < 55)
 #endif
+#ifndef FF_API_LIBMPEG2
+#define FF_API_LIBMPEG2 (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
 
 #endif /* AVCODEC_VERSION_H */
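The FF_API_LIBMPEG2 addition follows the usual FF_API_* pattern: the macro evaluates to 1 only while the library major version is below the cutoff, so compatibility code guarded by it is dropped automatically at the next major bump. A compile-time illustration with invented EXAMPLE_* names:

#include <stdio.h>

#define EXAMPLE_LIBAVCODEC_VERSION_MAJOR 54        /* made-up version for the demo */

#ifndef EXAMPLE_FF_API_LIBMPEG2
#define EXAMPLE_FF_API_LIBMPEG2 (EXAMPLE_LIBAVCODEC_VERSION_MAJOR < 55)
#endif

int main(void)
{
#if EXAMPLE_FF_API_LIBMPEG2
    printf("compatibility code still compiled (major < 55)\n");
#else
    printf("compatibility code removed\n");
#endif
    return 0;
}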