cosmetics: Fix two common typos: wont --> will not, lets --> let us.
Originally committed as revision 14372 to svn://svn.ffmpeg.org/ffmpeg/trunk
commit ca74c0a180
parent cc8de8e8a5
@@ -508,7 +508,7 @@ retry:
 s->padding_bug_score= 256*256*256*64;

 /* very ugly XVID padding bug detection FIXME/XXX solve this differently
- * lets hope this at least works
+ * Let us hope this at least works.
  */
 if( s->resync_marker==0 && s->data_partitioning==0 && s->divx_version==0
    && s->codec_id==CODEC_ID_MPEG4 && s->vo_type==0)
@@ -24,7 +24,7 @@
  * Motion estimation template.
  */

-//lets hope gcc will remove the unused vars ...(gcc 3.2.2 seems to do it ...)
+//Let us hope gcc will remove the unused vars ...(gcc 3.2.2 seems to do it ...)
 #define LOAD_COMMON\
     uint32_t av_unused * const score_map= c->score_map;\
     const int av_unused xmin= c->xmin;\
@@ -1396,7 +1396,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
 linesize   = s->current_picture.linesize[0] << field_based;
 uvlinesize = s->current_picture.linesize[1] << field_based;

-if(s->quarter_sample){ //FIXME obviously not perfect but qpel wont work in lowres anyway
+if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
     motion_x/=2;
     motion_y/=2;
 }
@@ -230,7 +230,7 @@ void av_resample_compensate(AVResampleContext *c, int sample_delta, int compensa
  * @param consumed the number of samples of src which have been consumed are returned here
  * @param src_size the number of unconsumed samples available
  * @param dst_size the amount of space in samples available in dst
- * @param update_ctx if this is 0 then the context wont be modified, that way several channels can be resampled with the same context
+ * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.
  * @return the number of samples written in dst or -1 if an error occurred
  */
 int av_resample(AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx){
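For reference, a minimal sketch (not part of this commit) of how the update_ctx flag documented above might be used to resample several planar channels with one shared AVResampleContext. The helper name is hypothetical, and the assumption that av_resample() was declared in libavcodec/avcodec.h in this era is the author's of this sketch, not the commit's; the context itself would come from av_resample_init() and be released with av_resample_close().

#include <libavcodec/avcodec.h>  /* assumed: av_resample() and friends were public here at the time */

/* Hypothetical helper: resample two planar channels with one shared context.
 * update_ctx stays 0 for every channel except the last, so all channels are
 * filtered with identical resampler state, as the doxygen above describes. */
static void resample_two_channels(AVResampleContext *c,
                                  short *dst[2], short *src[2],
                                  int src_size, int dst_size)
{
    for (int ch = 0; ch < 2; ch++) {
        int consumed;
        int update_ctx = (ch == 1);   /* advance the context only on the last channel */
        int written = av_resample(c, dst[ch], src[ch], &consumed,
                                  src_size, dst_size, update_ctx);
        if (written < 0)
            return;                   /* -1 signals an error, per the @return above */
    }
}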
@@ -155,7 +155,7 @@ static int dc1394_v1_read_header(AVFormatContext *c, AVFormatParameters * ap)
 if (dc1394_read_common(c,ap,&fmt,&fps) != 0)
     return -1;

-/* Now lets prep the hardware */
+/* Now let us prep the hardware. */
 dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
 if (!dc1394->handle) {
     av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
@@ -248,7 +248,7 @@ static int dc1394_v2_read_header(AVFormatContext *c, AVFormatParameters * ap)
 if (dc1394_read_common(c,ap,&fmt,&fps) != 0)
     return -1;

-/* Now lets prep the hardware */
+/* Now let us prep the hardware. */
 dc1394->d = dc1394_new();
 dc1394_camera_enumerate (dc1394->d, &list);
 if ( !list || list->num == 0) {
@@ -255,7 +255,7 @@ int dv_assemble_frame(DVMuxContext *c, AVStream* st,
     av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames);
 av_fifo_generic_write(&c->audio_data[i], data, data_size, NULL);

-/* Lets see if we've got enough audio for one DV frame */
+/* Let us see if we've got enough audio for one DV frame. */
 c->has_audio |= ((reqasize <= av_fifo_size(&c->audio_data[i])) << i);

 break;
@@ -263,7 +263,7 @@ int dv_assemble_frame(DVMuxContext *c, AVStream* st,
 break;
 }

-/* Lets see if we have enough data to construct one DV frame */
+/* Let us see if we have enough data to construct one DV frame. */
 if (c->has_video == 1 && c->has_audio + 1 == 1<<c->n_ast) {
 dv_inject_metadata(c, *frame);
 c->has_audio = 0;
@@ -74,7 +74,7 @@ static inline av_const SoftFloat av_normalize1_sf(SoftFloat a){
 /**
  *
  * @return will not be more denormalized then a+b, so if either input is
- * normalized then the output wont be worse then the other input
+ * normalized then the output will not be worse then the other input
  * if both are normalized then the output will be normalized
  */
 static inline av_const SoftFloat av_mul_sf(SoftFloat a, SoftFloat b){