Mirror of https://github.com/xenia-project/FFmpeg.git
Remove time_rate, we cannot compute exactly when fragments are used and we cannot determine if fragments are present or not in streamed mode.
Originally committed as revision 19148 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 818062f2f3
commit bbe46bc4c2
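Background for the diff below, as a hedged sketch: before this change the mov demuxer accumulated time_rate as the gcd of every sample duration (plus dts_shift and the edit-list offset), registered the stream timebase as time_rate/time_scale, and divided all durations by time_rate; after it, the timebase is simply 1/time_scale and the raw durations are used. The standalone C program below only illustrates that the two schemes describe the same instants when the full gcd is actually available; demo_gcd, the 30000/1001 sample values, and main() are illustrative, not part of the commit (av_gcd and av_set_pts_info are the real FFmpeg functions touched by the diff).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* same contract as FFmpeg's av_gcd(), reimplemented so the sketch builds standalone */
static int64_t demo_gcd(int64_t a, int64_t b)
{
    while (b) { int64_t t = a % b; a = b; b = t; }
    return a;
}

int main(void)
{
    /* a track with mdhd time_scale 30000 and constant stts duration 1001 (29.97 fps) */
    const int64_t time_scale = 30000;
    const int64_t durations[] = { 1001, 1001, 1001 };
    int64_t time_rate = 0, dts_old = 0, dts_new = 0;

    /* old scheme: fold the gcd of every duration into the timebase numerator.
     * This needs all stts/ctts/elst data before the first packet is emitted,
     * which fragmented (moof/trun) or streamed files cannot guarantee. */
    for (int i = 0; i < 3; i++)
        time_rate = demo_gcd(time_rate, durations[i]);
    for (int i = 0; i < 3; i++)
        dts_old += durations[i] / time_rate;   /* ticks of time_rate/time_scale */

    /* new scheme: the timebase is 1/time_scale and durations are used as-is */
    for (int i = 0; i < 3; i++)
        dts_new += durations[i];               /* ticks of 1/time_scale */

    /* both amount to 3003/30000 = 0.1001 seconds */
    printf("old: dts=%" PRId64 " in timebase %" PRId64 "/%" PRId64 "\n",
           dts_old, time_rate, time_scale);
    printf("new: dts=%" PRId64 " in timebase 1/%" PRId64 "\n", dts_new, time_scale);
    return 0;
}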
libavformat/mov.c
@@ -106,7 +106,6 @@ typedef struct MOVStreamContext {
     unsigned int keyframe_count;
     int *keyframes;
     int time_scale;
-    int time_rate;
     int time_offset; ///< time offset of the first edit list entry
     int current_sample;
     unsigned int bytes_per_frame;
@@ -1253,8 +1253,6 @@ static int mov_read_stts(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
         sc->stts_data[i].count= sample_count;
         sc->stts_data[i].duration= sample_duration;
 
-        sc->time_rate= av_gcd(sc->time_rate, sample_duration);
-
         dprintf(c->fc, "sample_count=%d, sample_duration=%d\n",sample_count,sample_duration);
 
         duration+=(int64_t)sample_duration*sample_count;
@@ -1282,8 +1280,6 @@ static int mov_read_cslg(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
     sc->dts_shift = get_be32(pb);
     dprintf(c->fc, "dts shift %d\n", sc->dts_shift);
 
-    sc->time_rate= av_gcd(sc->time_rate, FFABS(sc->dts_shift));
-
     get_be32(pb); // least dts to pts delta
     get_be32(pb); // greatest dts to pts delta
     get_be32(pb); // pts start
@@ -1317,8 +1313,6 @@ static int mov_read_ctts(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
 
         sc->ctts_data[i].count = count;
         sc->ctts_data[i].duration= duration;
-
-        sc->time_rate= av_gcd(sc->time_rate, FFABS(duration));
     }
     return 0;
 }
@@ -1337,8 +1331,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
     /* adjust first dts according to edit list */
     if (sc->time_offset) {
         int rescaled = sc->time_offset < 0 ? av_rescale(sc->time_offset, sc->time_scale, mov->time_scale) : sc->time_offset;
-        assert(sc->time_offset % sc->time_rate == 0);
-        current_dts = - (rescaled / sc->time_rate);
+        current_dts = -rescaled;
         if (sc->ctts_data && sc->ctts_data[0].duration / sc->stts_data[0].duration > 16) {
             /* more than 16 frames delay, dts are likely wrong
                this happens with files created by iMovie */
@@ -1356,7 +1349,6 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
         unsigned int distance = 0;
         int key_off = sc->keyframes && sc->keyframes[0] == 1;
 
-        sc->dts_shift /= sc->time_rate;
         current_dts -= sc->dts_shift;
 
         st->nb_frames = sc->sample_count;
@@ -1394,8 +1386,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
             }
 
             current_offset += sample_size;
-            assert(sc->stts_data[stts_index].duration % sc->time_rate == 0);
-            current_dts += sc->stts_data[stts_index].duration / sc->time_rate;
+            current_dts += sc->stts_data[stts_index].duration;
             distance++;
             stts_sample++;
             current_sample++;
@@ -1443,8 +1434,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
                     size, samples);
 
             current_offset += size;
-            assert(samples % sc->time_rate == 0);
-            current_dts += samples / sc->time_rate;
+            current_dts += samples;
             chunk_samples -= samples;
         }
     }
@@ -1477,12 +1467,10 @@ static int mov_read_trak(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
         return 0;
     }
 
-    if (!sc->time_rate)
-        sc->time_rate = 1;
     if (!sc->time_scale)
         sc->time_scale = c->time_scale;
 
-    av_set_pts_info(st, 64, sc->time_rate, sc->time_scale);
+    av_set_pts_info(st, 64, 1, sc->time_scale);
 
     if (st->codec->codec_type == CODEC_TYPE_AUDIO &&
         !st->codec->frame_size && sc->stts_count == 1) {
@@ -1491,11 +1479,6 @@ static int mov_read_trak(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
         dprintf(c->fc, "frame size %d\n", st->codec->frame_size);
     }
 
-    if (st->duration != AV_NOPTS_VALUE) {
-        assert(st->duration % sc->time_rate == 0);
-        st->duration /= sc->time_rate;
-    }
-
     mov_build_index(c, st);
 
     if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) {
@@ -1754,8 +1737,7 @@ static int mov_read_trun(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
                 "size %d, distance %d, keyframe %d\n", st->index, sc->sample_count+i,
                 offset, dts, sample_size, distance, keyframe);
         distance++;
-        assert(sample_duration % sc->time_rate == 0);
-        dts += sample_duration / sc->time_rate;
+        dts += sample_duration;
         offset += sample_size;
     }
     frag->moof_offset = offset;
@@ -1856,7 +1838,6 @@ static int mov_read_elst(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
         get_be32(pb); /* Media rate */
         if (i == 0 && time >= -1) {
             sc->time_offset = time != -1 ? time : -duration;
-            sc->time_rate = av_gcd(sc->time_rate, FFABS(sc->time_offset));
         }
     }
 
@@ -2006,8 +1987,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
         MOVStreamContext *msc = st->priv_data;
         if (st->discard != AVDISCARD_ALL && msc->pb && msc->current_sample < st->nb_index_entries) {
             AVIndexEntry *current_sample = &st->index_entries[msc->current_sample];
-            int64_t dts = av_rescale(current_sample->timestamp * (int64_t)msc->time_rate,
-                                     AV_TIME_BASE, msc->time_scale);
+            int64_t dts = av_rescale(current_sample->timestamp, AV_TIME_BASE, msc->time_scale);
             dprintf(s, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts);
             if (!sample || (url_is_streamed(s->pb) && current_sample->pos < sample->pos) ||
                 (!url_is_streamed(s->pb) &&
@@ -2051,8 +2031,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
     pkt->stream_index = sc->ffindex;
     pkt->dts = sample->timestamp;
     if (sc->ctts_data) {
-        assert(sc->ctts_data[sc->ctts_index].duration % sc->time_rate == 0);
-        pkt->pts = pkt->dts + sc->dts_shift + sc->ctts_data[sc->ctts_index].duration / sc->time_rate;
+        pkt->pts = pkt->dts + sc->dts_shift + sc->ctts_data[sc->ctts_index].duration;
         /* update ctts context */
         sc->ctts_sample++;
         if (sc->ctts_index < sc->ctts_count &&
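A brief usage note, not part of the commit: since the stream timebase is now registered as 1/time_scale, a caller converts the demuxer's packet timestamps to seconds the same way as for any other FFmpeg demuxer; the helper name below is illustrative.

#include <libavformat/avformat.h>

/* convert a packet timestamp to seconds using the stream's timebase;
 * av_q2d() (libavutil/rational.h) turns the AVRational into a double */
static double demo_pkt_dts_seconds(const AVStream *st, const AVPacket *pkt)
{
    return pkt->dts * av_q2d(st->time_base);
}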