dirac: K&R formatting cosmetics

Signed-off-by: Diego Biurrun <diego@biurrun.de>
Author: Gabriel Dume, 2014-09-08 18:32:34 -04:00 (committed by Diego Biurrun)
parent 1ec335513f
commit 9752d07d33
2 changed files with 81 additions and 81 deletions
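
For orientation only (not part of the commit): the K&R cosmetics applied in both files boil down to a handful of mechanical rules — spaces inside initializer braces, spaces around binary operators, the opening brace on the struct/typedef line, and no braces around single-statement bodies. The short, made-up C example below illustrates them; none of its identifiers come from the Dirac code.

/*
 * Hypothetical example (not from the tree) illustrating the K&R
 * conventions applied by this commit.
 */
#include <stdio.h>

typedef struct Example {            /* brace on the same line as the struct tag */
    int num;
    int den;
} Example;

static const Example presets[] = {
    { 10, 11 },                     /* spaces inside initializer braces */
    { 12, 11 },
};

int main(void)
{
    int idx    = 2 - 1;             /* spaces around binary operators */
    int scaled = presets[idx].num << 1;

    if (scaled > presets[idx].den)  /* single-statement body: no braces */
        printf("%d/%d\n", scaled, presets[idx].den);

    return 0;
}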

libavcodec/dirac.c

@@ -26,8 +26,9 @@
  */
 
 #include "libavutil/imgutils.h"
-#include "dirac.h"
+
 #include "avcodec.h"
+#include "dirac.h"
 #include "golomb.h"
 #include "internal.h"
 #include "mpeg12data.h"
@@ -61,20 +62,20 @@ static const dirac_source_params dirac_source_parameters_defaults[] = {
 /* [DIRAC_STD] Table 10.4 - Available preset pixel aspect ratio values */
 static const AVRational dirac_preset_aspect_ratios[] = {
-{1, 1},
-{10, 11},
-{12, 11},
-{40, 33},
-{16, 11},
-{4, 3},
+{ 1, 1 },
+{ 10, 11 },
+{ 12, 11 },
+{ 40, 33 },
+{ 16, 11 },
+{ 4, 3 },
 };
 /* [DIRAC_STD] Values 9,10 of 10.3.5 Frame Rate.
  * Table 10.3 Available preset frame rate values
  */
 static const AVRational dirac_frame_rate[] = {
-{15000, 1001},
-{25, 2},
+{ 15000, 1001 },
+{ 25, 2 },
 };
 /* [DIRAC_STD] This should be equivalent to Table 10.5 Available signal
@@ -83,10 +84,10 @@ static const struct {
 uint8_t bitdepth;
 enum AVColorRange color_range;
 } pixel_range_presets[] = {
-{8, AVCOL_RANGE_JPEG},
-{8, AVCOL_RANGE_MPEG},
-{10, AVCOL_RANGE_MPEG},
-{12, AVCOL_RANGE_MPEG},
+{ 8, AVCOL_RANGE_JPEG },
+{ 8, AVCOL_RANGE_MPEG },
+{ 10, AVCOL_RANGE_MPEG },
+{ 12, AVCOL_RANGE_MPEG },
 };
 static const enum AVColorPrimaries dirac_primaries[] = {
@@ -118,7 +119,7 @@ static const enum AVPixelFormat dirac_pix_fmt[2][3] = {
 static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
 dirac_source_params *source)
 {
-AVRational frame_rate = {0,0};
+AVRational frame_rate = { 0, 0 };
 unsigned luma_depth = 8, luma_offset = 16;
 int idx;
@@ -169,10 +170,10 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
 frame_rate = ff_mpeg12_frame_rate_tab[source->frame_rate_index];
 else
 /* [DIRAC_STD] Table 10.3 values 9-10 */
-frame_rate = dirac_frame_rate[source->frame_rate_index-9];
+frame_rate = dirac_frame_rate[source->frame_rate_index - 9];
 }
 av_reduce(&avctx->time_base.num, &avctx->time_base.den,
-frame_rate.den, frame_rate.num, 1<<30);
+frame_rate.den, frame_rate.num, 1 << 30);
 /* [DIRAC_STD] 10.3.6 Pixel Aspect Ratio.
  * pixel_aspect_ratio(video_params) */
@@ -192,7 +193,7 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
  * aspect ratio values */
 if (source->aspect_ratio_index > 0)
 avctx->sample_aspect_ratio =
-dirac_preset_aspect_ratios[source->aspect_ratio_index-1];
+dirac_preset_aspect_ratios[source->aspect_ratio_index - 1];
 /* [DIRAC_STD] 10.3.7 Clean area. clean_area(video_params) */
 if (get_bits1(gb)) { /* [DIRAC_STD] custom_clean_area_flag */
@@ -219,16 +220,17 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
 // This assumes either fullrange or MPEG levels only
 if (!source->pixel_range_index) {
 luma_offset = svq3_get_ue_golomb(gb);
-luma_depth = av_log2(svq3_get_ue_golomb(gb))+1;
+luma_depth = av_log2(svq3_get_ue_golomb(gb)) + 1;
 svq3_get_ue_golomb(gb); /* chroma offset */
 svq3_get_ue_golomb(gb); /* chroma excursion */
-avctx->color_range = luma_offset ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
+avctx->color_range = luma_offset ? AVCOL_RANGE_MPEG
+: AVCOL_RANGE_JPEG;
 }
 }
 /* [DIRAC_STD] Table 10.5
  * Available signal range presets <--> pixel_range_presets */
 if (source->pixel_range_index > 0) {
-idx = source->pixel_range_index-1;
+idx = source->pixel_range_index - 1;
 luma_depth = pixel_range_presets[idx].bitdepth;
 avctx->color_range = pixel_range_presets[idx].color_range;
 }

libavcodec/dirac_parser.c

@@ -31,6 +31,7 @@
 #include "libavutil/intreadwrite.h"
 #include "libavutil/mem.h"
+
 #include "parser.h"
 
 #define DIRAC_PARSE_INFO_PREFIX 0x42424344
@@ -75,11 +76,11 @@ static int find_frame_end(DiracParseContext *pc,
 pc->sync_offset = 0;
 for (; i < buf_size; i++) {
 if (state == DIRAC_PARSE_INFO_PREFIX) {
-if ((buf_size-i) >= pc->header_bytes_needed) {
+if ((buf_size - i) >= pc->header_bytes_needed) {
 pc->state = -1;
 return i + pc->header_bytes_needed;
 } else {
-pc->header_bytes_needed = 9-(buf_size-i);
+pc->header_bytes_needed = 9 - (buf_size - i);
 break;
 }
 } else
@@ -90,8 +91,7 @@ static int find_frame_end(DiracParseContext *pc,
 return -1;
 }
-typedef struct DiracParseUnit
-{
+typedef struct DiracParseUnit {
 int next_pu_offset;
 int prev_pu_offset;
 uint8_t pu_type;
@@ -102,12 +102,12 @@ static int unpack_parse_unit(DiracParseUnit *pu, DiracParseContext *pc,
 {
 uint8_t *start = pc->buffer + offset;
 uint8_t *end = pc->buffer + pc->index;
-if (start < pc->buffer || (start+13 > end))
+if (start < pc->buffer || (start + 13 > end))
 return 0;
 pu->pu_type = start[4];
-pu->next_pu_offset = AV_RB32(start+5);
-pu->prev_pu_offset = AV_RB32(start+9);
+pu->next_pu_offset = AV_RB32(start + 5);
+pu->prev_pu_offset = AV_RB32(start + 9);
 if (pu->pu_type == 0x10 && pu->next_pu_offset == 0)
 pu->next_pu_offset = 13;
@@ -134,13 +134,13 @@ static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
 }
 }
-if ( next == -1) {
+if (next == -1) {
 /* Found a possible frame start but not a frame end */
-void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size,
-pc->index + (*buf_size -
-pc->sync_offset));
+void *new_buffer =
+av_fast_realloc(pc->buffer, &pc->buffer_size,
+pc->index + (*buf_size - pc->sync_offset));
 pc->buffer = new_buffer;
-memcpy(pc->buffer+pc->index, (*buf + pc->sync_offset),
+memcpy(pc->buffer + pc->index, (*buf + pc->sync_offset),
 *buf_size - pc->sync_offset);
 pc->index += *buf_size - pc->sync_offset;
 return -1;
@@ -163,7 +163,7 @@ static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
 !unpack_parse_unit(&pu, pc, pc->index - 13 - pu1.prev_pu_offset) ||
 pu.next_pu_offset != pu1.prev_pu_offset) {
 pc->index -= 9;
-*buf_size = next-9;
+*buf_size = next - 9;
 pc->header_bytes_needed = 9;
 return -1;
 }
@@ -177,7 +177,7 @@ static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
 pc->dirac_unit_size += pu.next_pu_offset;
-if ((pu.pu_type&0x08) != 0x08) {
+if ((pu.pu_type & 0x08) != 0x08) {
 pc->header_bytes_needed = 9;
 *buf_size = next;
 return -1;
@@ -191,7 +191,7 @@ static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
 if (s->last_pts == 0 && s->last_dts == 0)
 s->dts = pts - 1;
 else
-s->dts = s->last_dts+1;
+s->dts = s->last_dts + 1;
 s->pts = pts;
 if (!avctx->has_b_frames && (cur_pu[4] & 0x03))
 avctx->has_b_frames = 1;
@@ -204,7 +204,7 @@ static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
 *buf_size = pc->dirac_unit_size;
 pc->dirac_unit_size = 0;
-pc->overread_index = pc->index-13;
+pc->overread_index = pc->index - 13;
 pc->header_bytes_needed = 9;
 }
 return next;
@@ -227,15 +227,13 @@ static int dirac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
 /* Assume that data has been packetized into an encapsulation unit. */
 } else {
 next = find_frame_end(pc, buf, buf_size);
-if (!pc->is_synced && next == -1) {
+if (!pc->is_synced && next == -1)
 /* No frame start found yet. So throw away the entire buffer. */
 return buf_size;
-}
-if (dirac_combine_frame(s, avctx, next, &buf, &buf_size) < 0) {
+if (dirac_combine_frame(s, avctx, next, &buf, &buf_size) < 0)
 return buf_size;
-}
 }
 *poutbuf = buf;
 *poutbuf_size = buf_size;