New qpel MC functions conforming to the VC-1 standard.

The existing DSPUtil functions cause chroma artifacts on some files.

Originally committed as revision 6139 to svn://svn.ffmpeg.org/ffmpeg/trunk
Kostya Shishkov 2006-08-31 04:44:54 +00:00
parent 2d5eadccb5
commit 74691b7bcb
4 changed files with 189 additions and 14 deletions
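
For context, the new functions implement the bilinear quarter-pel interpolation that VC-1 mandates for chroma MC: the four neighbouring samples are weighted by A=(4-X)*(4-Y), B=X*(4-Y), C=(4-X)*Y and D=X*Y for fractional offsets X, Y in quarter-pel units, the weights sum to 16, and the frame-level rounding bit is folded into the bias before the shift (see the VC1_QPEL_FILTER macro added in vc1dsp.c below). A minimal standalone sketch of that formula, for illustration only and not part of the commit (all names here are hypothetical):

#include <stdint.h>

/* Illustration only: VC-1 style bilinear quarter-pel interpolation of one sample.
 * dx, dy are the fractional offsets in quarter-pel units (0..3), rnd is the
 * frame-level rounding control bit; the weights always sum to 16, hence ">> 4". */
static uint8_t bilin_qpel_sample(const uint8_t *src, int stride, int dx, int dy, int rnd)
{
    int A = (4 - dx) * (4 - dy);
    int B =      dx  * (4 - dy);
    int C = (4 - dx) *      dy;
    int D =      dx  *      dy;
    int v = (A * src[0] + B * src[1] + C * src[stride] + D * src[stride + 1] + 8 - rnd) >> 4;

    return v < 0 ? 0 : (v > 255 ? 255 : v); /* clip to the 8-bit sample range */
}

The committed code additionally special-cases the half-pel positions with shorter averages, as the mc20/mc02/mc22 functions below show.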

libavcodec/dsputil.c

@@ -2598,6 +2598,10 @@ void ff_vc1dsp_init(DSPContext* c, AVCodecContext *avctx);
void ff_put_vc1_mspel_mc00_c(uint8_t *dst, uint8_t *src, int stride, int rnd) {
    put_pixels8_c(dst, src, stride, 8);
}

void ff_put_vc1_qpel_mc00_c(uint8_t *dst, uint8_t *src, int stride, int rnd) {
    put_pixels8_c(dst, src, stride, 8);
}
#endif /* CONFIG_VC1_DECODER||CONFIG_WMV3_DECODER */

static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){

libavcodec/dsputil.h

@@ -396,6 +396,7 @@ typedef struct DSPContext {
     * last argument is actually round value instead of height
     */
    op_pixels_func put_vc1_mspel_pixels_tab[16];
    op_pixels_func put_vc1_qpel_pixels_tab[16];
} DSPContext;

void dsputil_static_init(void);
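
Worth spelling out: op_pixels_func normally takes the block height as its fourth argument, but both VC-1 tables reuse that slot for the rounding control value, which is why the vc1.c call sites below pass v->rnd there. A minimal sketch of that calling convention, illustration only with hypothetical names (not FFmpeg code):

#include <stdint.h>

/* Same shape as op_pixels_func: (dst, src, stride, last). For the two VC-1
 * tables "last" carries the rounding value instead of the block height. */
typedef void (*pix_op)(uint8_t *dst, const uint8_t *src, int stride, int last);

static void copy8x8(uint8_t *dst, const uint8_t *src, int stride, int rnd)
{
    /* full-pel copy: the rounding flag only matters for fractional positions */
    (void)rnd;
    for (int j = 0; j < 8; j++, dst += stride, src += stride)
        for (int i = 0; i < 8; i++)
            dst[i] = src[i];
}

int main(void)
{
    uint8_t src[16 * 16], dst[16 * 16] = { 0 };
    for (int i = 0; i < 16 * 16; i++)
        src[i] = (uint8_t)i;

    pix_op op = copy8x8;
    op(dst, src, 16, /* rnd = */ 1);   /* rounding flag, not a height of 1 */
    return dst[0] == src[0] ? 0 : 1;
}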

libavcodec/vc1.c

@@ -879,13 +879,8 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
    if(s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
    if(!v->rnd){
        dsp->put_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
        dsp->put_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
    }else{
        dsp->put_no_rnd_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
        dsp->put_no_rnd_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
    }
    dsp->put_vc1_qpel_pixels_tab[uvdxy](s->dest[1], srcU, s->uvlinesize, v->rnd);
    dsp->put_vc1_qpel_pixels_tab[uvdxy](s->dest[2], srcV, s->uvlinesize, v->rnd);
}

/** Do motion compensation for 4-MV macroblock - luminance block
@@ -1082,13 +1077,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v)
    /* Chroma MC always uses qpel bilinear */
    uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
    if(!v->rnd){
        dsp->put_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
        dsp->put_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
    }else{
        dsp->put_no_rnd_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
        dsp->put_no_rnd_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
    }
    dsp->put_vc1_qpel_pixels_tab[uvdxy](s->dest[1], srcU, s->uvlinesize, v->rnd);
    dsp->put_vc1_qpel_pixels_tab[uvdxy](s->dest[2], srcV, s->uvlinesize, v->rnd);
}

/**
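
To make the indexing in these two hunks concrete: uvdxy packs the chroma fractional offsets as ((uvmy & 3) << 2) | (uvmx & 3), so entry ((Y << 2) | X) of the 16-element table is exactly ff_put_vc1_qpel_mcXY_c, and v->rnd travels in the slot where other op_pixels_func tables take a height. A small standalone check of that mapping, illustration only (it does not call the real tables):

#include <stdio.h>

/* Illustration only: reproduce the uvdxy packing used in vc1_mc_1mv() /
 * vc1_mc_4mv_chroma() and print which put_vc1_qpel_pixels_tab entry each
 * quarter-pel pair (uvmx, uvmy) selects. */
int main(void)
{
    for (int uvmy = 0; uvmy < 4; uvmy++) {
        for (int uvmx = 0; uvmx < 4; uvmx++) {
            int uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
            printf("uvmx=%d uvmy=%d -> put_vc1_qpel_pixels_tab[%2d] (ff_put_vc1_qpel_mc%d%d_c)\n",
                   uvmx, uvmy, uvdxy, uvmx, uvmy);
        }
    }
    return 0;
}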

libavcodec/vc1dsp.c

@@ -424,6 +424,169 @@ static void ff_put_vc1_mspel_mc33_c(uint8_t *dst, const uint8_t *src, int stride
    vc1_mspel_mc(dst, src, stride, 0xF, rnd);
}

/** Filter used to interpolate fractional pel values,
 * except for the half-pel cases; for _mcXY the weights are:
 * A = (4-X)*(4-Y)
 * B =    X *(4-Y)
 * C = (4-X)*   Y
 * D =    X *   Y
 */
#define VC1_QPEL_FILTER(src, i, stride, rnd, A, B, C, D) \
    clip_uint8((A*src[i] + B*src[i+1] + C*src[i+stride] + D*src[i+stride+1] + 8 - rnd) >> 4)
/* this one is defined in dsputil.c */
void ff_put_vc1_qpel_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int rnd);

static void ff_put_vc1_qpel_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 12, 4, 0, 0);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = clip_uint8((src[i] + src[i + 1] + 1 - rnd) >> 1);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc30_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 4, 12, 0, 0);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 12, 0, 4, 0);
        dst += stride;
        src += stride;
    }
}
static void ff_put_vc1_qpel_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 9, 3, 3, 1);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 6, 6, 2, 2);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc31_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 3, 9, 1, 3);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = clip_uint8((src[i] + src[i + stride] + 1 - rnd) >> 1);
        dst += stride;
        src += stride;
    }
}
static void ff_put_vc1_qpel_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 6, 2, 6, 2);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = clip_uint8((src[i] + src[i + 1] + src[i + stride] + src[i + stride + 1] + 2 - rnd) >> 2);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc32_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 2, 6, 2, 6);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc03_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 4, 0, 12, 0);
        dst += stride;
        src += stride;
    }
}
static void ff_put_vc1_qpel_mc13_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 3, 1, 9, 3);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc23_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 2, 2, 6, 6);
        dst += stride;
        src += stride;
    }
}

static void ff_put_vc1_qpel_mc33_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    int i, j;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
            dst[i] = VC1_QPEL_FILTER(src, i, stride, rnd, 1, 3, 3, 9);
        dst += stride;
        src += stride;
    }
}
void ff_vc1dsp_init(DSPContext* dsp, AVCodecContext *avctx) {
    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c;
    dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c;

@@ -448,4 +611,21 @@ void ff_vc1dsp_init(DSPContext* dsp, AVCodecContext *avctx) {
    dsp->put_vc1_mspel_pixels_tab[13] = ff_put_vc1_mspel_mc13_c;
    dsp->put_vc1_mspel_pixels_tab[14] = ff_put_vc1_mspel_mc23_c;
    dsp->put_vc1_mspel_pixels_tab[15] = ff_put_vc1_mspel_mc33_c;

    dsp->put_vc1_qpel_pixels_tab[ 0] = ff_put_vc1_qpel_mc00_c;
    dsp->put_vc1_qpel_pixels_tab[ 1] = ff_put_vc1_qpel_mc10_c;
    dsp->put_vc1_qpel_pixels_tab[ 2] = ff_put_vc1_qpel_mc20_c;
    dsp->put_vc1_qpel_pixels_tab[ 3] = ff_put_vc1_qpel_mc30_c;
    dsp->put_vc1_qpel_pixels_tab[ 4] = ff_put_vc1_qpel_mc01_c;
    dsp->put_vc1_qpel_pixels_tab[ 5] = ff_put_vc1_qpel_mc11_c;
    dsp->put_vc1_qpel_pixels_tab[ 6] = ff_put_vc1_qpel_mc21_c;
    dsp->put_vc1_qpel_pixels_tab[ 7] = ff_put_vc1_qpel_mc31_c;
    dsp->put_vc1_qpel_pixels_tab[ 8] = ff_put_vc1_qpel_mc02_c;
    dsp->put_vc1_qpel_pixels_tab[ 9] = ff_put_vc1_qpel_mc12_c;
    dsp->put_vc1_qpel_pixels_tab[10] = ff_put_vc1_qpel_mc22_c;
    dsp->put_vc1_qpel_pixels_tab[11] = ff_put_vc1_qpel_mc32_c;
    dsp->put_vc1_qpel_pixels_tab[12] = ff_put_vc1_qpel_mc03_c;
    dsp->put_vc1_qpel_pixels_tab[13] = ff_put_vc1_qpel_mc13_c;
    dsp->put_vc1_qpel_pixels_tab[14] = ff_put_vc1_qpel_mc23_c;
    dsp->put_vc1_qpel_pixels_tab[15] = ff_put_vc1_qpel_mc33_c;
}
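
As a sanity check on the coefficient tables above, and for illustration only (not part of the commit): for every quarter-pel pair (X, Y) the bilinear weights fed to VC1_QPEL_FILTER sum to 16, which is what makes the "+ 8 - rnd) >> 4" normalization correct; the half-pel positions (_mc20, _mc02, _mc22) are simply special-cased above with shorter, equivalent averages. A standalone program that re-derives the weights and verifies the sum:

#include <stdio.h>

/* Illustration only: re-derive the VC-1 bilinear quarter-pel weights
 * A=(4-X)*(4-Y), B=X*(4-Y), C=(4-X)*Y, D=X*Y and check that they always
 * sum to 16, matching the ">> 4" in VC1_QPEL_FILTER. */
int main(void)
{
    int ok = 1;

    for (int y = 0; y < 4; y++) {
        for (int x = 0; x < 4; x++) {
            int A = (4 - x) * (4 - y);
            int B =      x  * (4 - y);
            int C = (4 - x) *      y;
            int D =      x  *      y;

            printf("mc%d%d: A=%2d B=%2d C=%2d D=%2d sum=%d\n",
                   x, y, A, B, C, D, A + B + C + D);
            if (A + B + C + D != 16)
                ok = 0;
        }
    }
    return ok ? 0 : 1;
}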