third_party_ffmpeg/tests/checkasm/vp9dsp.c
Martin Storsjö 9c8bc74c2b arm: vp9itxfm: Skip empty slices in the first pass of idct_idct 16x16 and 32x32
This work is sponsored by, and copyright, Google.

Previously all subpartitions except the eob=1 (DC) case ran with
the same runtime:

                                     Cortex A7       A8       A9      A53
vp9_inv_dct_dct_16x16_sub16_add_neon:   3188.1   2435.4   2499.0   1969.0
vp9_inv_dct_dct_32x32_sub32_add_neon:  18531.7  16582.3  14207.6  12000.3

By skipping individual 4x16 or 4x32 pixel slices in the first pass,
we reduce the runtime of these functions like this:

vp9_inv_dct_dct_16x16_sub1_add_neon:     274.6    189.5    211.7    235.8
vp9_inv_dct_dct_16x16_sub2_add_neon:    2064.0   1534.8   1719.4   1248.7
vp9_inv_dct_dct_16x16_sub4_add_neon:    2135.0   1477.2   1736.3   1249.5
vp9_inv_dct_dct_16x16_sub8_add_neon:    2446.7   1828.7   1993.6   1494.7
vp9_inv_dct_dct_16x16_sub12_add_neon:   2832.4   2118.3   2266.5   1735.1
vp9_inv_dct_dct_16x16_sub16_add_neon:   3211.7   2475.3   2523.5   1983.1
vp9_inv_dct_dct_32x32_sub1_add_neon:     756.2    456.7    862.0    553.9
vp9_inv_dct_dct_32x32_sub2_add_neon:   10682.2   8190.4   8539.2   6762.5
vp9_inv_dct_dct_32x32_sub4_add_neon:   10813.5   8014.9   8518.3   6762.8
vp9_inv_dct_dct_32x32_sub8_add_neon:   11859.6   9313.0   9347.4   7514.5
vp9_inv_dct_dct_32x32_sub12_add_neon:  12946.6  10752.4  10192.2   8280.2
vp9_inv_dct_dct_32x32_sub16_add_neon:  14074.6  11946.5  11001.4   9008.6
vp9_inv_dct_dct_32x32_sub20_add_neon:  15269.9  13662.7  11816.1   9762.6
vp9_inv_dct_dct_32x32_sub24_add_neon:  16327.9  14940.1  12626.7  10516.0
vp9_inv_dct_dct_32x32_sub28_add_neon:  17462.7  15776.1  13446.2  11264.7
vp9_inv_dct_dct_32x32_sub32_add_neon:  18575.5  17157.0  14249.3  12015.1

I.e. in general a very minor overhead for the full subpartition case due
to the additional loads and cmps, but a significant speedup for the cases
when we only need to process a small part of the actual input data.

In common VP9 content in a few inspected clips, 70-90% of the non-dc-only
16x16 and 32x32 IDCTs only have nonzero coefficients in the upper left
8x8 or 16x16 subpartitions respectively.

Signed-off-by: Martin Storsjö <martin@martin.st>
2016-11-30 23:54:07 +02:00

566 lines
20 KiB
C

/*
* Copyright (c) 2015 Ronald S. Bultje <rsbultje@gmail.com>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with Libav; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <math.h>
#include <string.h>
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavcodec/vp9.h"
#include "libavcodec/vp9data.h"
#include "checkasm.h"
/* Pixel value masks for 8, 10 and 12 bit content, indexed by (BIT_DEPTH - 8) >> 1 */
static const uint32_t pixel_mask[3] = { 0xffffffff, 0x03ff03ff, 0x0fff0fff };
#define BIT_DEPTH 8
/* Bytes per pixel: 1 for 8 bit content, 2 for 10/12 bit */
#define SIZEOF_PIXEL ((BIT_DEPTH + 7) / 8)
/*
 * Fill src and dst with random pixels (masked to the bit depth) and store
 * their per-pixel difference (the residual) in coef. Expects y, x, sz, src,
 * dst and coef to be in scope at the expansion site; for BIT_DEPTH > 8 the
 * buffers are reinterpreted as uint16_t pixels / int32_t coefficients.
 */
#define randomize_buffers() \
    do { \
        uint32_t mask = pixel_mask[(BIT_DEPTH - 8) >> 1]; \
        for (y = 0; y < sz; y++) { \
            for (x = 0; x < sz * SIZEOF_PIXEL; x += 4) { \
                uint32_t r = rnd() & mask; \
                AV_WN32A(dst + y * sz * SIZEOF_PIXEL + x, r); \
                AV_WN32A(src + y * sz * SIZEOF_PIXEL + x, rnd() & mask); \
            } \
            for (x = 0; x < sz; x++) { \
                if (BIT_DEPTH == 8) { \
                    coef[y * sz + x] = src[y * sz + x] - dst[y * sz + x]; \
                } else { \
                    ((int32_t *) coef)[y * sz + x] = \
                        ((uint16_t *) src)[y * sz + x] - \
                        ((uint16_t *) dst)[y * sz + x]; \
                } \
            } \
        } \
    } while(0)
// wht function copied from libvpx
static void fwht_1d(double *out, const double *in, int sz)
{
double t0 = in[0] + in[1];
double t3 = in[3] - in[2];
double t4 = trunc((t0 - t3) * 0.5);
double t1 = t4 - in[1];
double t2 = t4 - in[2];
out[0] = t0 - t2;
out[1] = t2;
out[2] = t3 + t1;
out[3] = t1;
}
// Reference 1D DCT-II of length sz, with the DC term scaled by 1/sqrt(2).
static void fdct_1d(double *out, const double *in, int sz)
{
    int k, n;

    for (k = 0; k < sz; k++) {
        double acc = 0.0;
        for (n = 0; n < sz; n++)
            acc += in[n] * cos(M_PI * (2 * n + 1) * k / (sz * 2.0));
        out[k] = acc;
    }
    out[0] *= M_SQRT1_2;
}
// Reference 1D ADST used for the 4-point transform.
// See "Towards jointly optimal spatial prediction and adaptive transform in
// video/image coding", by J. Han, A. Saxena, and K. Rose,
// IEEE Proc. ICASSP, pp. 726-729, Mar. 2010.
static void fadst4_1d(double *out, const double *in, int sz)
{
    int k, n;

    for (k = 0; k < sz; k++) {
        double acc = 0.0;
        for (n = 0; n < sz; n++)
            acc += in[n] * sin(M_PI * (n + 1) * (2 * k + 1) / (sz * 2.0 + 1.0));
        out[k] = acc;
    }
}
// Reference 1D ADST used for the 8- and 16-point transforms.
// See "A Butterfly Structured Design of The Hybrid Transform Coding Scheme",
// by Jingning Han, Yaowu Xu, and Debargha Mukherjee,
// http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/41418.pdf
static void fadst_1d(double *out, const double *in, int sz)
{
    int k, n;

    for (k = 0; k < sz; k++) {
        double acc = 0.0;
        for (n = 0; n < sz; n++)
            acc += in[n] * sin(M_PI * (2 * n + 1) * (2 * k + 1) / (sz * 4.0));
        out[k] = acc;
    }
}
typedef void (*ftx1d_fn)(double *out, const double *in, int sz);
/*
 * Reference 2D forward transform: a column pass (scaled and transposed into
 * a scratch buffer) followed by a row pass. tx selects the transform size
 * (index 4 = 4x4 WHT), txtp the DCT/ADST combination.
 */
static void ftx_2d(double *out, const double *in, enum TxfmMode tx,
                   enum TxfmType txtp, int sz)
{
    /* normalization factors matching the fixed-point inverse transforms */
    static const double scaling_factors[5][4] = {
        { 4.0, 16.0 * M_SQRT1_2 / 3.0, 16.0 * M_SQRT1_2 / 3.0, 32.0 / 9.0 },
        { 2.0, 2.0, 2.0, 2.0 },
        { 1.0, 1.0, 1.0, 1.0 },
        { 0.25 },
        { 4.0 }
    };
    /* [tx][txtp][pass]: 1D transform for the column and row passes */
    static const ftx1d_fn ftx1d_tbl[5][4][2] = {
        {
            { fdct_1d, fdct_1d },
            { fadst4_1d, fdct_1d },
            { fdct_1d, fadst4_1d },
            { fadst4_1d, fadst4_1d },
        }, {
            { fdct_1d, fdct_1d },
            { fadst_1d, fdct_1d },
            { fdct_1d, fadst_1d },
            { fadst_1d, fadst_1d },
        }, {
            { fdct_1d, fdct_1d },
            { fadst_1d, fdct_1d },
            { fdct_1d, fadst_1d },
            { fadst_1d, fadst_1d },
        }, {
            { fdct_1d, fdct_1d },
        }, {
            { fwht_1d, fwht_1d },
        },
    };
    double transposed[1024];
    const double scale = scaling_factors[tx][txtp];
    ftx1d_fn col_fn = ftx1d_tbl[tx][txtp][0];
    ftx1d_fn row_fn = ftx1d_tbl[tx][txtp][1];
    int n, m;

    /* column pass: transform, scale and store transposed */
    for (n = 0; n < sz; n++) {
        double line[32];

        col_fn(line, &in[n * sz], sz);
        for (m = 0; m < sz; m++)
            transposed[m * sz + n] = line[m] * scale;
    }
    /* row pass on the transposed intermediate */
    for (n = 0; n < sz; n++)
        row_fn(&out[n * sz], &transposed[n * sz], sz);
}
/*
 * Forward-transform the coefficient buffer in place: widen to double,
 * run the reference 2D transform, and round the result back into buf
 * (int16_t for 8 bit content, int32_t otherwise).
 */
static void ftx(int16_t *buf, enum TxfmMode tx,
                enum TxfmType txtp, int sz, int bit_depth)
{
    double input[1024], output[1024];
    int i;

    emms_c();
    for (i = 0; i < sz * sz; i++)
        input[i] = bit_depth == 8 ? buf[i] : ((int32_t *) buf)[i];
    ftx_2d(output, input, tx, txtp, sz);
    for (i = 0; i < sz * sz; i++) {
        if (bit_depth == 8)
            buf[i] = lrint(output[i]);
        else
            ((int32_t *) buf)[i] = lrint(output[i]);
    }
}
/*
 * Copy the top-left coefficients such that the return value (being the
 * coefficient scantable index for the eob token) guarantees that only
 * the top-left $sub out of $sz (where $sz >= $sub) coefficients in both
 * dimensions are non-zero. This leads to branching to specific optimized
 * SIMD versions (e.g. dc-only) so that we get full asm coverage in this
 * test.
 */
static int copy_subcoefs(int16_t *out, const int16_t *in, enum TxfmMode tx,
                         enum TxfmType txtp, int sz, int sub, int bit_depth)
{
    const int16_t *scan = ff_vp9_scans[tx][txtp];
    int eob = sz * sz;
    int n;

    for (n = 0; n < sz * sz; n++) {
        int rc = scan[n];

        /* latch eob at the first scan position outside the sub x sub
         * top-left region; everything from there on is zeroed */
        if (eob > n && (rc % sz >= sub || rc / sz >= sub))
            eob = n;
        if (n < eob) {
            if (bit_depth == 8)
                out[rc] = in[rc];
            else
                AV_COPY32(&out[rc * 2], &in[rc * 2]);
        } else {
            if (bit_depth == 8)
                out[rc] = 0;
            else
                AV_ZERO32(&out[rc * 2]);
        }
    }
    return eob;
}
/* Return 1 if the buffer (sz is a byte count) contains only zeros,
 * scanning one aligned 32-bit word (two int16_t) at a time. */
static int iszero(const int16_t *c, int sz)
{
    int pos = 0;

    while (pos < sz / sizeof(int16_t)) {
        if (AV_RN32A(&c[pos]))
            return 0;
        pos += 2;
    }
    return 1;
}
/* Bytes per coefficient: 2 for 8 bit content, 4 (int32_t) for 10/12 bit */
#define SIZEOF_COEF (2 * ((BIT_DEPTH + 7) / 8))
/*
 * Test the inverse transform-and-add functions (dsp.itxfm_add) for every
 * transform size, transform type and eob subpartition: a random residual is
 * forward-transformed with the floating-point reference (ftx), optionally
 * truncated to the top-left sub x sub coefficients (copy_subcoefs), and the
 * reference and SIMD inverse transforms are then run on identical inputs
 * and their pixel output and zeroed coefficient buffers compared.
 */
static void check_itxfm(void)
{
    LOCAL_ALIGNED_32(uint8_t, src, [32 * 32 * 2]);
    LOCAL_ALIGNED(32, uint8_t, dst, [32 * 32 * 2]);
    LOCAL_ALIGNED(32, uint8_t, dst0, [32 * 32 * 2]);
    LOCAL_ALIGNED(32, uint8_t, dst1, [32 * 32 * 2]);
    LOCAL_ALIGNED(32, int16_t, coef, [32 * 32 * 2]);
    LOCAL_ALIGNED(32, int16_t, subcoef0, [32 * 32 * 2]);
    LOCAL_ALIGNED(32, int16_t, subcoef1, [32 * 32 * 2]);
    declare_func(void, uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
    VP9DSPContext dsp;
    int y, x, tx, txtp, sub;
    static const char *const txtp_types[N_TXFM_TYPES] = {
        [DCT_DCT] = "dct_dct", [DCT_ADST] = "adst_dct",
        [ADST_DCT] = "dct_adst", [ADST_ADST] = "adst_adst"
    };
    ff_vp9dsp_init(&dsp);
    /* tx == N_TXFM_SIZES (one past the last DCT size) selects the 4x4 WHT */
    for (tx = TX_4X4; tx <= N_TXFM_SIZES /* 4 = lossless */; tx++) {
        int sz = 4 << (tx & 3);
        int n_txtps = tx < TX_32X32 ? N_TXFM_TYPES : 1;
        for (txtp = 0; txtp < n_txtps; txtp++) {
            // skip testing sub-IDCTs for WHT or ADST since they don't
            // implement it in any of the SIMD functions. If they do,
            // consider changing this to ensure we have complete test
            // coverage. Test sub=1 for dc-only, then 2, 4, 8, 12, etc,
            // since the arm version can distinguish them at that level.
            for (sub = (txtp == 0 && tx < 4) ? 1 : sz; sub <= sz;
                 sub < 4 ? (sub <<= 1) : (sub += 4)) {
                if (check_func(dsp.itxfm_add[tx][txtp],
                               "vp9_inv_%s_%dx%d_sub%d_add",
                               tx == 4 ? "wht_wht" : txtp_types[txtp],
                               sz, sz, sub)) {
                    int eob;
                    randomize_buffers();
                    ftx(coef, tx, txtp, sz, BIT_DEPTH);
                    if (sub < sz) {
                        eob = copy_subcoefs(subcoef0, coef, tx, txtp,
                                            sz, sub, BIT_DEPTH);
                    } else {
                        /* full transform: eob points one past the last coef */
                        eob = sz * sz;
                        memcpy(subcoef0, coef, sz * sz * SIZEOF_COEF);
                    }
                    /* both implementations must start from identical state */
                    memcpy(dst0, dst, sz * sz * SIZEOF_PIXEL);
                    memcpy(dst1, dst, sz * sz * SIZEOF_PIXEL);
                    memcpy(subcoef1, subcoef0, sz * sz * SIZEOF_COEF);
                    call_ref(dst0, sz * SIZEOF_PIXEL, subcoef0, eob);
                    call_new(dst1, sz * SIZEOF_PIXEL, subcoef1, eob);
                    /* itxfm_add must also clear the coefficient buffer */
                    if (memcmp(dst0, dst1, sz * sz * SIZEOF_PIXEL) ||
                        !iszero(subcoef0, sz * sz * SIZEOF_COEF) ||
                        !iszero(subcoef1, sz * sz * SIZEOF_COEF))
                        fail();
                    bench_new(dst, sz * SIZEOF_PIXEL, coef, eob);
                }
            }
        }
    }
    report("itxfm");
}
#undef randomize_buffers
/* setpx(a, b, c): write pixel value c, clipped to the bit depth, at column a,
 * row b of buf0 (jstride must be in scope as the row stride) */
#define setpx(a,b,c) \
    do { \
        if (SIZEOF_PIXEL == 1) { \
            buf0[(a) + (b) * jstride] = av_clip_uint8(c); \
        } else { \
            ((uint16_t *)buf0)[(a) + (b) * jstride] = av_clip_uintp2(c, BIT_DEPTH); \
        } \
    } while (0)
/* setdx(a, b, c, d): as setpx, but with a random delta of at most +/-d added */
#define setdx(a,b,c,d) setpx(a,b,c-(d)+(rnd()%((d)*2+1)))
/* setsx(a, b, c, d): as setdx, with the delta range scaled for the bit depth */
#define setsx(a,b,c,d) setdx(a,b,c,(d) << (BIT_DEPTH - 8))
/*
 * Fill buf0 with pixel lines around one 8-pixel filtering edge, crafted to
 * exercise the different loop filter paths:
 *   lines 0-1: values kept close across 8 pixels on each side (flat16 case)
 *   lines 2-3: close across 4 pixels per side, random beyond (flat8 case)
 *   lines 4-5: deltas bounded by the E/I thresholds (regular filter case)
 *   lines 6-7: fully random (the "off" case)
 * dir selects vertical vs. horizontal edge layout (it swaps the roles of
 * the i/j strides); bidx indexes the E/F/H/I threshold tables and lineoff
 * offsets the lines being written. buf1 is not written here — the caller
 * copies buf0 into it. H is accepted for signature symmetry but unused.
 */
static void randomize_loopfilter_buffers(int bidx, int lineoff, int str,
                                         int bit_depth, int dir,
                                         const int *E, const int *F,
                                         const int *H, const int *I,
                                         uint8_t *buf0, uint8_t *buf1)
{
    uint32_t mask = (1 << BIT_DEPTH) - 1;
    int off = dir ? lineoff : lineoff * 16;
    int istride = dir ? 1 : 16;
    int jstride = dir ? str : 1;
    int i, j;
    for (i = 0; i < 2; i++) /* flat16 */ {
        int idx = off + i * istride, p0, q0;
        setpx(idx, 0, q0 = rnd() & mask);
        setsx(idx, -1, p0 = q0, E[bidx] >> 2);
        for (j = 1; j < 8; j++) {
            setsx(idx, -1 - j, p0, F[bidx]);
            setsx(idx, j, q0, F[bidx]);
        }
    }
    for (i = 2; i < 4; i++) /* flat8 */ {
        int idx = off + i * istride, p0, q0;
        setpx(idx, 0, q0 = rnd() & mask);
        setsx(idx, -1, p0 = q0, E[bidx] >> 2);
        for (j = 1; j < 4; j++) {
            setsx(idx, -1 - j, p0, F[bidx]);
            setsx(idx, j, q0, F[bidx]);
        }
        for (j = 4; j < 8; j++) {
            setpx(idx, -1 - j, rnd() & mask);
            setpx(idx, j, rnd() & mask);
        }
    }
    for (i = 4; i < 6; i++) /* regular */ {
        int idx = off + i * istride, p2, p1, p0, q0, q1, q2;
        setpx(idx, 0, q0 = rnd() & mask);
        setsx(idx, 1, q1 = q0, I[bidx]);
        setsx(idx, 2, q2 = q1, I[bidx]);
        setsx(idx, 3, q2, I[bidx]);
        setsx(idx, -1, p0 = q0, E[bidx] >> 2);
        setsx(idx, -2, p1 = p0, I[bidx]);
        setsx(idx, -3, p2 = p1, I[bidx]);
        setsx(idx, -4, p2, I[bidx]);
        for (j = 4; j < 8; j++) {
            setpx(idx, -1 - j, rnd() & mask);
            setpx(idx, j, rnd() & mask);
        }
    }
    for (i = 6; i < 8; i++) /* off */ {
        int idx = off + i * istride;
        for (j = 0; j < 8; j++) {
            setpx(idx, -1 - j, rnd() & mask);
            setpx(idx, j, rnd() & mask);
        }
    }
}
/* Forward to randomize_loopfilter_buffers() with the local E/F/H/I tables
 * and the current dir */
#define randomize_buffers(bidx, lineoff, str) \
        randomize_loopfilter_buffers(bidx, lineoff, str, BIT_DEPTH, dir, \
                                     E, F, H, I, buf0, buf1)

/*
 * Test the VP9 loop filter functions: the 8-pixel filters (loop_filter_8),
 * the 16-pixel wd16 filter (loop_filter_16) and the mix2 variants that
 * filter two adjacent 8-pixel blocks with independent thresholds, in both
 * horizontal and vertical direction. The reference and SIMD versions run
 * on identical buffers and the full surrounding area is compared so that
 * out-of-bounds writes are caught too.
 */
static void check_loopfilter(void)
{
    LOCAL_ALIGNED_32(uint8_t, base0, [32 + 16 * 16 * 2]);
    LOCAL_ALIGNED_32(uint8_t, base1, [32 + 16 * 16 * 2]);
    VP9DSPContext dsp;
    int dir, wd, wd2;
    static const char *const dir_name[2] = { "h", "v" };
    /* filter thresholds: E = edge, I = interior, H = hev, F = flatness slack */
    static const int E[2] = { 20, 28 }, I[2] = { 10, 16 };
    static const int H[2] = { 7, 11 }, F[2] = { 1, 1 };
    declare_func(void, uint8_t *dst, ptrdiff_t stride, int E, int I, int H);
    ff_vp9dsp_init(&dsp);
    for (dir = 0; dir < 2; dir++) {
        uint8_t *buf0, *buf1;
        /* midoff places the filtered edge inside the buffer; the aligned
         * variant keeps the function's dst pointer 32-byte aligned */
        int midoff = (dir ? 8 * 8 : 8) * SIZEOF_PIXEL;
        int midoff_aligned = (dir ? 8 * 8 : 16) * SIZEOF_PIXEL;
        buf0 = base0 + midoff_aligned;
        buf1 = base1 + midoff_aligned;
        for (wd = 0; wd < 3; wd++) {
            // 4/8/16wd_8px
            if (check_func(dsp.loop_filter_8[wd][dir],
                           "vp9_loop_filter_%s_%d_8",
                           dir_name[dir], 4 << wd)) {
                randomize_buffers(0, 0, 8);
                memcpy(buf1 - midoff, buf0 - midoff,
                       16 * 8 * SIZEOF_PIXEL);
                call_ref(buf0, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                call_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 8 * SIZEOF_PIXEL))
                    fail();
                bench_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
            }
        }
        midoff = (dir ? 16 * 8 : 8) * SIZEOF_PIXEL;
        midoff_aligned = (dir ? 16 * 8 : 16) * SIZEOF_PIXEL;
        buf0 = base0 + midoff_aligned;
        buf1 = base1 + midoff_aligned;
        // 16wd_16px loopfilter
        if (check_func(dsp.loop_filter_16[dir],
                       "vp9_loop_filter_%s_16_16",
                       dir_name[dir])) {
            randomize_buffers(0, 0, 16);
            randomize_buffers(0, 8, 16);
            memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL);
            call_ref(buf0, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
            call_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
            if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL))
                fail();
            bench_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
        }
        for (wd = 0; wd < 2; wd++) {
            for (wd2 = 0; wd2 < 2; wd2++) {
                // mix2 loopfilter
                if (check_func(dsp.loop_filter_mix2[wd][wd2][dir],
                               "vp9_loop_filter_mix2_%s_%d%d_16",
                               dir_name[dir], 4 << wd, 4 << wd2)) {
                    randomize_buffers(0, 0, 16);
                    randomize_buffers(1, 8, 16);
                    memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL);
/* mix2 takes both blocks' thresholds packed into one int: low byte for the
 * first block, next byte for the second */
#define M(a) ((a[1] << 8) | a[0])
                    call_ref(buf0, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
                    call_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
                    if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL))
                        fail();
                    bench_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
#undef M
                }
            }
        }
    }
    report("loopfilter");
}
#undef setsx
#undef setpx
#undef setdx
#undef randomize_buffers
/* Geometry of the motion compensation test buffers (size is in scope) */
#define DST_BUF_SIZE (size * size * SIZEOF_PIXEL)
#define SRC_BUF_STRIDE 72
#define SRC_BUF_SIZE ((size + 7) * SRC_BUF_STRIDE * SIZEOF_PIXEL)
/* src points 3 rows + 3 columns into buf — presumably margin for the
 * subpel filter taps around the block; NOTE(review): confirm against the
 * mc function contract */
#define src (buf + 3 * SIZEOF_PIXEL * (SRC_BUF_STRIDE + 1))
/*
 * Fill the reference buffer with random pixels; for the avg (op == 1)
 * functions, also fill both destination buffers with identical random data
 * since avg reads the previous destination contents.
 */
#define randomize_buffers() \
    do { \
        uint32_t mask = pixel_mask[(BIT_DEPTH - 8) >> 1]; \
        int k; \
        for (k = 0; k < SRC_BUF_SIZE; k += 4) { \
            uint32_t r = rnd() & mask; \
            AV_WN32A(buf + k, r); \
        } \
        if (op == 1) { \
            for (k = 0; k < DST_BUF_SIZE; k += 4) { \
                uint32_t r = rnd() & mask; \
                AV_WN32A(dst0 + k, r); \
                AV_WN32A(dst1 + k, r); \
            } \
        } \
    } while (0)
/*
 * Test the motion compensation functions (dsp.mc) for both put and avg
 * operations, all block sizes (64 down to 4), the three 8-tap filters plus
 * bilinear, and all fullpel/subpel (h/v/hv) combinations, comparing SIMD
 * output against the C reference on random input.
 */
static void check_mc(void)
{
    static const char *const filter_names[4] = {
        "8tap_smooth", "8tap_regular", "8tap_sharp", "bilin"
    };
    static const char *const subpel_names[2][2] = { { "", "h" }, { "v", "hv" } };
    static const char *const op_names[2] = { "put", "avg" };
    LOCAL_ALIGNED_32(uint8_t, buf, [72 * 72 * 2]);
    LOCAL_ALIGNED_32(uint8_t, dst0, [64 * 64 * 2]);
    LOCAL_ALIGNED_32(uint8_t, dst1, [64 * 64 * 2]);
    char str[256];
    VP9DSPContext dsp;
    int op, hsize, filter, dx, dy;
    declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT,
                      void, uint8_t *dst, ptrdiff_t dst_stride,
                      const uint8_t *ref, ptrdiff_t ref_stride,
                      int h, int mx, int my);
    for (op = 0; op < 2; op++) {
        ff_vp9dsp_init(&dsp);
        for (hsize = 0; hsize < 5; hsize++) {
            int size = 64 >> hsize;
            for (filter = 0; filter < 4; filter++) {
                for (dx = 0; dx < 2; dx++) {
                    for (dy = 0; dy < 2; dy++) {
                        if (dx || dy) {
                            snprintf(str, sizeof(str), "%s_%s_%d%s", op_names[op],
                                     filter_names[filter], size,
                                     subpel_names[dy][dx]);
                        } else {
                            /* fullpel: the filter is irrelevant, so the name
                             * omits it */
                            snprintf(str, sizeof(str), "%s%d", op_names[op], size);
                        }
                        if (check_func(dsp.mc[hsize][filter][op][dx][dy],
                                       "vp9_%s", str)) {
                            /* nonzero subpel phase only along active axes */
                            int mx = dx ? 1 + (rnd() % 14) : 0;
                            int my = dy ? 1 + (rnd() % 14) : 0;
                            randomize_buffers();
                            call_ref(dst0, size * SIZEOF_PIXEL,
                                     src, SRC_BUF_STRIDE * SIZEOF_PIXEL,
                                     size, mx, my);
                            call_new(dst1, size * SIZEOF_PIXEL,
                                     src, SRC_BUF_STRIDE * SIZEOF_PIXEL,
                                     size, mx, my);
                            if (memcmp(dst0, dst1, DST_BUF_SIZE))
                                fail();
                            // SIMD implementations for each filter of subpel
                            // functions are identical
                            if (filter >= 1 && filter <= 2) continue;
                            bench_new(dst1, size * SIZEOF_PIXEL,
                                      src, SRC_BUF_STRIDE * SIZEOF_PIXEL,
                                      size, mx, my);
                        }
                    }
                }
            }
        }
    }
    report("mc");
}
/* Entry point invoked by the checkasm framework for the VP9 DSP tests */
void checkasm_check_vp9dsp(void)
{
    check_itxfm();
    check_loopfilter();
    check_mc();
}