1 /*
2 * Copyright (c) 2015 Ronald S. Bultje <rsbultje@gmail.com>
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21 #include <math.h>
22 #include <string.h>
30
/* Per-bit-depth masks ANDed with rnd() output so every generated 32-bit word
 * contains only valid pixel values: 8 bpp keeps all 32 bits (four 8-bit
 * pixels), 10 bpp keeps 10 bits in each 16-bit lane, 12 bpp keeps 12 bits.
 * Indexed by (bit_depth - 8) >> 1. */
static const uint32_t pixel_mask[3] = { 0xffffffff, 0x03ff03ff, 0x0fff0fff };
/* Bytes per pixel for the current bit_depth: 1 at 8 bpp, 2 at 10/12 bpp. */
#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
33
/* Fill the intra-prediction edge buffers with random pixels: `a` (the "above"
 * row) is written from 4 bytes before its start (covering the top-left
 * sample) up to max(8, size) pixels, and `l` (the "left" column) gets exactly
 * `size` pixels. All stores are aligned 32-bit writes (AV_WN32A) masked so
 * each lane holds a valid bit_depth pixel. Expects `bit_depth`, `size`, `a`
 * and `l` in the caller's scope. */
#define randomize_buffers()                                        \
    do {                                                           \
        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1];          \
        int k;                                                     \
        for (k = -4; k < SIZEOF_PIXEL * FFMAX(8, size); k += 4) {  \
            uint32_t r = rnd() & mask;                             \
            AV_WN32A(a + k, r);                                    \
        }                                                          \
        for (k = 0; k < size * SIZEOF_PIXEL; k += 4) {             \
            uint32_t r = rnd() & mask;                             \
            AV_WN32A(l + k, r);                                    \
        }                                                          \
    } while (0)
47
/* NOTE(review): fragment of check_ipred() from FFmpeg's checkasm vp9dsp test.
 * The text carries original line numbers and many lines are missing
 * (50-74, 78, 80, 82-83, 85-86, 89, 95), so it cannot compile as-is.
 * Kept byte-identical pending recovery of the full source. The visible
 * skeleton loops over bit depths (8/10/12) and transform sizes, compares a
 * reference vs. new intra-pred implementation with memcmp, then benchmarks
 * the new one. */
49 {
75 };
76
77 for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
79 for (tx = 0; tx < 4; tx++) {
81
84 mode_names[mode], size, size, bit_depth)) {
87 call_new(dst1, size * SIZEOF_PIXEL, l, a);
88 if (memcmp(dst0, dst1, size * size * SIZEOF_PIXEL))
90 bench_new(dst1, size * SIZEOF_PIXEL,l, a);
91 }
92 }
93 }
94 }
96 }
97
98 #undef randomize_buffers
99
/* Fill `dst` and `src` with random pixels of the current bit_depth and
 * derive `coef` as the per-sample residual src - dst, so that adding the
 * (forward-then-inverse-transformed) coefficients to dst should reconstruct
 * src. At 8 bpp coefficients are int16_t; at 10/12 bpp samples are uint16_t
 * and coefficients int32_t. Expects `bit_depth`, `sz`, `x`, `y`, `dst`,
 * `src` and `coef` in the caller's scope. */
#define randomize_buffers()                                             \
    do {                                                                \
        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1];               \
        for (y = 0; y < sz; y++) {                                      \
            for (x = 0; x < sz * SIZEOF_PIXEL; x += 4) {                \
                uint32_t r = rnd() & mask;                              \
                AV_WN32A(dst + y * sz * SIZEOF_PIXEL + x, r);           \
                AV_WN32A(src + y * sz * SIZEOF_PIXEL + x, rnd() & mask); \
            }                                                           \
            for (x = 0; x < sz; x++) {                                  \
                if (bit_depth == 8) {                                   \
                    coef[y * sz + x] = src[y * sz + x] - dst[y * sz + x]; \
                } else {                                                \
                    ((int32_t *) coef)[y * sz + x] =                    \
                        ((uint16_t *) src)[y * sz + x] -                \
                        ((uint16_t *) dst)[y * sz + x];                 \
                }                                                       \
            }                                                           \
        }                                                               \
    } while (0)
120
/* NOTE(review): 1-D 4-point Walsh-Hadamard transform (double precision),
 * copied from libvpx. The signature line (122) and the final output
 * assignments (lines 130-133, writing t0..t3 into out[]) are missing from
 * this extract; kept byte-identical. The visible butterfly computes the
 * intermediate terms t0..t4. */
121 // wht function copied from libvpx
123 {
124 double t0 = in[0] + in[1];
125 double t3 = in[3] - in[2];
126 double t4 =
trunc((t0 - t3) * 0.5);
127 double t1 = t4 - in[1];
128 double t2 = t4 - in[2];
129
134 }
135
/* NOTE(review): naive O(sz^2) 1-D DCT-II reference. The signature line (137),
 * the loop-variable declaration (139) and line 146 (presumably the DC
 * normalization, e.g. out[0] *= M_SQRT1_2 -- TODO confirm) are missing from
 * this extract; kept byte-identical. */
136 // standard DCT-II
138 {
140
141 for (k = 0; k < sz; k++) {
142 out[k] = 0.0;
143 for (n = 0; n < sz; n++)
144 out[k] += in[n] * cos(
M_PI * (2 * n + 1) * k / (sz * 2.0));
145 }
147 }
148
/* NOTE(review): naive 1-D ADST (4-point variant) reference per the cited
 * Han/Saxena/Rose paper. The signature line (152) and loop-variable
 * declaration (154) are missing from this extract; kept byte-identical. */
149 // see "Towards jointly optimal spatial prediction and adaptive transform in
150 // video/image coding", by J. Han, A. Saxena, and K. Rose
151 // IEEE Proc. ICASSP, pp. 726-729, Mar. 2010.
153 {
155
156 for (k = 0; k < sz; k++) {
157 out[k] = 0.0;
158 for (n = 0; n < sz; n++)
159 out[k] += in[n] * sin(
M_PI * (n + 1) * (2 * k + 1) / (sz * 2.0 + 1.0));
160 }
161 }
162
/* NOTE(review): naive 1-D ADST (butterfly-structured variant) reference per
 * the cited Han/Xu/Mukherjee paper. The signature line (166) and
 * loop-variable declaration (168) are missing from this extract; kept
 * byte-identical. */
163 // see "A Butterfly Structured Design of The Hybrid Transform Coding Scheme",
164 // by Jingning Han, Yaowu Xu, and Debargha Mukherjee
165 // http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/41418.pdf
167 {
169
170 for (k = 0; k < sz; k++) {
171 out[k] = 0.0;
172 for (n = 0; n < sz; n++)
173 out[k] += in[n] * sin(
M_PI * (2 * n + 1) * (2 * k + 1) / (sz * 4.0));
174 }
175 }
176
/* NOTE(review): 2-D forward transform driver: applies a 1-D transform to the
 * columns (scaling and transposing into `temp`), then to the rows. The
 * signature (lines 177-179), one scaling_factors row (182), most entries of
 * the ftx1d_tbl function-pointer table (190-208) and the `temp` declaration
 * (210) are missing from this extract; kept byte-identical. Tables are
 * indexed [tx][txtp] -- presumably tx selects 4x4..32x32/WHT and txtp the
 * DCT/ADST combination; TODO confirm against the full source. */
180 {
181 static const double scaling_factors[5][4] = {
183 { 2.0, 2.0, 2.0, 2.0 },
184 { 1.0, 1.0, 1.0, 1.0 },
185 { 0.25 },
186 { 4.0 }
187 };
188 static const ftx1d_fn ftx1d_tbl[5][4][2] = {
189 {
194 }, {
199 }, {
204 }, {
206 }, {
208 },
209 };
211 double scaling_factor = scaling_factors[tx][txtp];
212 int i, j;
213
214 // cols
215 for (i = 0; i < sz; ++i) {
216 double temp_out[32];
217
218 ftx1d_tbl[tx][txtp][0](temp_out, &in[i * sz], sz);
219 // scale and transpose
220 for (j = 0; j < sz; ++j)
221 temp[j * sz + i] = temp_out[j] * scaling_factor;
222 }
223
224 // rows
225 for (i = 0; i < sz; i++)
226 ftx1d_tbl[tx][txtp][1](&out[i * sz], &temp[i * sz], sz);
227 }
228
/* NOTE(review): integer-coefficient wrapper around ftx_2d(): widens the
 * int16_t/int32_t input into doubles, runs the double-precision 2-D forward
 * transform, and narrows the result back. The signature (229-230), the `n`
 * declaration (233) and the per-bit-depth load/store lines (238, 240, 245,
 * 247) are missing from this extract; kept byte-identical. emms_c() is
 * called before the FPU math (x86 MMX state reset). */
231 {
232 double ind[1024], outd[1024];
234
235 emms_c();
236 for (n = 0; n < sz * sz; n++) {
237 if (bit_depth == 8)
239 else
241 }
242 ftx_2d(outd, ind, tx, txtp, sz);
243 for (n = 0; n < sz * sz; n++) {
244 if (bit_depth == 8)
246 else
248 }
249 }
250
/* NOTE(review): copies only the top-left sub x sub coefficients (in scan
 * order) from `in` to `out`, zeroing the rest, and returns the eob index so
 * that sub-idct (e.g. dc-only) SIMD paths get exercised. The signature
 * (251-252), `n` declaration (261), the high-bit-depth copy/zero lines (276,
 * 289), the eob assignment (280) and the `rc` recomputation in the zeroing
 * loop (283) are missing from this extract; kept byte-identical. */
253 {
254 // copy the topleft coefficients such that the return value (being the
255 // coefficient scantable index for the eob token) guarantees that only
256 // the topleft $sub out of $sz (where $sz >= $sub) coefficients in both
257 // dimensions are non-zero. This leads to branching to specific optimized
258 // simd versions (e.g. dc-only) so that we get full asm coverage in this
259 // test
260
262 const int16_t *scan =
vp9_scans[tx][txtp];
263 int eob;
264
265 for (n = 0; n < sz * sz; n++) {
266 int rc = scan[
n], rcx = rc % sz, rcy = rc / sz;
267
268 // find eob for this sub-idct
269 if (rcx >= sub || rcy >= sub)
270 break;
271
272 // copy coef
273 if (bit_depth == 8) {
274 out[rc] = in[rc];
275 } else {
277 }
278 }
279
281
282 for (; n < sz * sz; n++) {
284
285 // zero
286 if (bit_depth == 8) {
287 out[rc] = 0;
288 } else {
290 }
291 }
292
293 return eob;
294 }
295
/* NOTE(review): returns 1 if the coefficient buffer is all zero, 0 otherwise
 * -- used after itxfm calls to verify the implementation cleared the block.
 * The signature (296), loop-variable declaration (298) and the non-zero test
 * inside the loop (301) are missing from this extract; kept byte-identical.
 * sz is in bytes; the loop steps two int16_t at a time (presumably checking
 * 32 bits per iteration -- TODO confirm). */
297 {
299
300 for (n = 0; n < sz / sizeof(int16_t); n += 2)
302 return 0;
303
304 return 1;
305 }
306
/* Bytes per transform coefficient: int16_t (2) at 8 bpp, int32_t (4) at 10/12 bpp. */
#define SIZEOF_COEF (2 * ((bit_depth + 7) / 8))
308
/* NOTE(review): fragment of check_itxfm() from FFmpeg's checkasm vp9dsp
 * test. Many lines are missing (311-323, 327, 329, 331, 334, 337, 344, 348,
 * 351, 353, 359, 361, 366), so it cannot compile as-is; kept byte-identical.
 * The visible skeleton loops over bit depths, transform sizes and types,
 * forward-transforms a random residual (ftx), then for progressively larger
 * eob sub-blocks compares reference vs. new inverse-transform-add output and
 * verifies both implementations zeroed the coefficient buffers. */
310 {
324 };
325
326 for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
328
330 int sz = 4 << (tx & 3);
332
333 for (txtp = 0; txtp < n_txtps; txtp++) {
335 tx == 4 ? "wht_wht" : txtp_types[txtp], sz, sz,
336 bit_depth)) {
338 ftx(coef, tx, txtp, sz, bit_depth);
339
340 for (sub = (txtp == 0) ? 1 : 2; sub <= sz; sub <<= 1) {
341 int eob;
342
343 if (sub < sz) {
345 sz, sub, bit_depth);
346 } else {
347 eob = sz * sz;
349 }
350
352 memcpy(dst1, dst, sz * sz * SIZEOF_PIXEL);
354 call_ref(dst0, sz * SIZEOF_PIXEL, subcoef0, eob);
355 call_new(dst1, sz * SIZEOF_PIXEL, subcoef1, eob);
356 if (memcmp(dst0, dst1, sz * sz * SIZEOF_PIXEL) ||
357 !
iszero(subcoef0, sz * sz * SIZEOF_COEF) ||
358 !
iszero(subcoef1, sz * sz * SIZEOF_COEF))
360 }
362 }
363 }
364 }
365 }
367 }
368
369 #undef randomize_buffers
370
/* Store pixel value c at column a, row b of buf0 (jstride elements per row),
 * clipped to the valid range for the current bit_depth: 8-bit store at 8 bpp,
 * uint16_t store clipped to bit_depth bits at 10/12 bpp. Expects buf0,
 * jstride and bit_depth in the caller's scope. */
#define setpx(a,b,c) \
    do { \
        if (SIZEOF_PIXEL == 1) { \
            buf0[(a) + (b) * jstride] = av_clip_uint8(c); \
        } else { \
            ((uint16_t *)buf0)[(a) + (b) * jstride] = av_clip_uintp2(c, bit_depth); \
        } \
    } while (0)
379
// c can be an assignment and must not be put under ()
/* setdx: store c dithered by a uniform random offset in [-d, d].
 * setsx: same, with the dither amplitude scaled up for 10/12 bpp. */
#define setdx(a,b,c,d) setpx(a,b,c-(d)+(rnd()%((d)*2+1)))
#define setsx(a,b,c,d) setdx(a,b,c,(d) << (bit_depth - 8))
/* NOTE(review): fragment of randomize_loopfilter_buffers() -- constructs
 * pixel rows/columns whose neighbor deltas are bounded by the E/F/H/I
 * loopfilter thresholds so that flat16, flat8, regular and disabled filter
 * paths are all exercised. The signature is partially missing (lines
 * 383-384, 386), as are several body lines (388, 395, 404, 411-412,
 * 426-427, 433-434); kept byte-identical. dir selects horizontal vs.
 * vertical layout via the istride/jstride swap. */
385 const int *
F,
const int *
H,
const int *I,
387 {
389 int off = dir ? lineoff : lineoff * 16;
390 int istride = dir ? 1 : 16;
391 int jstride = dir ? str : 1;
392 int i, j;
393 for (i = 0; i < 2; i++) /* flat16 */ {
394 int idx = off + i * istride, p0,
q0;
396 setsx(idx, -1, p0 = q0, E[bidx] >> 2);
397 for (j = 1; j < 8; j++) {
398 setsx(idx, -1 - j, p0, F[bidx]);
399 setsx(idx, j, q0, F[bidx]);
400 }
401 }
402 for (i = 2; i < 4; i++) /* flat8 */ {
403 int idx = off + i * istride, p0,
q0;
405 setsx(idx, -1, p0 = q0, E[bidx] >> 2);
406 for (j = 1; j < 4; j++) {
407 setsx(idx, -1 - j, p0, F[bidx]);
408 setsx(idx, j, q0, F[bidx]);
409 }
410 for (j = 4; j < 8; j++) {
413 }
414 }
415 for (i = 4; i < 6; i++) /* regular */ {
416 int idx = off + i * istride, p2, p1, p0,
q0,
q1, q2;
418 setsx(idx, 1, q1 = q0, I[bidx]);
419 setsx(idx, 2, q2 = q1, I[bidx]);
420 setsx(idx, 3, q2, I[bidx]);
421 setsx(idx, -1, p0 = q0, E[bidx] >> 2);
422 setsx(idx, -2, p1 = p0, I[bidx]);
423 setsx(idx, -3, p2 = p1, I[bidx]);
424 setsx(idx, -4, p2, I[bidx]);
425 for (j = 4; j < 8; j++) {
428 }
429 }
430 for (i = 6; i < 8; i++) /* off */ {
431 int idx = off + i * istride;
432 for (j = 0; j < 8; j++) {
435 }
436 }
437 }
/* Bind the loopfilter buffer randomizer to the locals of check_loopfilter()
 * (bit_depth, dir, threshold tables and the two pixel buffers). */
#define randomize_buffers(bidx, lineoff, str) \
        randomize_loopfilter_buffers(bidx, lineoff, str, bit_depth, dir, \
                                     E, F, H, I, buf0, buf1)
441
/* NOTE(review): fragment of check_loopfilter() from FFmpeg's checkasm vp9dsp
 * test. Many lines are missing (442, 444-447, 451, 454, 457-458, 464, 467,
 * 469-471, 473-474, 478-479, 485, 488-489, 491-492, 494-495, 501, 504-505,
 * 508-509, 511-512, 519), so it cannot compile as-is; kept byte-identical.
 * The visible skeleton covers, per bit depth and filter direction: the 8px
 * wd=4/8/16 filters, the 16x16 flat16 filter, and the mix2 dual-edge
 * filters, each time comparing reference vs. new output over the whole
 * buffer (including the regions the filter must not touch). */
443 {
448 static const char *const dir_name[2] = { "h", "v" };
449 static const int E[2] = { 20, 28 }, I[2] = { 10, 16 };
450 static const int H[2] = { 7, 11 },
F[2] = { 1, 1 };
452
453 for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
455
456 for (dir = 0; dir < 2; dir++) {
459 uint8_t *buf0 = base0 + midoff_aligned;
460 uint8_t *buf1 = base1 + midoff_aligned;
461
462 for (wd = 0; wd < 3; wd++) {
463 // 4/8/16wd_8px
465 "vp9_loop_filter_%s_%d_8_%dbpp",
466 dir_name[dir], 4 << wd, bit_depth)) {
468 memcpy(buf1 - midoff, buf0 - midoff,
472 if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 8 *
SIZEOF_PIXEL))
475 }
476 }
477
480
481 buf0 = base0 + midoff_aligned;
482 buf1 = base1 + midoff_aligned;
483
484 // 16wd_16px loopfilter
486 "vp9_loop_filter_%s_16_16_%dbpp",
487 dir_name[dir], bit_depth)) {
490 memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 *
SIZEOF_PIXEL);
493 if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 *
SIZEOF_PIXEL))
496 }
497
498 for (wd = 0; wd < 2; wd++) {
499 for (wd2 = 0; wd2 < 2; wd2++) {
500 // mix2 loopfilter
502 "vp9_loop_filter_mix2_%s_%d%d_16_%dbpp",
503 dir_name[dir], 4 << wd, 4 << wd2, bit_depth)) {
506 memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 *
SIZEOF_PIXEL);
507 #define M(a) (((a)[1] << 8) | (a)[0])
510 if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 *
SIZEOF_PIXEL))
513 #undef M
514 }
515 }
516 }
517 }
518 }
520 }
521
522 #undef setsx
523 #undef setpx
524 #undef setdx
525 #undef randomize_buffers
526
/* Buffer geometry for the MC (motion compensation) tests. The source buffer
 * is over-allocated with a 72-element stride and a 3-pixel top/left border
 * so the 8-tap subpel filters can read outside the nominal block; `src`
 * points at the first real pixel inside that border. */
#define DST_BUF_SIZE (size * size * SIZEOF_PIXEL)
#define SRC_BUF_STRIDE 72
#define SRC_BUF_SIZE ((size + 7) * SRC_BUF_STRIDE * SIZEOF_PIXEL)
#define src (buf + 3 * SIZEOF_PIXEL * (SRC_BUF_STRIDE + 1))
531
/* Fill the whole padded MC source area (including the filter border) with
 * random pixels of the current bit_depth. For the averaging operation
 * (op == 1) the destination is also read, so both destination copies are
 * seeded with identical random content. Expects `bit_depth`, `op`, `buf`,
 * `dst0`, `dst1` and `size` in the caller's scope. */
#define randomize_buffers()                               \
    do {                                                  \
        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1]; \
        int k;                                            \
        for (k = 0; k < SRC_BUF_SIZE; k += 4) {           \
            uint32_t r = rnd() & mask;                    \
            AV_WN32A(buf + k, r);                         \
        }                                                 \
        if (op == 1) {                                    \
            for (k = 0; k < DST_BUF_SIZE; k += 4) {       \
                uint32_t r = rnd() & mask;                \
                AV_WN32A(dst0 + k, r);                    \
                AV_WN32A(dst1 + k, r);                    \
            }                                             \
        }                                                 \
    } while (0)
548
/* NOTE(review): fragment of check_mc() from FFmpeg's checkasm vp9dsp test.
 * Many lines are missing (549, 551-556, 568, 576, 581, 584, 588-590,
 * 592-593, 595-596, 604-605, 614), so it cannot compile as-is; kept
 * byte-identical. The visible skeleton loops over put/avg, bit depths,
 * block sizes 64..4, the four filters and the four h/v subpel combinations,
 * comparing reference vs. new output for random mx/my phases; identical
 * SIMD variants (regular/sharp 8-tap, 12bpp bilin) are skipped for
 * benchmarking only. */
550 {
557 const uint8_t *ref, ptrdiff_t ref_stride,
558 int h,
int mx,
int my);
559 static const char *const filter_names[4] = {
560 "8tap_smooth", "8tap_regular", "8tap_sharp", "bilin"
561 };
562 static const char *const subpel_names[2][2] = { { "", "h" }, { "v", "hv" } };
563 static const char *const op_names[2] = { "put", "avg" };
564 char str[256];
565
566 for (op = 0; op < 2; op++) {
567 for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
569 for (hsize = 0; hsize < 5; hsize++) {
570 int size = 64 >> hsize;
571
572 for (filter = 0; filter < 4; filter++) {
573 for (dx = 0; dx < 2; dx++) {
574 for (dy = 0; dy < 2; dy++) {
575 if (dx || dy) {
577 "%s_%s_%d%s", op_names[op],
578 filter_names[filter], size,
579 subpel_names[dy][dx]);
580 } else {
582 "%s%d", op_names[op], size);
583 }
585 "vp9_%s_%dbpp", str, bit_depth)) {
586 int mx = dx ? 1 + (
rnd() % 14) : 0;
587 int my = dy ? 1 + (
rnd() % 14) : 0;
591 size, mx, my);
594 size, mx, my);
597
598 // simd implementations for each filter of subpel
599 // functions are identical
600 if (filter >= 1 && filter <= 2) continue;
601 // 10/12 bpp for bilin are identical
602 if (bit_depth == 12 && filter == 3) continue;
603
606 size, mx, my);
607 }
608 }
609 }
610 }
611 }
612 }
613 }
615 }
616
/* NOTE(review): entry point of the vp9dsp checkasm test. Only the braces
 * survive in this extract -- the signature (617) and the body (619-622,
 * presumably the check_ipred/check_itxfm/check_loopfilter/check_mc calls --
 * TODO confirm) are missing; kept byte-identical. */
618 {
623 }
static void check_loopfilter(void)
static int copy_subcoefs(int16_t *out, const int16_t *in, enum TxfmMode tx, enum TxfmType txtp, int sz, int sub, int bit_depth)
void(* intra_pred[N_TXFM_SIZES][N_INTRA_PRED_MODES])(uint8_t *dst, ptrdiff_t stride, const uint8_t *left, const uint8_t *top)
static const uint8_t q1[256]
#define setsx(a, b, c, d)
vp9_mc_func mc[5][4][2][2][2]
static void randomize_loopfilter_buffers(int bidx, int lineoff, int str, int bit_depth, int dir, const int *E, const int *F, const int *H, const int *I, uint8_t *buf0, uint8_t *buf1)
static void fadst4_1d(double *out, const double *in, int sz)
#define AV_CPU_FLAG_MMXEXT
SSE integer functions or AMD MMX ext.
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, uint8_t clip)
static void ftx(int16_t *buf, enum TxfmMode tx, enum TxfmType txtp, int sz, int bit_depth)
static void fwht_1d(double *out, const double *in, int sz)
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
static int iszero(const int16_t *c, int sz)
static const uint16_t mask[17]
#define declare_func(ret,...)
static void fadst_1d(double *out, const double *in, int sz)
static const uint8_t q0[256]
common internal API header
typedef void(APIENTRY *FF_PFNGLACTIVETEXTUREPROC)(GLenum texture)
static av_always_inline av_const double trunc(double x)
void(* loop_filter_16[2])(uint8_t *dst, ptrdiff_t stride, int mb_lim, int lim, int hev_thr)
void(* ftx1d_fn)(double *out, const double *in, int sz)
#define declare_func_emms(cpu_flags, ret,...)
static void check_mc(void)
void(* loop_filter_mix2[2][2][2])(uint8_t *dst, ptrdiff_t stride, int mb_lim, int lim, int hev_thr)
void(* loop_filter_8[3][2])(uint8_t *dst, ptrdiff_t stride, int mb_lim, int lim, int hev_thr)
#define AV_CPU_FLAG_MMX
standard MMX
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
static const uint32_t pixel_mask[3]
#define check_func(func,...)
static const int16_t *const vp9_scans[5][4]
static void ftx_2d(double *out, const double *in, enum TxfmMode tx, enum TxfmType txtp, int sz)
#define LOCAL_ALIGNED_32(t, v,...)
#define randomize_buffers()
static int op(uint8_t **dst, const uint8_t *dst_end, GetByteContext *gb, int pixel, int count, int *x, int width, int linesize)
Perform decode operation.
GLint GLenum GLboolean GLsizei stride
void(* itxfm_add[N_TXFM_SIZES+1][N_TXFM_TYPES])(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob)
static void fdct_1d(double *out, const double *in, int sz)
common internal and external API header
static unsigned bit_depth(uint64_t mask)
void checkasm_check_vp9dsp(void)
static void check_ipred(void)
static void check_itxfm(void)