/*
 * DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/internal.h"
#include "libavutil/mem_internal.h"

#include "avcodec.h"
#include "copy_block.h"
#include "mathops.h"
#include "me_cmp.h"
#include "mpegvideoenc.h"
#include "simple_idct.h"
#include "config.h"
#include "config_components.h"

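/* The sse*_c functions below index ff_square_tab with an offset of 256, so
 * that sq[d] == d * d for any pixel difference d in [-255, 255]. */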
static int sse4_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                  ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int sse8_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                  ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int sse16_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
                   ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        s += sq[pix1[8] - pix2[8]];
        s += sq[pix1[9] - pix2[9]];
        s += sq[pix1[10] - pix2[10]];
        s += sq[pix1[11] - pix2[11]];
        s += sq[pix1[12] - pix2[12]];
        s += sq[pix1[13] - pix2[13]];
        s += sq[pix1[14] - pix2[14]];
        s += sq[pix1[15] - pix2[15]];

        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int sum_abs_dctelem_c(const int16_t *block)
{
    int sum = 0, i;

    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);
    return sum;
}

#define avg2(a, b)       (((a) + (b) + 1) >> 1)
#define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)

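/* Rounding averages used by the half-pel SAD functions below: avg2() gives
 * the x- or y-interpolated sample ((a + b + 1) >> 1), avg4() the
 * xy-interpolated one, matching MPEG-style half-pel rounding. */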
static inline int pix_abs16_c(MPVEncContext *unused, const uint8_t *pix1,
                              const uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - pix2[0]);
        s += abs(pix1[1] - pix2[1]);
        s += abs(pix1[2] - pix2[2]);
        s += abs(pix1[3] - pix2[3]);
        s += abs(pix1[4] - pix2[4]);
        s += abs(pix1[5] - pix2[5]);
        s += abs(pix1[6] - pix2[6]);
        s += abs(pix1[7] - pix2[7]);
        s += abs(pix1[8] - pix2[8]);
        s += abs(pix1[9] - pix2[9]);
        s += abs(pix1[10] - pix2[10]);
        s += abs(pix1[11] - pix2[11]);
        s += abs(pix1[12] - pix2[12]);
        s += abs(pix1[13] - pix2[13]);
        s += abs(pix1[14] - pix2[14]);
        s += abs(pix1[15] - pix2[15]);
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

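/* Median SAD: instead of summing the raw differences, sum what is left after
 * predicting each difference V(x) with the median of its left, top and
 * (left + top - topleft) gradient neighbours, as in the LOCO-I/JPEG-LS MED
 * predictor. */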
static inline int pix_median_abs16_c(MPVEncContext *unused, const uint8_t *pix1,
                                     const uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0, i, j;

#define V(x) (pix1[x] - pix2[x])

    s += abs(V(0));
    s += abs(V(1) - V(0));
    s += abs(V(2) - V(1));
    s += abs(V(3) - V(2));
    s += abs(V(4) - V(3));
    s += abs(V(5) - V(4));
    s += abs(V(6) - V(5));
    s += abs(V(7) - V(6));
    s += abs(V(8) - V(7));
    s += abs(V(9) - V(8));
    s += abs(V(10) - V(9));
    s += abs(V(11) - V(10));
    s += abs(V(12) - V(11));
    s += abs(V(13) - V(12));
    s += abs(V(14) - V(13));
    s += abs(V(15) - V(14));

    pix1 += stride;
    pix2 += stride;

    for (i = 1; i < h; i++) {
        s += abs(V(0) - V(-stride));
        for (j = 1; j < 16; j++)
            s += abs(V(j) - mid_pred(V(j - stride), V(j - 1),
                                     V(j - stride) + V(j - 1) - V(j - stride - 1)));
        pix1 += stride;
        pix2 += stride;
    }
#undef V
    return s;
}

static int pix_abs16_x2_c(MPVEncContext *unused, const uint8_t *pix1,
                          const uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
        s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
        s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
        s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
        s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
        s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
        s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
        s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
        s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
        s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
        s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
        s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
        s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
        s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
        s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
        s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int pix_abs16_y2_c(MPVEncContext *unused, const uint8_t *pix1,
                          const uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
        s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
        s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
        s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
        s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
        s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
        s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
        s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
        s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
        s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
        s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
        s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
        s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
        s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
        s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
        s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

static int pix_abs16_xy2_c(MPVEncContext *unused, const uint8_t *pix1,
                           const uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
        s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
        s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
        s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
        s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
        s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
        s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
        s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
        s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
        s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
        s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
        s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
        s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
        s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
        s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
        s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

static inline int pix_abs8_c(MPVEncContext *unused, const uint8_t *pix1,
                             const uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - pix2[0]);
        s += abs(pix1[1] - pix2[1]);
        s += abs(pix1[2] - pix2[2]);
        s += abs(pix1[3] - pix2[3]);
        s += abs(pix1[4] - pix2[4]);
        s += abs(pix1[5] - pix2[5]);
        s += abs(pix1[6] - pix2[6]);
        s += abs(pix1[7] - pix2[7]);
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static inline int pix_median_abs8_c(MPVEncContext *unused, const uint8_t *pix1,
                                    const uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0, i, j;

#define V(x) (pix1[x] - pix2[x])

    s += abs(V(0));
    s += abs(V(1) - V(0));
    s += abs(V(2) - V(1));
    s += abs(V(3) - V(2));
    s += abs(V(4) - V(3));
    s += abs(V(5) - V(4));
    s += abs(V(6) - V(5));
    s += abs(V(7) - V(6));

    pix1 += stride;
    pix2 += stride;

    for (i = 1; i < h; i++) {
        s += abs(V(0) - V(-stride));
        for (j = 1; j < 8; j++)
            s += abs(V(j) - mid_pred(V(j - stride), V(j - 1),
                                     V(j - stride) + V(j - 1) - V(j - stride - 1)));
        pix1 += stride;
        pix2 += stride;
    }
#undef V
    return s;
}

static int pix_abs8_x2_c(MPVEncContext *unused, const uint8_t *pix1,
                         const uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
        s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
        s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
        s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
        s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
        s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
        s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
        s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int pix_abs8_y2_c(MPVEncContext *unused, const uint8_t *pix1,
                         const uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
        s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
        s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
        s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
        s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
        s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
        s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
        s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

static int pix_abs8_xy2_c(MPVEncContext *unused, const uint8_t *pix1,
                          const uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
        s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
        s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
        s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
        s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
        s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
        s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
        s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

static int nsse16_c(MPVEncContext *const c, const uint8_t *s1, const uint8_t *s2,
                    ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (x = 0; x < 15; x++)
                score2 += FFABS(s1[x] - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x] - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_c(MPVEncContext *const c, const uint8_t *s1, const uint8_t *s2,
                   ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (x = 0; x < 7; x++)
                score2 += FFABS(s1[x] - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x] - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int zero_cmp(MPVEncContext *const s, const uint8_t *a, const uint8_t *b,
                    ptrdiff_t stride, int h)
{
    return 0;
}

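/* Illustrative use (not from the original sources): an encoder typically
 * resolves an FF_CMP_* setting into a set of comparison functions once and
 * then calls them per block, e.g.
 *
 *     me_cmp_func cmp[6];
 *     if (ff_set_cmp(&mecc, cmp, FF_CMP_SATD, 1) < 0)   // mecc: an initialized MECmpContext
 *         return AVERROR(EINVAL);
 *     score = cmp[0](s, ref, cur, stride, 16);          // 16x16 comparison
 *
 * cmp[0] is the 16x16 variant and cmp[1] the 8x8 one; see ff_me_cmp_init()
 * below for how each MECmpContext array is laid out. */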
int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
{
#define ENTRY(CMP_FLAG, ARRAY, MPVENC_ONLY) \
    [FF_CMP_ ## CMP_FLAG] = { \
        .offset    = offsetof(MECmpContext, ARRAY), \
        .mpv_only  = MPVENC_ONLY, \
        .available = 1, \
    }
    static const struct {
        char available;
        char mpv_only;
        uint16_t offset;
    } cmp_func_list[] = {
        ENTRY(SAD, sad, 0),
        ENTRY(SSE, sse, 0),
        ENTRY(SATD, hadamard8_diff, 0),
        ENTRY(DCT, dct_sad, 1),
        ENTRY(PSNR, quant_psnr, 1),
        ENTRY(BIT, bit, 1),
        ENTRY(RD, rd, 1),
        ENTRY(VSAD, vsad, 0),
        ENTRY(VSSE, vsse, 0),
        ENTRY(NSSE, nsse, 0),
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
        ENTRY(W53, w53, 0),
        ENTRY(W97, w97, 0),
#endif
        ENTRY(DCTMAX, dct_max, 1),
#if CONFIG_GPL
        ENTRY(DCT264, dct264_sad, 1),
#endif
        ENTRY(MEDIAN_SAD, median_sad, 0),
    };
    const me_cmp_func *me_cmp_func_array;

    if (type == FF_CMP_ZERO) {
        for (int i = 0; i < 6; i++)
            cmp[i] = zero_cmp;
        return 0;
    }
    if (type >= FF_ARRAY_ELEMS(cmp_func_list) ||
        !cmp_func_list[type].available ||
        (!mpvenc && cmp_func_list[type].mpv_only)) {
        av_log(NULL, AV_LOG_ERROR,
               "invalid cmp function selection\n");
        return AVERROR(EINVAL);
    }
    me_cmp_func_array = (const me_cmp_func*)(((const char*)c) + cmp_func_list[type].offset);
    for (int i = 0; i < 6; i++)
        cmp[i] = me_cmp_func_array[i];

    return 0;
}

#define BUTTERFLY2(o1, o2, i1, i2) \
    o1 = (i1) + (i2); \
    o2 = (i1) - (i2);

#define BUTTERFLY1(x, y) \
    { \
        int a, b; \
        a = x; \
        b = y; \
        x = a + b; \
        y = a - b; \
    }

#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))

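/* The 8x8 Hadamard transform below is built from three butterfly stages per
 * direction; BUTTERFLYA() folds the final stage into the absolute-value sum
 * so the last pair of intermediates never has to be stored. */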
static int hadamard8_diff8x8_c(MPVEncContext *unused, const uint8_t *dst,
                               const uint8_t *src, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    for (i = 0; i < 8; i++) {
        // FIXME: try pointer walks
        BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
                   src[stride * i + 0] - dst[stride * i + 0],
                   src[stride * i + 1] - dst[stride * i + 1]);
        BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
                   src[stride * i + 2] - dst[stride * i + 2],
                   src[stride * i + 3] - dst[stride * i + 3]);
        BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
                   src[stride * i + 4] - dst[stride * i + 4],
                   src[stride * i + 5] - dst[stride * i + 5]);
        BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
                   src[stride * i + 6] - dst[stride * i + 6],
                   src[stride * i + 7] - dst[stride * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
        BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
        BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
    }

    for (i = 0; i < 8; i++) {
        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
        BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
        BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);

        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
        BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
        BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);

        sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
               BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
               BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
               BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
    }
    return sum;
}

static int hadamard8_intra8x8_c(MPVEncContext *unused, const uint8_t *src,
                                const uint8_t *dummy, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    for (i = 0; i < 8; i++) {
        // FIXME: try pointer walks
        BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
                   src[stride * i + 0], src[stride * i + 1]);
        BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
                   src[stride * i + 2], src[stride * i + 3]);
        BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
                   src[stride * i + 4], src[stride * i + 5]);
        BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
                   src[stride * i + 6], src[stride * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
        BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
        BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
    }

    for (i = 0; i < 8; i++) {
        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
        BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
        BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);

        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
        BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
        BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);

        sum +=
            BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
            BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
            BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
            BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
    }

    sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // -mean

    return sum;
}

static int dct_sad8x8_c(MPVEncContext *const s, const uint8_t *src1,
                        const uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64]);

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
    s->fdsp.fdct(temp);
    return s->sum_abs_dctelem(temp);
}

#if CONFIG_GPL
#define DCT8_1D \
    { \
        const int s07 = SRC(0) + SRC(7); \
        const int s16 = SRC(1) + SRC(6); \
        const int s25 = SRC(2) + SRC(5); \
        const int s34 = SRC(3) + SRC(4); \
        const int a0  = s07 + s34; \
        const int a1  = s16 + s25; \
        const int a2  = s07 - s34; \
        const int a3  = s16 - s25; \
        const int d07 = SRC(0) - SRC(7); \
        const int d16 = SRC(1) - SRC(6); \
        const int d25 = SRC(2) - SRC(5); \
        const int d34 = SRC(3) - SRC(4); \
        const int a4  = d16 + d25 + (d07 + (d07 >> 1)); \
        const int a5  = d07 - d34 - (d25 + (d25 >> 1)); \
        const int a6  = d07 + d34 - (d16 + (d16 >> 1)); \
        const int a7  = d16 - d25 + (d34 + (d34 >> 1)); \
        DST(0, a0 + a1); \
        DST(1, a4 + (a7 >> 2)); \
        DST(2, a2 + (a3 >> 1)); \
        DST(3, a5 + (a6 >> 2)); \
        DST(4, a0 - a1); \
        DST(5, a6 - (a5 >> 2)); \
        DST(6, (a2 >> 1) - a3); \
        DST(7, (a4 >> 2) - a7); \
    }

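/* DCT8_1D above is a 1-D 8-point integer transform of the kind used by
 * H.264's 8x8 transform (hence the GPL guard); dct264_sad8x8_c applies it
 * to the rows, then to the columns, summing absolute values in the second
 * pass via the redefined DST() macro. */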
static int dct264_sad8x8_c(MPVEncContext *const s, const uint8_t *src1,
                           const uint8_t *src2, ptrdiff_t stride, int h)
{
    int16_t dct[8][8];
    int i, sum = 0;

    s->pdsp.diff_pixels_unaligned(dct[0], src1, src2, stride);

#define SRC(x) dct[i][x]
#define DST(x, v) dct[i][x] = v
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST

#define SRC(x) dct[x][i]
#define DST(x, v) sum += FFABS(v)
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST
    return sum;
}
#endif

static int dct_max8x8_c(MPVEncContext *const s, const uint8_t *src1,
                        const uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int sum = 0, i;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
    s->fdsp.fdct(temp);

    for (i = 0; i < 64; i++)
        sum = FFMAX(sum, FFABS(temp[i]));

    return sum;
}

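/* quant_psnr: measure how much of the residual survives a full
 * quantize -> dequantize -> inverse-transform round trip at the current
 * qscale, i.e. the distortion the codec itself would introduce. */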
static int quant_psnr8x8_c(MPVEncContext *const s, const uint8_t *src1,
                           const uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
    int16_t *const bak = temp + 64;
    int sum = 0, i;

    s->c.mb_intra = 0;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    memcpy(bak, temp, 64 * sizeof(int16_t));

    s->c.block_last_index[0 /* FIXME */] =
        s->dct_quantize(s, temp, 0 /* FIXME */, s->c.qscale, &i);
    s->c.dct_unquantize_inter(&s->c, temp, 0, s->c.qscale);
    ff_simple_idct_int16_8bit(temp); // FIXME

    for (i = 0; i < 64; i++)
        sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);

    return sum;
}

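/* rd8x8_c estimates a rate-distortion cost for one 8x8 block: it quantizes
 * the residual, counts the VLC bits needed to code the coefficients,
 * reconstructs the block, and returns the SSE distortion plus a
 * qscale-weighted bit cost. bit8x8_c further below does only the
 * bit-counting part. */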
static int rd8x8_c(MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2,
                   ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->c.intra_scantable.permutated;
    LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int i, last, run, bits, level, distortion, start_i;
    const int esc_length = s->ac_esc_length;
    const uint8_t *length, *last_length;

    copy_block8(lsrc1, src1, 8, stride, 8);
    copy_block8(lsrc2, src2, 8, stride, 8);

    s->pdsp.diff_pixels(temp, lsrc1, lsrc2, 8);

    s->c.block_last_index[0 /* FIXME */] =
    last                              =
        s->dct_quantize(s, temp, 0 /* FIXME */, s->c.qscale, &i);

    bits = 0;

    if (s->c.mb_intra) {
        start_i     = 1;
        length      = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits       += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
    } else {
        start_i     = 0;
        length      = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;
                run = 0;
            } else
                run++;
        }
        i = scantable[last];

        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0) {
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        } else
            bits += esc_length;
    }

    if (last >= 0) {
        if (s->c.mb_intra)
            s->c.dct_unquantize_intra(&s->c, temp, 0, s->c.qscale);
        else
            s->c.dct_unquantize_inter(&s->c, temp, 0, s->c.qscale);
    }

    s->c.idsp.idct_add(lsrc2, 8, temp);

    distortion = s->sse_cmp[1](NULL, lsrc2, lsrc1, 8, 8);

    return distortion + ((bits * s->c.qscale * s->c.qscale * 109 + 64) >> 7);
}

static int bit8x8_c(MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2,
                    ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->c.intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int i, last, run, bits, level, start_i;
    const int esc_length = s->ac_esc_length;
    const uint8_t *length, *last_length;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    s->c.block_last_index[0 /* FIXME */] =
    last                              =
        s->dct_quantize(s, temp, 0 /* FIXME */, s->c.qscale, &i);

    bits = 0;

    if (s->c.mb_intra) {
        start_i     = 1;
        length      = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits       += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
    } else {
        start_i     = 0;
        length      = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;
                run = 0;
            } else
                run++;
        }
        i = scantable[last];

        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0)
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        else
            bits += esc_length;
    }

    return bits;
}

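/* Vertical activity metrics: vsad* sums absolute row-to-row differences and
 * vsse* the squared ones; the *_intra variants look at a single picture,
 * while the inter variants measure the row-to-row change of the residual
 * s1 - s2. */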
#define VSAD_INTRA(size) \
static int vsad_intra ## size ## _c(MPVEncContext *unused, \
                                    const uint8_t *s, const uint8_t *dummy, \
                                    ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
\
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x += 4) { \
            score += FFABS(s[x]     - s[x + stride]) + \
                     FFABS(s[x + 1] - s[x + stride + 1]) + \
                     FFABS(s[x + 2] - s[x + 2 + stride]) + \
                     FFABS(s[x + 3] - s[x + 3 + stride]); \
        } \
        s += stride; \
    } \
\
    return score; \
}
VSAD_INTRA(8)
VSAD_INTRA(16)

#define VSAD(size) \
static int vsad ## size ## _c(MPVEncContext *unused, \
                              const uint8_t *s1, const uint8_t *s2, \
                              ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
\
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x++) \
            score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
        s1 += stride; \
        s2 += stride; \
    } \
\
    return score; \
}
VSAD(8)
VSAD(16)

#define SQ(a) ((a) * (a))
#define VSSE_INTRA(size) \
static int vsse_intra ## size ## _c(MPVEncContext *unused, \
                                    const uint8_t *s, const uint8_t *dummy, \
                                    ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
\
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x += 4) { \
            score += SQ(s[x]     - s[x + stride]) + \
                     SQ(s[x + 1] - s[x + stride + 1]) + \
                     SQ(s[x + 2] - s[x + stride + 2]) + \
                     SQ(s[x + 3] - s[x + stride + 3]); \
        } \
        s += stride; \
    } \
\
    return score; \
}
VSSE_INTRA(8)
VSSE_INTRA(16)

#define VSSE(size) \
static int vsse ## size ## _c(MPVEncContext *unused, const uint8_t *s1, \
                              const uint8_t *s2, ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
\
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x++) \
            score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
        s1 += stride; \
        s2 += stride; \
    } \
\
    return score; \
}
VSSE(8)
VSSE(16)

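/* Build the 16x16 comparisons out of the 8x8 kernels: two side-by-side 8x8
 * calls, plus two more one row of blocks down when h == 16. */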
#define WRAPPER8_16_SQ(name8, name16) \
static int name16(MPVEncContext *const s, const uint8_t *dst, \
                  const uint8_t *src, ptrdiff_t stride, int h) \
{ \
    int score = 0; \
\
    score += name8(s, dst, src, stride, 8); \
    score += name8(s, dst + 8, src + 8, stride, 8); \
    if (h == 16) { \
        dst += 8 * stride; \
        src += 8 * stride; \
        score += name8(s, dst, src, stride, 8); \
        score += name8(s, dst + 8, src + 8, stride, 8); \
    } \
    return score; \
}

WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
#if CONFIG_GPL
WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
#endif
WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
WRAPPER8_16_SQ(rd8x8_c, rd16_c)
WRAPPER8_16_SQ(bit8x8_c, bit16_c)

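/* Install the C implementations first, then let the per-architecture init
 * functions override whatever they provide SIMD versions for. */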
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
{
    memset(c, 0, sizeof(*c));

    c->sum_abs_dctelem = sum_abs_dctelem_c;

    /* TODO: [0] = 16x16 blocks, [1] = 8x8 blocks */
    c->pix_abs[0][0] = pix_abs16_c;
    c->pix_abs[0][1] = pix_abs16_x2_c;
    c->pix_abs[0][2] = pix_abs16_y2_c;
    c->pix_abs[0][3] = pix_abs16_xy2_c;
    c->pix_abs[1][0] = pix_abs8_c;
    c->pix_abs[1][1] = pix_abs8_x2_c;
    c->pix_abs[1][2] = pix_abs8_y2_c;
    c->pix_abs[1][3] = pix_abs8_xy2_c;

#define SET_CMP_FUNC(name) \
    c->name[0] = name ## 16_c; \
    c->name[1] = name ## 8x8_c;

    SET_CMP_FUNC(hadamard8_diff)
    c->hadamard8_diff[4] = hadamard8_intra16_c;
    c->hadamard8_diff[5] = hadamard8_intra8x8_c;
    SET_CMP_FUNC(dct_sad)
    SET_CMP_FUNC(dct_max)
#if CONFIG_GPL
    SET_CMP_FUNC(dct264_sad)
#endif
    c->sad[0] = pix_abs16_c;
    c->sad[1] = pix_abs8_c;
    c->sse[0] = sse16_c;
    c->sse[1] = sse8_c;
    c->sse[2] = sse4_c;
    SET_CMP_FUNC(quant_psnr)
    SET_CMP_FUNC(rd)
    SET_CMP_FUNC(bit)
    c->vsad[0] = vsad16_c;
    c->vsad[1] = vsad8_c;
    c->vsad[4] = vsad_intra16_c;
    c->vsad[5] = vsad_intra8_c;
    c->vsse[0] = vsse16_c;
    c->vsse[1] = vsse8_c;
    c->vsse[4] = vsse_intra16_c;
    c->vsse[5] = vsse_intra8_c;
    c->nsse[0] = nsse16_c;
    c->nsse[1] = nsse8_c;
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
    ff_dsputil_init_dwt(c);
#endif

    c->median_sad[0] = pix_median_abs16_c;
    c->median_sad[1] = pix_median_abs8_c;

#if ARCH_AARCH64
    ff_me_cmp_init_aarch64(c, avctx);
#elif ARCH_ARM
    ff_me_cmp_init_arm(c, avctx);
#elif ARCH_PPC
    ff_me_cmp_init_ppc(c, avctx);
#elif ARCH_RISCV
    ff_me_cmp_init_riscv(c, avctx);
#elif ARCH_X86
    ff_me_cmp_init_x86(c, avctx);
#elif ARCH_MIPS
    ff_me_cmp_init_mips(c, avctx);
#endif
}