/*
 * SIMD-optimized halfpel functions
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/hpeldsp.h"
#include "libavcodec/pixels.h"
#include "fpel.h"
#include "hpeldsp.h"

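/*
 * External (assembly) half-pel primitives. They all share one signature:
 * "block" is the destination, "pixels" the source, "line_size" the byte
 * stride of both buffers and "h" the number of rows to process. The
 * _x2/_y2/_xy2 suffixes select horizontal, vertical or diagonal half-pel
 * interpolation, _no_rnd variants round the average down instead of up,
 * and _approx marks the faster, not bit-exact xy2 approximation.
 */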
void ff_put_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_sse2(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels16_x2_sse2(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_pixels16_y2_sse2(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels16_y2_sse2(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_mmxext(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_3dnow(uint8_t *block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_mmxext(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_3dnow(uint8_t *block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_approx_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
                                      ptrdiff_t line_size, int h);
void ff_avg_approx_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
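/*
 * Aliases mapping the plain names used below onto the ff_-prefixed MMX
 * implementations (external assembly or the inline-asm templates further
 * down). The *_no_rnd full-pel aliases reuse the rounding versions on
 * purpose: without half-pel interpolation there is nothing to round
 * differently.
 */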

#define avg_pixels8_mmx         ff_avg_pixels8_mmx
#define avg_pixels8_x2_mmx      ff_avg_pixels8_x2_mmx
#define avg_pixels16_mmx        ff_avg_pixels16_mmx
#define avg_pixels8_xy2_mmx     ff_avg_pixels8_xy2_mmx
#define avg_pixels16_xy2_mmx    ff_avg_pixels16_xy2_mmx
#define put_pixels8_mmx         ff_put_pixels8_mmx
#define put_pixels16_mmx        ff_put_pixels16_mmx
#define put_pixels8_xy2_mmx     ff_put_pixels8_xy2_mmx
#define put_pixels16_xy2_mmx    ff_put_pixels16_xy2_mmx
#define avg_no_rnd_pixels16_mmx ff_avg_pixels16_mmx
#define put_no_rnd_pixels8_mmx  ff_put_pixels8_mmx
#define put_no_rnd_pixels16_mmx ff_put_pixels16_mmx

#if HAVE_INLINE_ASM

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)
#define STATIC static

#include "rnd_template.c"
#include "hpeldsp_rnd_template.c"

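/*
 * The templates included above expand to the MMX "no rounding" half-pel
 * kernels: with SET_RND = MOVQ_WONE and the *_NO_RND averaging macros the
 * byte average truncates, i.e. (a + b) >> 1, instead of the default
 * (a + b + 1) >> 1.
 */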
#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef STATIC

CALL_2X_PIXELS(avg_no_rnd_pixels16_y2_mmx, avg_no_rnd_pixels8_y2_mmx, 8)
CALL_2X_PIXELS(put_no_rnd_pixels16_y2_mmx, put_no_rnd_pixels8_y2_mmx, 8)

CALL_2X_PIXELS(avg_no_rnd_pixels16_xy2_mmx, avg_no_rnd_pixels8_xy2_mmx, 8)
CALL_2X_PIXELS(put_no_rnd_pixels16_xy2_mmx, put_no_rnd_pixels8_xy2_mmx, 8)
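/*
 * CALL_2X_PIXELS (from libavcodec/pixels.h) defines a 16-pixel-wide wrapper
 * that simply runs the given 8-pixel function twice, on the left and right
 * halves of the block (offset 8), so only 8-pixel kernels need to exist.
 */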

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "hpeldsp_rnd_template.c"

#undef DEF
#define DEF(x, y) ff_ ## x ## _ ## y ## _mmx
#define STATIC

#include "rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

CALL_2X_PIXELS(avg_pixels16_y2_mmx, avg_pixels8_y2_mmx, 8)
CALL_2X_PIXELS(put_pixels16_y2_mmx, put_pixels8_y2_mmx, 8)

CALL_2X_PIXELS(avg_pixels16_xy2_mmx, avg_pixels8_xy2_mmx, 8)
CALL_2X_PIXELS(put_pixels16_xy2_mmx, put_pixels8_xy2_mmx, 8)

#endif /* HAVE_INLINE_ASM */


#if HAVE_YASM

#define HPELDSP_AVG_PIXELS16(CPUEXT)                                            \
    CALL_2X_PIXELS(put_no_rnd_pixels16_x2 ## CPUEXT, ff_put_no_rnd_pixels8_x2 ## CPUEXT, 8) \
    CALL_2X_PIXELS(put_pixels16_y2        ## CPUEXT, ff_put_pixels8_y2        ## CPUEXT, 8) \
    CALL_2X_PIXELS(put_no_rnd_pixels16_y2 ## CPUEXT, ff_put_no_rnd_pixels8_y2 ## CPUEXT, 8) \
    CALL_2X_PIXELS(avg_pixels16           ## CPUEXT, ff_avg_pixels8           ## CPUEXT, 8) \
    CALL_2X_PIXELS(avg_pixels16_x2        ## CPUEXT, ff_avg_pixels8_x2        ## CPUEXT, 8) \
    CALL_2X_PIXELS(avg_pixels16_y2        ## CPUEXT, ff_avg_pixels8_y2        ## CPUEXT, 8) \
    CALL_2X_PIXELS(avg_pixels16_xy2       ## CPUEXT, ff_avg_pixels8_xy2       ## CPUEXT, 8) \
    CALL_2X_PIXELS(avg_approx_pixels16_xy2## CPUEXT, ff_avg_approx_pixels8_xy2## CPUEXT, 8)

HPELDSP_AVG_PIXELS16(_3dnow)
HPELDSP_AVG_PIXELS16(_mmxext)

#endif /* HAVE_YASM */

#define SET_HPEL_FUNCS_EXT(PFX, IDX, SIZE, CPU)                                 \
    if (HAVE_MMX_EXTERNAL)                                                      \
        c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _ ## CPU;

#if HAVE_MMX_INLINE
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU)                                     \
    do {                                                                        \
        SET_HPEL_FUNCS_EXT(PFX, IDX, SIZE, CPU)                                 \
        c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_  ## CPU; \
        c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_  ## CPU; \
        c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
    } while (0)
#else
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU)                                     \
    do {                                                                        \
        SET_HPEL_FUNCS_EXT(PFX, IDX, SIZE, CPU)                                 \
    } while (0)
#endif
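/*
 * Example: with inline asm, SET_HPEL_FUNCS(put, [0], 16, mmx) sets
 * c->put_pixels_tab[0][1..3] to put_pixels16_{x2,y2,xy2}_mmx and, if external
 * MMX code is available, c->put_pixels_tab[0][0] to put_pixels16_mmx; without
 * inline asm only that full-pel entry is set.
 */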

static void hpeldsp_init_mmx(HpelDSPContext *c, int flags, int cpu_flags)
{
    SET_HPEL_FUNCS(put,        [0], 16, mmx);
    SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
    SET_HPEL_FUNCS(avg,        [0], 16, mmx);
    SET_HPEL_FUNCS(put,        [1],  8, mmx);
    SET_HPEL_FUNCS(put_no_rnd, [1],  8, mmx);
    SET_HPEL_FUNCS(avg,        [1],  8, mmx);
    if (HAVE_MMX_EXTERNAL) {
        c->avg_no_rnd_pixels_tab[0] = avg_no_rnd_pixels16_mmx;
    }
#if HAVE_MMX_INLINE
    c->avg_no_rnd_pixels_tab[2] = avg_no_rnd_pixels16_y2_mmx;
    c->avg_no_rnd_pixels_tab[3] = avg_no_rnd_pixels16_xy2_mmx;
#endif
}

static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags, int cpu_flags)
{
#if HAVE_MMXEXT_EXTERNAL
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
    c->put_pixels_tab[0][2] = put_pixels16_y2_mmxext;

    c->avg_pixels_tab[0][0] = avg_pixels16_mmxext;
    c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmxext;
    c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmxext;
    c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext;

    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;

    c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmxext;
    c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmxext;
    c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;
    c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;

    if (!(flags & CODEC_FLAG_BITEXACT)) {
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext;
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;

        c->avg_pixels_tab[0][3] = avg_approx_pixels16_xy2_mmxext;
        c->avg_pixels_tab[1][3] = ff_avg_approx_pixels8_xy2_mmxext;
    }

    if (flags & CODEC_FLAG_BITEXACT && CONFIG_VP3_DECODER) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}
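/*
 * When CODEC_FLAG_BITEXACT is set, VP3/Theora must match the reference
 * decoder exactly, so the "_exact" no-rounding x2/y2 kernels are installed;
 * the approximate xy2 averagers are reserved for the non-bit-exact case.
 */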

static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags, int cpu_flags)
{
#if HAVE_AMD3DNOW_EXTERNAL
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
    c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

    c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
    c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
    c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
    c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;

    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;

    c->avg_pixels_tab[1][0] = ff_avg_pixels8_3dnow;
    c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_3dnow;
    c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;
    c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;

    if (!(flags & CODEC_FLAG_BITEXACT)) {
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;

        c->avg_pixels_tab[0][3] = avg_approx_pixels16_xy2_3dnow;
        c->avg_pixels_tab[1][3] = ff_avg_approx_pixels8_xy2_3dnow;
    }

    if (flags & CODEC_FLAG_BITEXACT && CONFIG_VP3_DECODER) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_3dnow;
    }
#endif /* HAVE_AMD3DNOW_EXTERNAL */
}

static void hpeldsp_init_sse2(HpelDSPContext *c, int flags, int cpu_flags)
{
#if HAVE_SSE2_EXTERNAL
    if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
        // these functions are slower than mmx on AMD, but faster on Intel
        c->put_pixels_tab[0][0]        = ff_put_pixels16_sse2;
        c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
        c->avg_pixels_tab[0][0]        = ff_avg_pixels16_sse2;
        c->avg_no_rnd_pixels_tab[0]    = ff_avg_pixels16_sse2;

        c->put_pixels_tab[0][1]        = ff_put_pixels16_x2_sse2;
        c->put_pixels_tab[0][2]        = ff_put_pixels16_y2_sse2;
        c->avg_pixels_tab[0][1]        = ff_avg_pixels16_x2_sse2;
        c->avg_pixels_tab[0][2]        = ff_avg_pixels16_y2_sse2;
    }
#endif /* HAVE_SSE2_EXTERNAL */
}

static void hpeldsp_init_ssse3(HpelDSPContext *c, int flags, int cpu_flags)
{
#if HAVE_SSSE3_EXTERNAL
    c->put_pixels_tab[0][3] = ff_put_pixels16_xy2_ssse3;
    c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_ssse3;
    c->put_pixels_tab[1][3] = ff_put_pixels8_xy2_ssse3;
    c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_ssse3;
#endif
}

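/*
 * Entry point, called from ff_hpeldsp_init() on x86. Later branches overwrite
 * entries set by earlier ones, so the fastest available instruction set wins.
 */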
av_cold void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags)
{
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_MMX(cpu_flags))
        hpeldsp_init_mmx(c, flags, cpu_flags);

    if (EXTERNAL_AMD3DNOW(cpu_flags))
        hpeldsp_init_3dnow(c, flags, cpu_flags);

    if (EXTERNAL_MMXEXT(cpu_flags))
        hpeldsp_init_mmxext(c, flags, cpu_flags);

    if (EXTERNAL_SSE2(cpu_flags))
        hpeldsp_init_sse2(c, flags, cpu_flags);

    if (EXTERNAL_SSSE3(cpu_flags))
        hpeldsp_init_ssse3(c, flags, cpu_flags);
}