vp8dsp_init.c
/*
 * VP8 DSP functions x86-optimized
 * Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
 * Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/vp8dsp.h"

#if HAVE_YASM

/*
 * MC functions
 */
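/*
 * Naming: ff_put_vp8_epel<W>_h<X>[v<Y>]_<opt> applies an X-tap horizontal
 * and/or Y-tap vertical VP8 subpel filter to a W-pixel-wide block (the
 * six-tap filter set has fractional positions whose outer taps are zero,
 * hence the 4-tap variants); the bilinear functions implement the codec's
 * alternative two-tap filter.  mx and my are the fractional motion-vector
 * components that select the filter.
 */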
void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_h6_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v4_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v6_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);

void ff_put_vp8_epel8_h4_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h6_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v4_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v6_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);

void ff_put_vp8_epel4_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);

void ff_put_vp8_bilinear4_h_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_h_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear4_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);

void ff_put_vp8_bilinear4_v_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_v_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear4_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);


void ff_put_vp8_pixels8_mmx (uint8_t *dst, ptrdiff_t dststride,
                             uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);
void ff_put_vp8_pixels16_mmx(uint8_t *dst, ptrdiff_t dststride,
                             uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);
void ff_put_vp8_pixels16_sse(uint8_t *dst, ptrdiff_t dststride,
                             uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);

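/*
 * TAP_W16/TAP_W8 synthesize a 16- or 8-pixel-wide "put" function from two
 * calls to the corresponding 8- or 4-pixel-wide assembly routine, one for
 * the left half and one offset by 8 (resp. 4) pixels for the right half.
 * They are only instantiated where no full-width assembly version exists.
 */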
#define TAP_W16(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 16_ ## TAPTYPE ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
        dst,     dststride, src,     srcstride, height, mx, my); \
    ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
        dst + 8, dststride, src + 8, srcstride, height, mx, my); \
}
#define TAP_W8(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
        dst,     dststride, src,     srcstride, height, mx, my); \
    ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
        dst + 4, dststride, src + 4, srcstride, height, mx, my); \
}

#if ARCH_X86_32
TAP_W8 (mmxext, epel, h4)
TAP_W8 (mmxext, epel, h6)
TAP_W16(mmxext, epel, h6)
TAP_W8 (mmxext, epel, v4)
TAP_W8 (mmxext, epel, v6)
TAP_W16(mmxext, epel, v6)
TAP_W8 (mmxext, bilinear, h)
TAP_W16(mmxext, bilinear, h)
TAP_W8 (mmxext, bilinear, v)
TAP_W16(mmxext, bilinear, v)
#endif

TAP_W16(sse2, epel, h6)
TAP_W16(sse2, epel, v6)
TAP_W16(sse2, bilinear, h)
TAP_W16(sse2, bilinear, v)

TAP_W16(ssse3, epel, h6)
TAP_W16(ssse3, epel, v6)
TAP_W16(ssse3, bilinear, h)
TAP_W16(ssse3, bilinear, v)

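/*
 * HVTAP builds a combined horizontal+vertical subpel filter out of the two
 * one-dimensional passes: the horizontal filter writes into an aligned
 * temporary buffer (with TAPNUMY - 1 extra rows so the vertical filter has
 * context above and below), and the vertical filter then reads from that
 * buffer into the destination.
 */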
#define HVTAP(OPT, ALIGN, TAPNUMX, TAPNUMY, SIZE, MAXHEIGHT) \
static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    DECLARE_ALIGNED(ALIGN, uint8_t, tmp)[SIZE * (MAXHEIGHT + TAPNUMY - 1)]; \
    uint8_t *tmpptr = tmp + SIZE * (TAPNUMY / 2 - 1); \
    src -= srcstride * (TAPNUMY / 2 - 1); \
    ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## _ ## OPT( \
        tmp, SIZE, src, srcstride, height + TAPNUMY - 1, mx, my); \
    ff_put_vp8_epel ## SIZE ## _v ## TAPNUMY ## _ ## OPT( \
        dst, dststride, tmpptr, SIZE, height, mx, my); \
}

#if ARCH_X86_32
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8) \
HVTAP(mmxext, 8, x, y, 8, 16)

HVTAP(mmxext, 8, 6, 6, 16, 16)
#else
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8)
#endif

HVTAPMMX(4, 4)
HVTAPMMX(4, 6)
HVTAPMMX(6, 4)
HVTAPMMX(6, 6)

#define HVTAPSSE2(x, y, w) \
HVTAP(sse2, 16, x, y, w, 16) \
HVTAP(ssse3, 16, x, y, w, 16)

HVTAPSSE2(4, 4, 8)
HVTAPSSE2(4, 6, 8)
HVTAPSSE2(6, 4, 8)
HVTAPSSE2(6, 6, 8)
HVTAPSSE2(6, 6, 16)

HVTAP(ssse3, 16, 4, 4, 4, 8)
HVTAP(ssse3, 16, 4, 6, 4, 8)
HVTAP(ssse3, 16, 6, 4, 4, 8)
HVTAP(ssse3, 16, 6, 6, 4, 8)

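/*
 * HVBILIN is the bilinear counterpart of HVTAP: a horizontal bilinear pass
 * into a temporary buffer (with one extra row for the vertical filter),
 * followed by a vertical bilinear pass into the destination.
 */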
#define HVBILIN(OPT, ALIGN, SIZE, MAXHEIGHT) \
static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    DECLARE_ALIGNED(ALIGN, uint8_t, tmp)[SIZE * (MAXHEIGHT + 2)]; \
    ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT( \
        tmp, SIZE, src, srcstride, height + 1, mx, my); \
    ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT( \
        dst, dststride, tmp, SIZE, height, mx, my); \
}

HVBILIN(mmxext, 8, 4, 8)
#if ARCH_X86_32
HVBILIN(mmxext, 8, 8, 16)
HVBILIN(mmxext, 8, 16, 16)
#endif
HVBILIN(sse2, 8, 8, 16)
HVBILIN(sse2, 8, 16, 16)
HVBILIN(ssse3, 8, 4, 8)
HVBILIN(ssse3, 8, 8, 16)
HVBILIN(ssse3, 8, 16, 16)

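/*
 * Inverse transforms: idct_add is the full 4x4 IDCT + add, the dc_add
 * variants handle DC-only blocks (4y/4uv batching several luma or chroma
 * blocks per call), and luma_dc_wht is the secondary Walsh-Hadamard
 * transform that distributes the 16 luma DC coefficients back into the
 * individual blocks.
 */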
void ff_vp8_idct_dc_add_mmx(uint8_t *dst, int16_t block[16],
                            ptrdiff_t stride);
void ff_vp8_idct_dc_add_sse4(uint8_t *dst, int16_t block[16],
                             ptrdiff_t stride);
void ff_vp8_idct_dc_add4y_mmx(uint8_t *dst, int16_t block[4][16],
                              ptrdiff_t stride);
void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, int16_t block[4][16],
                               ptrdiff_t stride);
void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, int16_t block[2][16],
                               ptrdiff_t stride);
void ff_vp8_luma_dc_wht_mmx(int16_t block[4][4][16], int16_t dc[16]);
void ff_vp8_luma_dc_wht_sse(int16_t block[4][4][16], int16_t dc[16]);
void ff_vp8_idct_add_mmx(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
void ff_vp8_idct_add_sse(uint8_t *dst, int16_t block[16], ptrdiff_t stride);

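/*
 * Loop filters.  The "simple" filters take a single limit (flim); the
 * inner-edge and macroblock-edge filters take the edge limit (e), interior
 * limit (i) and high-edge-variance threshold (hvt) from the VP8 loop-filter
 * header.
 */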
#define DECLARE_LOOP_FILTER(NAME) \
void ff_vp8_v_loop_filter_simple_ ## NAME(uint8_t *dst, \
                                          ptrdiff_t stride, \
                                          int flim); \
void ff_vp8_h_loop_filter_simple_ ## NAME(uint8_t *dst, \
                                          ptrdiff_t stride, \
                                          int flim); \
void ff_vp8_v_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt);

DECLARE_LOOP_FILTER(mmx)
DECLARE_LOOP_FILTER(mmxext)
DECLARE_LOOP_FILTER(sse2)
DECLARE_LOOP_FILTER(ssse3)
DECLARE_LOOP_FILTER(sse4)

#endif /* HAVE_YASM */

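/*
 * The epel/bilinear function tables are indexed as [IDX][v][h]: IDX selects
 * the block width (0 = 16, 1 = 8, 2 = 4), v and h select the vertical and
 * horizontal filter (0 = none; for epel, 1 = 4-tap and 2 = 6-tap; for
 * bilinear, both 1 and 2 map to the bilinear filter).  The macros below
 * fill in all combinations for one width and one instruction set.
 */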
#define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT

#define VP8_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \
    VP8_LUMA_MC_FUNC(IDX, SIZE, OPT)

#define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT

av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
{
#if HAVE_YASM
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx;
        c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
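        /* these MMX fallbacks are only built on 32-bit x86; x86-64 implies
         * SSE2, so the SSE/SSE2 versions assigned further down always take
         * over there */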
#if ARCH_X86_32
        c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
        c->vp8_idct_add = ff_vp8_idct_add_mmx;
        c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx;
        c->put_vp8_epel_pixels_tab[0][0][0] =
        c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
#endif
        c->put_vp8_epel_pixels_tab[1][0][0] =
        c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;

#if ARCH_X86_32
        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx;
        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx;

        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx;

        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx;
        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx;
        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx;
#endif
    }

    /* note that 4-tap width=16 functions are missing because w=16
     * is only used for luma, and luma is always a copy or sixtap. */
    if (mm_flags & AV_CPU_FLAG_MMXEXT) {
        VP8_MC_FUNC(2, 4, mmxext);
        VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
#if ARCH_X86_32
        VP8_LUMA_MC_FUNC(0, 16, mmxext);
        VP8_MC_FUNC(1, 8, mmxext);
        VP8_BILINEAR_MC_FUNC(0, 16, mmxext);
        VP8_BILINEAR_MC_FUNC(1, 8, mmxext);

        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext;
        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext;

        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext;

        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext;
        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext;
        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
#endif
    }

    if (mm_flags & AV_CPU_FLAG_SSE) {
        c->vp8_idct_add = ff_vp8_idct_add_sse;
        c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
        c->put_vp8_epel_pixels_tab[0][0][0] =
        c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
    }

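    /* AV_CPU_FLAG_SSE2SLOW marks CPUs where SSE2 is supported but usually
     * not faster than MMX/SSE; this block is enabled even on those, whereas
     * the following block requires the plain AV_CPU_FLAG_SSE2 flag. */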
    if (mm_flags & (AV_CPU_FLAG_SSE2|AV_CPU_FLAG_SSE2SLOW)) {
        VP8_LUMA_MC_FUNC(0, 16, sse2);
        VP8_MC_FUNC(1, 8, sse2);
        VP8_BILINEAR_MC_FUNC(0, 16, sse2);
        VP8_BILINEAR_MC_FUNC(1, 8, sse2);

        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2;

        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2;

        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2;
        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2;
    }

    if (mm_flags & AV_CPU_FLAG_SSE2) {
        c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2;

        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2;

        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2;

        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2;
    }

    if (mm_flags & AV_CPU_FLAG_SSSE3) {
        VP8_LUMA_MC_FUNC(0, 16, ssse3);
        VP8_MC_FUNC(1, 8, ssse3);
        VP8_MC_FUNC(2, 4, ssse3);
        VP8_BILINEAR_MC_FUNC(0, 16, ssse3);
        VP8_BILINEAR_MC_FUNC(1, 8, ssse3);
        VP8_BILINEAR_MC_FUNC(2, 4, ssse3);

        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3;
        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3;

        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3;

        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3;
        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3;
        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3;
    }

    if (mm_flags & AV_CPU_FLAG_SSE4) {
        c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4;

        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4;
        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse4;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse4;
    }
#endif /* HAVE_YASM */
}