h264pred.c
/*
 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 prediction functions.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "libavutil/avassert.h"
#include "dsputil.h"
#include "h264pred.h"
#include "avcodec.h" // for AV_CODEC_ID_*

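/*
 * h264pred_template.c is included once per BIT_DEPTH below; each pass
 * instantiates the generic prediction functions for that depth, with the
 * depth suffixed to the name (e.g. pred4x4_dc_8_c, pred16x16_plane_10_c).
 * ff_h264_pred_init() later selects the matching set through the
 * FUNC()/FUNCC() macros further down.
 */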
#define BIT_DEPTH 8
#include "h264pred_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 9
#include "h264pred_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 10
#include "h264pred_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 12
#include "h264pred_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 14
#include "h264pred_template.c"
#undef BIT_DEPTH

static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright,
                                   ptrdiff_t stride)
{
    const unsigned lt = src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    uint32_t v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
                          (t0 + 2*t1 + t2 + 2) >> 2,
                          (t1 + 2*t2 + t3 + 2) >> 2,
                          (t2 + 2*t3 + t4 + 2) >> 2);

    AV_WN32A(src+0*stride, v);
    AV_WN32A(src+1*stride, v);
    AV_WN32A(src+2*stride, v);
    AV_WN32A(src+3*stride, v);
}

static void pred4x4_horizontal_vp8_c(uint8_t *src, const uint8_t *topright,
                                     ptrdiff_t stride)
{
    const unsigned lt = src[-1-1*stride];
    LOAD_LEFT_EDGE

    AV_WN32A(src+0*stride, ((lt + 2*l0 + l1 + 2) >> 2)*0x01010101);
    AV_WN32A(src+1*stride, ((l0 + 2*l1 + l2 + 2) >> 2)*0x01010101);
    AV_WN32A(src+2*stride, ((l1 + 2*l2 + l3 + 2) >> 2)*0x01010101);
    AV_WN32A(src+3*stride, ((l2 + 2*l3 + l3 + 2) >> 2)*0x01010101);
}

static void pred4x4_down_left_svq3_c(uint8_t *src, const uint8_t *topright,
                                     ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE

    src[0+0*stride]=(l1 + t1)>>1;
    src[1+0*stride]=
    src[0+1*stride]=(l2 + t2)>>1;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=
    src[3+2*stride]=
    src[2+3*stride]=
    src[3+3*stride]=(l3 + t3)>>1;
}

static void pred4x4_down_left_rv40_c(uint8_t *src, const uint8_t *topright,
                                     ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE

    src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
    src[1+0*stride]=
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + l4 + 2*l3 + 2)>>3;
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3 + l5 + 2*l4 + 2)>>3;
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l4 + l6 + 2*l5 + 2)>>3;
    src[3+2*stride]=
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3;
    src[3+3*stride]=(t6 + t7 + 1 + l6 + l7 + 1)>>2;
}

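/*
 * The RV40 "_nodown" variants are used when the samples below-left of the
 * block are unavailable: the last left sample (l3) is substituted for the
 * l4..l7 neighbours that the regular RV40 predictors read.
 */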
static void pred4x4_down_left_rv40_nodown_c(uint8_t *src,
                                            const uint8_t *topright,
                                            ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    LOAD_LEFT_EDGE

    src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
    src[1+0*stride]=
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + 3*l3 + 2)>>3;
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3*4 + 2)>>3;
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l3*4 + 2)>>3;
    src[3+2*stride]=
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3;
    src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;
}

static void pred4x4_vertical_left_rv40(uint8_t *src, const uint8_t *topright,
                                       ptrdiff_t stride,
                                       const int l0, const int l1, const int l2,
                                       const int l3, const int l4)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(2*t0 + 2*t1 + l1 + 2*l2 + l3 + 4)>>3;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4 + 1)>>1;
    src[3+2*stride]=(t4 + t5 + 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + l2 + 2*l3 + l4 + 4)>>3;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}

static void pred4x4_vertical_left_rv40_c(uint8_t *src, const uint8_t *topright,
                                         ptrdiff_t stride)
{
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE

    pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l4);
}

static void pred4x4_vertical_left_rv40_nodown_c(uint8_t *src,
                                                const uint8_t *topright,
                                                ptrdiff_t stride)
{
    LOAD_LEFT_EDGE

    pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l3);
}

static void pred4x4_vertical_left_vp8_c(uint8_t *src, const uint8_t *topright,
                                        ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t0 + t1 + 1)>>1;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4 + 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+2*stride]=(t4 + 2*t5 + t6 + 2)>>2;
    src[3+3*stride]=(t5 + 2*t6 + t7 + 2)>>2;
}

static void pred4x4_horizontal_up_rv40_c(uint8_t *src, const uint8_t *topright,
                                         ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE

    src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
    src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
    src[2+0*stride]=
    src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
    src[3+0*stride]=
    src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
    src[2+1*stride]=
    src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
    src[3+1*stride]=
    src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
    src[3+2*stride]=
    src[1+3*stride]=(l3 + 2*l4 + l5 + 2)>>2;
    src[0+3*stride]=
    src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2;
    src[2+3*stride]=(l4 + l5 + 1)>>1;
    src[3+3*stride]=(l4 + 2*l5 + l6 + 2)>>2;
}

static void pred4x4_horizontal_up_rv40_nodown_c(uint8_t *src,
                                                const uint8_t *topright,
                                                ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    LOAD_LEFT_EDGE

    src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
    src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
    src[2+0*stride]=
    src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
    src[3+0*stride]=
    src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
    src[2+1*stride]=
    src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
    src[3+1*stride]=
    src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
    src[3+2*stride]=
    src[1+3*stride]=l3;
    src[0+3*stride]=
    src[2+2*stride]=(t6 + t7 + 2*l3 + 2)>>2;
    src[2+3*stride]=
    src[3+3*stride]=l3;
}

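/*
 * VP8 TrueMotion ("TM") prediction: every pixel is predicted as
 * left + top - topleft, clamped to the 8-bit range. The clamping is done
 * through ff_cropTbl: cm is pre-offset by -topleft, cm_in adds the row's
 * left sample, and indexing with the top sample yields the clipped sum.
 */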
static void pred4x4_tm_vp8_c(uint8_t *src, const uint8_t *topright,
                             ptrdiff_t stride)
{
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 4; y++) {
        const uint8_t *cm_in = cm + src[-1];
        src[0] = cm_in[top[0]];
        src[1] = cm_in[top[1]];
        src[2] = cm_in[top[2]];
        src[3] = cm_in[top[3]];
        src += stride;
    }
}

static void pred16x16_plane_svq3_c(uint8_t *src, ptrdiff_t stride)
{
    pred16x16_plane_compat_8_c(src, stride, 1, 0);
}

static void pred16x16_plane_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    pred16x16_plane_compat_8_c(src, stride, 0, 1);
}

static void pred16x16_tm_vp8_c(uint8_t *src, ptrdiff_t stride)
{
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 16; y++) {
        const uint8_t *cm_in = cm + src[-1];
        src[0]  = cm_in[top[0]];
        src[1]  = cm_in[top[1]];
        src[2]  = cm_in[top[2]];
        src[3]  = cm_in[top[3]];
        src[4]  = cm_in[top[4]];
        src[5]  = cm_in[top[5]];
        src[6]  = cm_in[top[6]];
        src[7]  = cm_in[top[7]];
        src[8]  = cm_in[top[8]];
        src[9]  = cm_in[top[9]];
        src[10] = cm_in[top[10]];
        src[11] = cm_in[top[11]];
        src[12] = cm_in[top[12]];
        src[13] = cm_in[top[13]];
        src[14] = cm_in[top[14]];
        src[15] = cm_in[top[15]];
        src += stride;
    }
}

static void pred8x8_left_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    int i;
    unsigned dc0;

    dc0=0;
    for(i=0;i<8; i++)
        dc0+= src[-1+i*stride];
    dc0= 0x01010101*((dc0 + 4)>>3);

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

static void pred8x8_top_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    int i;
    unsigned dc0;

    dc0=0;
    for(i=0;i<8; i++)
        dc0+= src[i-stride];
    dc0= 0x01010101*((dc0 + 4)>>3);

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

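/*
 * RV40 chroma DC: a single DC value is computed from the 8 top and 8 left
 * neighbours and used for the whole 8x8 block, unlike the H.264 predictor,
 * which derives a separate DC for each 4x4 quadrant.
 */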
static void pred8x8_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    int i;
    unsigned dc0 = 0;

    for(i=0;i<4; i++){
        dc0+= src[-1+i*stride] + src[i-stride];
        dc0+= src[4+i-stride];
        dc0+= src[-1+(i+4)*stride];
    }
    dc0= 0x01010101*((dc0 + 8)>>4);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

static void pred8x8_tm_vp8_c(uint8_t *src, ptrdiff_t stride)
{
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 8; y++) {
        const uint8_t *cm_in = cm + src[-1];
        src[0] = cm_in[top[0]];
        src[1] = cm_in[top[1]];
        src[2] = cm_in[top[2]];
        src[3] = cm_in[top[3]];
        src[4] = cm_in[top[4]];
        src[5] = cm_in[top[5]];
        src[6] = cm_in[top[6]];
        src[7] = cm_in[top[7]];
        src += stride;
    }
}

/**
 * Set the intra prediction function pointers.
 */
void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
                       int chroma_format_idc)
{
#undef FUNC
#undef FUNCC
#define FUNC(a, depth) a ## _ ## depth
#define FUNCC(a, depth) a ## _ ## depth ## _c
#define FUNCD(a) a ## _c

#define H264_PRED(depth) \
    if(codec_id != AV_CODEC_ID_RV40){\
        if(codec_id == AV_CODEC_ID_VP8) {\
            h->pred4x4[VERT_PRED       ]= FUNCD(pred4x4_vertical_vp8);\
            h->pred4x4[HOR_PRED        ]= FUNCD(pred4x4_horizontal_vp8);\
        } else {\
            h->pred4x4[VERT_PRED       ]= FUNCC(pred4x4_vertical, depth);\
            h->pred4x4[HOR_PRED        ]= FUNCC(pred4x4_horizontal, depth);\
        }\
        h->pred4x4[DC_PRED             ]= FUNCC(pred4x4_dc, depth);\
        if(codec_id == AV_CODEC_ID_SVQ3)\
            h->pred4x4[DIAG_DOWN_LEFT_PRED]= FUNCD(pred4x4_down_left_svq3);\
        else\
            h->pred4x4[DIAG_DOWN_LEFT_PRED]= FUNCC(pred4x4_down_left, depth);\
        h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right, depth);\
        h->pred4x4[VERT_RIGHT_PRED     ]= FUNCC(pred4x4_vertical_right, depth);\
        h->pred4x4[HOR_DOWN_PRED       ]= FUNCC(pred4x4_horizontal_down, depth);\
        if (codec_id == AV_CODEC_ID_VP8) {\
            h->pred4x4[VERT_LEFT_PRED  ]= FUNCD(pred4x4_vertical_left_vp8);\
        } else\
            h->pred4x4[VERT_LEFT_PRED  ]= FUNCC(pred4x4_vertical_left, depth);\
        h->pred4x4[HOR_UP_PRED         ]= FUNCC(pred4x4_horizontal_up, depth);\
        if(codec_id != AV_CODEC_ID_VP8) {\
            h->pred4x4[LEFT_DC_PRED    ]= FUNCC(pred4x4_left_dc, depth);\
            h->pred4x4[TOP_DC_PRED     ]= FUNCC(pred4x4_top_dc, depth);\
            h->pred4x4[DC_128_PRED     ]= FUNCC(pred4x4_128_dc, depth);\
        } else {\
            h->pred4x4[TM_VP8_PRED     ]= FUNCD(pred4x4_tm_vp8);\
            h->pred4x4[DC_127_PRED     ]= FUNCC(pred4x4_127_dc, depth);\
            h->pred4x4[DC_129_PRED     ]= FUNCC(pred4x4_129_dc, depth);\
            h->pred4x4[VERT_VP8_PRED   ]= FUNCC(pred4x4_vertical, depth);\
            h->pred4x4[HOR_VP8_PRED    ]= FUNCC(pred4x4_horizontal, depth);\
        }\
    }else{\
        h->pred4x4[VERT_PRED           ]= FUNCC(pred4x4_vertical, depth);\
        h->pred4x4[HOR_PRED            ]= FUNCC(pred4x4_horizontal, depth);\
        h->pred4x4[DC_PRED             ]= FUNCC(pred4x4_dc, depth);\
        h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCD(pred4x4_down_left_rv40);\
        h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right, depth);\
        h->pred4x4[VERT_RIGHT_PRED     ]= FUNCC(pred4x4_vertical_right, depth);\
        h->pred4x4[HOR_DOWN_PRED       ]= FUNCC(pred4x4_horizontal_down, depth);\
        h->pred4x4[VERT_LEFT_PRED      ]= FUNCD(pred4x4_vertical_left_rv40);\
        h->pred4x4[HOR_UP_PRED         ]= FUNCD(pred4x4_horizontal_up_rv40);\
        h->pred4x4[LEFT_DC_PRED        ]= FUNCC(pred4x4_left_dc, depth);\
        h->pred4x4[TOP_DC_PRED         ]= FUNCC(pred4x4_top_dc, depth);\
        h->pred4x4[DC_128_PRED         ]= FUNCC(pred4x4_128_dc, depth);\
        h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_down_left_rv40_nodown);\
        h->pred4x4[HOR_UP_PRED_RV40_NODOWN]= FUNCD(pred4x4_horizontal_up_rv40_nodown);\
        h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_vertical_left_rv40_nodown);\
    }\
\
    h->pred8x8l[VERT_PRED           ]= FUNCC(pred8x8l_vertical, depth);\
    h->pred8x8l[HOR_PRED            ]= FUNCC(pred8x8l_horizontal, depth);\
    h->pred8x8l[DC_PRED             ]= FUNCC(pred8x8l_dc, depth);\
    h->pred8x8l[DIAG_DOWN_LEFT_PRED ]= FUNCC(pred8x8l_down_left, depth);\
    h->pred8x8l[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred8x8l_down_right, depth);\
    h->pred8x8l[VERT_RIGHT_PRED     ]= FUNCC(pred8x8l_vertical_right, depth);\
    h->pred8x8l[HOR_DOWN_PRED       ]= FUNCC(pred8x8l_horizontal_down, depth);\
    h->pred8x8l[VERT_LEFT_PRED      ]= FUNCC(pred8x8l_vertical_left, depth);\
    h->pred8x8l[HOR_UP_PRED         ]= FUNCC(pred8x8l_horizontal_up, depth);\
    h->pred8x8l[LEFT_DC_PRED        ]= FUNCC(pred8x8l_left_dc, depth);\
    h->pred8x8l[TOP_DC_PRED         ]= FUNCC(pred8x8l_top_dc, depth);\
    h->pred8x8l[DC_128_PRED         ]= FUNCC(pred8x8l_128_dc, depth);\
\
    if (chroma_format_idc == 1) {\
        h->pred8x8[VERT_PRED8x8   ]= FUNCC(pred8x8_vertical, depth);\
        h->pred8x8[HOR_PRED8x8    ]= FUNCC(pred8x8_horizontal, depth);\
    } else {\
        h->pred8x8[VERT_PRED8x8   ]= FUNCC(pred8x16_vertical, depth);\
        h->pred8x8[HOR_PRED8x8    ]= FUNCC(pred8x16_horizontal, depth);\
    }\
    if (codec_id != AV_CODEC_ID_VP8) {\
        if (chroma_format_idc == 1) {\
            h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_plane, depth);\
        } else {\
            h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x16_plane, depth);\
        }\
    } else\
        h->pred8x8[PLANE_PRED8x8]= FUNCD(pred8x8_tm_vp8);\
    if(codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP8){\
        if (chroma_format_idc == 1) {\
            h->pred8x8[DC_PRED8x8     ]= FUNCC(pred8x8_dc, depth);\
            h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc, depth);\
            h->pred8x8[TOP_DC_PRED8x8 ]= FUNCC(pred8x8_top_dc, depth);\
            h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8]= FUNC(pred8x8_mad_cow_dc_l0t, depth);\
            h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8]= FUNC(pred8x8_mad_cow_dc_0lt, depth);\
            h->pred8x8[ALZHEIMER_DC_L00_PRED8x8]= FUNC(pred8x8_mad_cow_dc_l00, depth);\
            h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8]= FUNC(pred8x8_mad_cow_dc_0l0, depth);\
        } else {\
            h->pred8x8[DC_PRED8x8     ]= FUNCC(pred8x16_dc, depth);\
            h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x16_left_dc, depth);\
            h->pred8x8[TOP_DC_PRED8x8 ]= FUNCC(pred8x16_top_dc, depth);\
            h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8]= FUNC(pred8x16_mad_cow_dc_l0t, depth);\
            h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8]= FUNC(pred8x16_mad_cow_dc_0lt, depth);\
            h->pred8x8[ALZHEIMER_DC_L00_PRED8x8]= FUNC(pred8x16_mad_cow_dc_l00, depth);\
            h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8]= FUNC(pred8x16_mad_cow_dc_0l0, depth);\
        }\
    }else{\
        h->pred8x8[DC_PRED8x8     ]= FUNCD(pred8x8_dc_rv40);\
        h->pred8x8[LEFT_DC_PRED8x8]= FUNCD(pred8x8_left_dc_rv40);\
        h->pred8x8[TOP_DC_PRED8x8 ]= FUNCD(pred8x8_top_dc_rv40);\
        if (codec_id == AV_CODEC_ID_VP8) {\
            h->pred8x8[DC_127_PRED8x8]= FUNCC(pred8x8_127_dc, depth);\
            h->pred8x8[DC_129_PRED8x8]= FUNCC(pred8x8_129_dc, depth);\
        }\
    }\
    if (chroma_format_idc == 1) {\
        h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x8_128_dc, depth);\
    } else {\
        h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x16_128_dc, depth);\
    }\
\
    h->pred16x16[DC_PRED8x8     ]= FUNCC(pred16x16_dc, depth);\
    h->pred16x16[VERT_PRED8x8   ]= FUNCC(pred16x16_vertical, depth);\
    h->pred16x16[HOR_PRED8x8    ]= FUNCC(pred16x16_horizontal, depth);\
    switch(codec_id){\
    case AV_CODEC_ID_SVQ3:\
        h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_plane_svq3);\
        break;\
    case AV_CODEC_ID_RV40:\
        h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_plane_rv40);\
        break;\
    case AV_CODEC_ID_VP8:\
        h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_tm_vp8);\
        h->pred16x16[DC_127_PRED8x8]= FUNCC(pred16x16_127_dc, depth);\
        h->pred16x16[DC_129_PRED8x8]= FUNCC(pred16x16_129_dc, depth);\
        break;\
    default:\
        h->pred16x16[PLANE_PRED8x8 ]= FUNCC(pred16x16_plane, depth);\
        break;\
    }\
    h->pred16x16[LEFT_DC_PRED8x8]= FUNCC(pred16x16_left_dc, depth);\
    h->pred16x16[TOP_DC_PRED8x8 ]= FUNCC(pred16x16_top_dc, depth);\
    h->pred16x16[DC_128_PRED8x8 ]= FUNCC(pred16x16_128_dc, depth);\
\
    /* special lossless h/v prediction for h264 */ \
    h->pred4x4_add  [VERT_PRED   ]= FUNCC(pred4x4_vertical_add, depth);\
    h->pred4x4_add  [ HOR_PRED   ]= FUNCC(pred4x4_horizontal_add, depth);\
    h->pred8x8l_add [VERT_PRED   ]= FUNCC(pred8x8l_vertical_add, depth);\
    h->pred8x8l_add [ HOR_PRED   ]= FUNCC(pred8x8l_horizontal_add, depth);\
    if (chroma_format_idc == 1) {\
        h->pred8x8_add [VERT_PRED8x8]= FUNCC(pred8x8_vertical_add, depth);\
        h->pred8x8_add [ HOR_PRED8x8]= FUNCC(pred8x8_horizontal_add, depth);\
    } else {\
        h->pred8x8_add [VERT_PRED8x8]= FUNCC(pred8x16_vertical_add, depth);\
        h->pred8x8_add [ HOR_PRED8x8]= FUNCC(pred8x16_horizontal_add, depth);\
    }\
    h->pred16x16_add[VERT_PRED8x8]= FUNCC(pred16x16_vertical_add, depth);\
    h->pred16x16_add[ HOR_PRED8x8]= FUNCC(pred16x16_horizontal_add, depth);\

    if(!chroma_format_idc)
        chroma_format_idc = 1;

    switch (bit_depth) {
        case 9:
            H264_PRED(9)
            break;
        case 10:
            H264_PRED(10)
            break;
        case 12:
            H264_PRED(12)
            break;
        case 14:
            H264_PRED(14)
            break;
        default:
            av_assert0(bit_depth<=8);
            H264_PRED(8)
            break;
    }

    if (ARCH_ARM) ff_h264_pred_init_arm(h, codec_id, bit_depth, chroma_format_idc);
    if (ARCH_X86) ff_h264_pred_init_x86(h, codec_id, bit_depth, chroma_format_idc);
}
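
/*
 * Usage sketch (illustrative only; dst, topright and stride are hypothetical
 * caller-provided values, not part of this file):
 *
 *     H264PredContext pred;
 *     ff_h264_pred_init(&pred, AV_CODEC_ID_H264, 8, 1);
 *     // fill a 4x4 block in place from its already-decoded neighbours
 *     pred.pred4x4[DC_PRED](dst, topright, stride);
 */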