/* NOTE(review): extraction fragment — per the symbol index this is the interior
 * of vc1_v_overlap_c (overlap transform across a horizontal block edge).
 * Original lines 44-47 and 50-51 are missing from this capture; d1/d2 are the
 * overlap-filter deltas, rnd presumably toggles the rounding direction, and
 * the two edge-adjacent rows are updated with 8-bit clipping — confirm
 * against the full source. */
43 for(i = 0; i < 8; i++) {
48 d1 = (a - d + 3 + rnd) >> 3;
49 d2 = (a - d + b - c + 4 - rnd) >> 3;
52 src[-
stride] = av_clip_uint8(b - d2);
53 src[0] = av_clip_uint8(c + d2);
/* NOTE(review): extraction fragment — interior of vc1_h_overlap_c (overlap
 * transform across a vertical block edge; same filter as the vertical
 * variant but addressing neighbours at +/-1 instead of +/-stride).
 * Interior lines are missing from this capture. */
68 for(i = 0; i < 8; i++) {
73 d1 = (a - d + 3 + rnd) >> 3;
74 d2 = (a - d + b - c + 4 - rnd) >> 3;
77 src[-1] = av_clip_uint8(b - d2);
78 src[0] = av_clip_uint8(c + d2);
/* NOTE(review): extraction fragment — interior of vc1_v_s_overlap_c, the
 * 16-bit (coefficient-domain) overlap smoothing between a `top` and a
 * `bottom` block.  rnd1/rnd2 alternate rounding; results keep full 16-bit
 * range ((x << 3) +/- delta, then >> 3).  Lines 92-98 missing here. */
90 int rnd1 = 4, rnd2 = 3;
91 for(i = 0; i < 8; i++) {
99 top[48] = ((a << 3) - d1 + rnd1) >> 3;
100 top[56] = ((b << 3) - d2 + rnd2) >> 3;
101 bottom[0] = ((c << 3) + d2 + rnd1) >> 3;
102 bottom[8] = ((d << 3) + d1 + rnd2) >> 3;
/* NOTE(review): extraction fragment — interior of vc1_h_s_overlap_c, the
 * horizontal counterpart of vc1_v_s_overlap_c operating on `left`/`right`
 * coefficient rows (stride 1 within a row).  Lines 118-124 missing here. */
116 int rnd1 = 4, rnd2 = 3;
117 for(i = 0; i < 8; i++) {
125 left[6] = ((a << 3) - d1 + rnd1) >> 3;
126 left[7] = ((b << 3) - d2 + rnd2) >> 3;
127 right[0] = ((c << 3) + d2 + rnd1) >> 3;
128 right[1] = ((d << 3) + d1 + rnd2) >> 3;
/* NOTE(review): extraction fragment — body of vc1_filter_line, the VC-1
 * in-loop deblocking filter for one line (per the symbol index).  a0/a1/a2
 * are edge-activity measures (the `x >> 31` / xor / subtract sequences are
 * branchless absolute values for 32-bit ints); filtering is applied only when
 * a1 < a0 or a2 < a0.  Several lines (149, 153, 156-157, 161-166) are
 * missing from this capture, including the `clip`/`a3` definitions. */
146 int a0 = (2*(src[-2*
stride] - src[ 1*
stride]) - 5*(src[-1*stride] - src[ 0*stride]) + 4) >> 3;
147 int a0_sign = a0 >> 31;
148 a0 = (a0 ^ a0_sign) - a0_sign;
150 int a1 =
FFABS((2*(src[-4*stride] - src[-1*stride]) - 5*(src[-3*stride] - src[-2*stride]) + 4) >> 3);
151 int a2 =
FFABS((2*(src[ 0*stride] - src[ 3*stride]) - 5*(src[ 1*stride] - src[ 2*stride]) + 4) >> 3);
152 if(a1 < a0 || a2 < a0){
154 int clip_sign = clip >> 31;
155 clip = ((clip ^ clip_sign) - clip_sign)>>1;
158 int d = 5 * (a3 -
a0);
159 int d_sign = (d >> 31);
160 d = ((d ^ d_sign) - d_sign) >> 3;
163 if( d_sign ^ clip_sign )
167 d = (d ^ d_sign) - d_sign;
168 src[-1*
stride] = av_clip_uint8(src[-1*stride] - d);
169 src[ 0*
stride] = av_clip_uint8(src[ 0*stride] + d);
/* NOTE(review): extraction fragment — loop header from vc1_loop_filter,
 * which presumably applies vc1_filter_line in groups of 4 pixels along the
 * edge (loop body missing from this capture). */
192 for(i = 0; i <
len; i += 4){
/* NOTE(review): extraction fragment — vc1_inv_trans_8x8_dc_c, the DC-only
 * fast path of the 8x8 inverse transform: scale the DC coefficient twice
 * (combined row/column DC gains with rounding), then add it to all 64
 * destination pixels with 8-bit clipping.  The dc extraction from block[]
 * and the `dest += linesize` advance are missing from this capture. */
239 dc = (3 * dc + 1) >> 1;
240 dc = (3 * dc + 16) >> 5;
241 for(i = 0; i < 8; i++){
242 dest[0] = av_clip_uint8(dest[0] + dc);
243 dest[1] = av_clip_uint8(dest[1] + dc);
244 dest[2] = av_clip_uint8(dest[2] + dc);
245 dest[3] = av_clip_uint8(dest[3] + dc);
246 dest[4] = av_clip_uint8(dest[4] + dc);
247 dest[5] = av_clip_uint8(dest[5] + dc);
248 dest[6] = av_clip_uint8(dest[6] + dc);
249 dest[7] = av_clip_uint8(dest[7] + dc);
262 for(i = 0; i < 8; i++){
263 t1 = 12 * (src[ 0] + src[32]) + 4;
264 t2 = 12 * (src[ 0] - src[32]) + 4;
265 t3 = 16 * src[16] + 6 * src[48];
266 t4 = 6 * src[16] - 16 * src[48];
273 t1 = 16 * src[ 8] + 15 * src[24] + 9 * src[40] + 4 * src[56];
274 t2 = 15 * src[ 8] - 4 * src[24] - 16 * src[40] - 9 * src[56];
275 t3 = 9 * src[ 8] - 16 * src[24] + 4 * src[40] + 15 * src[56];
276 t4 = 4 * src[ 8] - 9 * src[24] + 15 * src[40] - 16 * src[56];
278 dst[0] = (t5 +
t1) >> 3;
279 dst[1] = (t6 +
t2) >> 3;
280 dst[2] = (t7 +
t3) >> 3;
281 dst[3] = (t8 +
t4) >> 3;
282 dst[4] = (t8 -
t4) >> 3;
283 dst[5] = (t7 -
t3) >> 3;
284 dst[6] = (t6 -
t2) >> 3;
285 dst[7] = (t5 -
t1) >> 3;
293 for(i = 0; i < 8; i++){
294 t1 = 12 * (src[ 0] + src[32]) + 64;
295 t2 = 12 * (src[ 0] - src[32]) + 64;
296 t3 = 16 * src[16] + 6 * src[48];
297 t4 = 6 * src[16] - 16 * src[48];
304 t1 = 16 * src[ 8] + 15 * src[24] + 9 * src[40] + 4 * src[56];
305 t2 = 15 * src[ 8] - 4 * src[24] - 16 * src[40] - 9 * src[56];
306 t3 = 9 * src[ 8] - 16 * src[24] + 4 * src[40] + 15 * src[56];
307 t4 = 4 * src[ 8] - 9 * src[24] + 15 * src[40] - 16 * src[56];
309 dst[ 0] = (t5 +
t1) >> 7;
310 dst[ 8] = (t6 +
t2) >> 7;
311 dst[16] = (t7 +
t3) >> 7;
312 dst[24] = (t8 +
t4) >> 7;
313 dst[32] = (t8 - t4 + 1) >> 7;
314 dst[40] = (t7 - t3 + 1) >> 7;
315 dst[48] = (t6 - t2 + 1) >> 7;
316 dst[56] = (t5 - t1 + 1) >> 7;
/* NOTE(review): extraction fragment — vc1_inv_trans_8x4_dc_c (per the symbol
 * index: DC-only inverse transform for an 8-wide x 4-tall part of the
 * block).  Scale dc for the 8-point row and 4-point column gains, then add
 * to the 8x4 destination area with clipping.  dc extraction and the
 * `dest += linesize` advance are missing from this capture. */
329 dc = ( 3 * dc + 1) >> 1;
330 dc = (17 * dc + 64) >> 7;
331 for(i = 0; i < 4; i++){
332 dest[0] = av_clip_uint8(dest[0] + dc);
333 dest[1] = av_clip_uint8(dest[1] + dc);
334 dest[2] = av_clip_uint8(dest[2] + dc);
335 dest[3] = av_clip_uint8(dest[3] + dc);
336 dest[4] = av_clip_uint8(dest[4] + dc);
337 dest[5] = av_clip_uint8(dest[5] + dc);
338 dest[6] = av_clip_uint8(dest[6] + dc);
339 dest[7] = av_clip_uint8(dest[7] + dc);
/* NOTE(review): extraction fragment — vc1_inv_trans_8x4_c.  First pass:
 * 8-point transform on each of 4 rows (rounding +4, >>3).  Second pass:
 * 4-point transform (17/22/10 kernel, rounding +64, >>7) on each of 8
 * columns, added to the destination pixels with clipping.  t5..t8 setup
 * lines and the pointer advances are missing from this capture. */
352 for(i = 0; i < 4; i++){
353 t1 = 12 * (src[0] + src[4]) + 4;
354 t2 = 12 * (src[0] - src[4]) + 4;
355 t3 = 16 * src[2] + 6 * src[6];
356 t4 = 6 * src[2] - 16 * src[6];
363 t1 = 16 * src[1] + 15 * src[3] + 9 * src[5] + 4 * src[7];
364 t2 = 15 * src[1] - 4 * src[3] - 16 * src[5] - 9 * src[7];
365 t3 = 9 * src[1] - 16 * src[3] + 4 * src[5] + 15 * src[7];
366 t4 = 4 * src[1] - 9 * src[3] + 15 * src[5] - 16 * src[7];
368 dst[0] = (t5 +
t1) >> 3;
369 dst[1] = (t6 +
t2) >> 3;
370 dst[2] = (t7 +
t3) >> 3;
371 dst[3] = (t8 +
t4) >> 3;
372 dst[4] = (t8 -
t4) >> 3;
373 dst[5] = (t7 -
t3) >> 3;
374 dst[6] = (t6 -
t2) >> 3;
375 dst[7] = (t5 -
t1) >> 3;
382 for(i = 0; i < 8; i++){
383 t1 = 17 * (src[ 0] + src[16]) + 64;
384 t2 = 17 * (src[ 0] - src[16]) + 64;
385 t3 = 22 * src[ 8] + 10 * src[24];
386 t4 = 22 * src[24] - 10 * src[ 8];
388 dest[0*linesize] = av_clip_uint8(dest[0*linesize] + ((t1 + t3) >> 7));
389 dest[1*linesize] = av_clip_uint8(dest[1*linesize] + ((t2 - t4) >> 7));
390 dest[2*linesize] = av_clip_uint8(dest[2*linesize] + ((t2 + t4) >> 7));
391 dest[3*linesize] = av_clip_uint8(dest[3*linesize] + ((t1 - t3) >> 7));
/* NOTE(review): extraction fragment — vc1_inv_trans_4x8_dc_c (symbol index:
 * DC-only inverse transform for a 4-wide x 8-tall part of the block).
 * dc is scaled for the 4-point row and 8-point column gains, then added to
 * the 4x8 destination area; dc extraction and the row advance are missing
 * from this capture. */
404 dc = (17 * dc + 4) >> 3;
405 dc = (12 * dc + 64) >> 7;
406 for(i = 0; i < 8; i++){
407 dest[0] = av_clip_uint8(dest[0] + dc);
408 dest[1] = av_clip_uint8(dest[1] + dc);
409 dest[2] = av_clip_uint8(dest[2] + dc);
410 dest[3] = av_clip_uint8(dest[3] + dc);
/* NOTE(review): extraction fragment — vc1_inv_trans_4x8_c.  First pass:
 * 4-point transform (17/22/10 kernel, +4, >>3) on each of 8 rows.  Second
 * pass: 8-point transform (+64, >>7, extra +1 on the lower half) on each of
 * 4 columns, added to the destination with clipping.  t5..t8 setup lines
 * and the pointer advances are missing from this capture. */
423 for(i = 0; i < 8; i++){
424 t1 = 17 * (src[0] + src[2]) + 4;
425 t2 = 17 * (src[0] - src[2]) + 4;
426 t3 = 22 * src[1] + 10 * src[3];
427 t4 = 22 * src[3] - 10 * src[1];
429 dst[0] = (t1 +
t3) >> 3;
430 dst[1] = (t2 -
t4) >> 3;
431 dst[2] = (t2 +
t4) >> 3;
432 dst[3] = (t1 -
t3) >> 3;
439 for(i = 0; i < 4; i++){
440 t1 = 12 * (src[ 0] + src[32]) + 64;
441 t2 = 12 * (src[ 0] - src[32]) + 64;
442 t3 = 16 * src[16] + 6 * src[48];
443 t4 = 6 * src[16] - 16 * src[48];
450 t1 = 16 * src[ 8] + 15 * src[24] + 9 * src[40] + 4 * src[56];
451 t2 = 15 * src[ 8] - 4 * src[24] - 16 * src[40] - 9 * src[56];
452 t3 = 9 * src[ 8] - 16 * src[24] + 4 * src[40] + 15 * src[56];
453 t4 = 4 * src[ 8] - 9 * src[24] + 15 * src[40] - 16 * src[56];
455 dest[0*linesize] = av_clip_uint8(dest[0*linesize] + ((t5 + t1) >> 7));
456 dest[1*linesize] = av_clip_uint8(dest[1*linesize] + ((t6 + t2) >> 7));
457 dest[2*linesize] = av_clip_uint8(dest[2*linesize] + ((t7 + t3) >> 7));
458 dest[3*linesize] = av_clip_uint8(dest[3*linesize] + ((t8 + t4) >> 7));
459 dest[4*linesize] = av_clip_uint8(dest[4*linesize] + ((t8 - t4 + 1) >> 7));
460 dest[5*linesize] = av_clip_uint8(dest[5*linesize] + ((t7 - t3 + 1) >> 7));
461 dest[6*linesize] = av_clip_uint8(dest[6*linesize] + ((t6 - t2 + 1) >> 7));
462 dest[7*linesize] = av_clip_uint8(dest[7*linesize] + ((t5 - t1 + 1) >> 7));
/* NOTE(review): extraction fragment — vc1_inv_trans_4x4_dc_c (symbol index:
 * DC-only inverse transform for a 4x4 part of the block).  Both scaling
 * steps use the 4-point gain 17; the dc extraction and row advance are
 * missing from this capture. */
475 dc = (17 * dc + 4) >> 3;
476 dc = (17 * dc + 64) >> 7;
477 for(i = 0; i < 4; i++){
478 dest[0] = av_clip_uint8(dest[0] + dc);
479 dest[1] = av_clip_uint8(dest[1] + dc);
480 dest[2] = av_clip_uint8(dest[2] + dc);
481 dest[3] = av_clip_uint8(dest[3] + dc);
/* NOTE(review): extraction fragment — vc1_inv_trans_4x4_c.  4-point VC-1
 * transform (17/22/10 kernel) applied first to the 4 rows (+4, >>3), then
 * to the 4 columns (+64, >>7) with the result added to the destination
 * pixels under 8-bit clipping.  Pointer-advance lines are missing from
 * this capture. */
494 for(i = 0; i < 4; i++){
495 t1 = 17 * (src[0] + src[2]) + 4;
496 t2 = 17 * (src[0] - src[2]) + 4;
497 t3 = 22 * src[1] + 10 * src[3];
498 t4 = 22 * src[3] - 10 * src[1];
500 dst[0] = (t1 +
t3) >> 3;
501 dst[1] = (t2 -
t4) >> 3;
502 dst[2] = (t2 +
t4) >> 3;
503 dst[3] = (t1 -
t3) >> 3;
510 for(i = 0; i < 4; i++){
511 t1 = 17 * (src[ 0] + src[16]) + 64;
512 t2 = 17 * (src[ 0] - src[16]) + 64;
513 t3 = 22 * src[ 8] + 10 * src[24];
514 t4 = 22 * src[24] - 10 * src[ 8];
516 dest[0*linesize] = av_clip_uint8(dest[0*linesize] + ((t1 + t3) >> 7));
517 dest[1*linesize] = av_clip_uint8(dest[1*linesize] + ((t2 - t4) >> 7));
518 dest[2*linesize] = av_clip_uint8(dest[2*linesize] + ((t2 + t4) >> 7));
519 dest[3*linesize] = av_clip_uint8(dest[3*linesize] + ((t1 - t3) >> 7));
/* NOTE(review): extraction fragment — the VC1_MSPEL_FILTER_16B macro (which
 * generates the 16-bit hor/ver quarter-pel filters) collapsed onto one line,
 * followed by tail cases of vc1_mspel_filter: the three 4-tap bicubic kernels
 * (-4,53,18,-3 / -1,9,9,-1 / -3,18,53,-4) with rounding `r`.  Mode-switch
 * lines are missing from this capture. */
528 #define VC1_MSPEL_FILTER_16B(DIR, TYPE) \ 529 static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src, int stride, int mode) \ 535 return -4*src[-stride] + 53*src[0] + 18*src[stride] - 3*src[stride*2]; \ 537 return -src[-stride] + 9*src[0] + 9*src[stride] - src[stride*2]; \ 539 return -3*src[-stride] + 18*src[0] + 53*src[stride] - 4*src[stride*2]; \ 556 return (-4*src[-stride] + 53*src[0] + 18*src[stride] - 3*src[stride*2] + 32 - r) >> 6;
558 return (-src[-stride] + 9*src[0] + 9*src[stride] - src[stride*2] + 8 - r) >> 4;
560 return (-3*src[-stride] + 18*src[0] + 53*src[stride] - 4*src[stride*2] + 32 - r) >> 6;
/* NOTE(review): extraction fragment — the VC1_MSPEL_MC / PUT_VC1_MSPEL macro
 * machinery collapsed onto one line (bicubic motion compensation, per the
 * symbol index), followed by the interior of put_no_rnd_vc1_chroma_mc8_c:
 * A/B/C/D are the bilinear weights from the x/y fractions (A+B+C+D == 64),
 * and each output pixel uses "no-rnd" rounding (+32-4) with >>6.  The
 * function signature and loop/pointer-advance lines are missing here. */
567 #define VC1_MSPEL_MC(OP, OP4, OPNAME)\ 568 static av_always_inline void OPNAME ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, ptrdiff_t stride, int hmode, int vmode, int rnd)\ 576 static const int shift_value[] = { 0, 5, 1, 5 };\ 577 int shift = (shift_value[hmode]+shift_value[vmode])>>1;\ 578 int16_t tmp[11*8], *tptr = tmp;\ 580 r = (1<<(shift-1)) + rnd-1;\ 583 for(j = 0; j < 8; j++) {\ 584 for(i = 0; i < 11; i++)\ 585 tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode)+r)>>shift;\ 592 for(j = 0; j < 8; j++) {\ 593 for(i = 0; i < 8; i++)\ 594 OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode)+r)>>7);\ 604 for(j = 0; j < 8; j++) {\ 605 for(i = 0; i < 8; i++)\ 606 OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r));\ 615 for(j = 0; j < 8; j++) {\ 616 for(i = 0; i < 8; i++)\ 617 OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd));\ 622 static void OPNAME ## pixels8x8_c(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int rnd){\ 625 OP4(*(uint32_t*)(block ), AV_RN32(pixels ));\ 626 OP4(*(uint32_t*)(block+4), AV_RN32(pixels+4));\ 632 #define op_put(a, b) a = av_clip_uint8(b) 633 #define op_avg(a, b) a = (a + av_clip_uint8(b) + 1) >> 1 634 #define op4_avg(a, b) a = rnd_avg32(a, b) 635 #define op4_put(a, b) a = b 642 #define PUT_VC1_MSPEL(a, b)\ 643 static void put_vc1_mspel_mc ## a ## b ##_c(uint8_t *dst, \ 644 const uint8_t *src, \ 645 ptrdiff_t stride, int rnd) \ 647 put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \ 649 static void avg_vc1_mspel_mc ## a ## b ##_c(uint8_t *dst, \ 650 const uint8_t *src, \ 651 ptrdiff_t stride, int rnd) \ 653 avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \ 676 const int A=(8-
x)*(8-y);
677 const int B=(
x)*(8-y);
678 const int C=(8-
x)*( y);
679 const int D=(
x)*( y);
686 dst[0] = (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6;
687 dst[1] = (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6;
688 dst[2] = (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6;
689 dst[3] = (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6;
690 dst[4] = (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6;
691 dst[5] = (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6;
692 dst[6] = (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6;
693 dst[7] = (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6;
/* NOTE(review): extraction fragment — interior of put_no_rnd_vc1_chroma_mc4_c,
 * the 4-wide variant of the no-rnd bilinear chroma MC above.  Signature,
 * loop and pointer advances are missing from this capture. */
700 const int A=(8-
x)*(8-y);
701 const int B=(
x)*(8-y);
702 const int C=(8-
x)*( y);
703 const int D=(
x)*( y);
710 dst[0] = (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6;
711 dst[1] = (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6;
712 dst[2] = (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6;
713 dst[3] = (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6;
/* NOTE(review): extraction fragment — avg2 helper macro plus the interior of
 * avg_no_rnd_vc1_chroma_mc8_c: same no-rnd bilinear interpolation as the
 * put variant, but averaged (round-up) into the existing dst pixels.
 * Signature, loop and pointer advances are missing from this capture. */
719 #define avg2(a,b) ((a+b+1)>>1) 721 const int A=(8-
x)*(8-y);
722 const int B=(
x)*(8-y);
723 const int C=(8-
x)*( y);
724 const int D=(
x)*( y);
731 dst[0] =
avg2(dst[0], ((A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6));
732 dst[1] =
avg2(dst[1], ((A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6));
733 dst[2] =
avg2(dst[2], ((A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6));
734 dst[3] =
avg2(dst[3], ((A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6));
735 dst[4] =
avg2(dst[4], ((A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6));
736 dst[5] =
avg2(dst[5], ((A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6));
737 dst[6] =
avg2(dst[6], ((A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6));
738 dst[7] =
avg2(dst[7], ((A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6));
/* NOTE(review): extraction fragment — start of the sprite section (guarded
 * by CONFIG_WMV3IMAGE_DECODER/CONFIG_VC1IMAGE_DECODER) and the interior of
 * sprite_h_c: horizontal sprite scaling by linear interpolation between the
 * two source pixels selected by a 16.16 fixed-point offset.  Signature and
 * loop lines are missing from this capture. */
744 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER 749 int a = src[(offset >> 16) ];
750 int b = src[(offset >> 16) + 1];
751 *dst++ = a + ((b -
a) * (offset&0xFFFF) >> 16);
/* NOTE(review): extraction fragment — parameters and interpolation core of
 * sprite_v_template, the generic vertical sprite blender specialized by the
 * four wrappers below: each sprite pair (src1a/src1b, src2a/src2b) is
 * linearly interpolated by its 16.16 offset, then the two sprites are
 * blended by `alpha`.  Remaining parameters, loop and store lines are
 * missing from this capture. */
757 int two_sprites,
const uint8_t *src2a,
const uint8_t *src2b,
int offset2,
765 a1 = a1 + ((b1 -
a1) * offset1 >> 16);
771 a2 = a2 + ((b2 -
a2) * offset2 >> 16);
773 a1 = a1 + ((a2 -
a1) * alpha >> 16);
/* Vertical scaling of a single sprite: linearly interpolate between the
 * src1a and src1b lines using the 16.16 fixed-point offset.
 * Forwards to sprite_v_template with the second sprite disabled. */
static void sprite_v_single_c(uint8_t *dst, const uint8_t *src1a,
                              const uint8_t *src1b, int offset, int width)
{
    sprite_v_template(dst, src1a, src1b, offset, 0,
                      NULL, NULL, 0, 0, 1, width);
}
/* Vertical blend of two unscaled sprites: mix the src1a and src2a lines
 * with the 16.16 fixed-point weight `alpha` (no per-sprite offset
 * interpolation).  Forwards to sprite_v_template. */
static void sprite_v_double_noscale_c(uint8_t *dst, const uint8_t *src1a,
                                      const uint8_t *src2a, int alpha,
                                      int width)
{
    sprite_v_template(dst, src1a, NULL, 0, 1,
                      src2a, NULL, 0, alpha, 0, width);
}
/* Vertical blend of two sprites where only the first is scaled: sprite 1 is
 * interpolated between src1a/src1b by offset1, sprite 2 uses src2a as-is,
 * and the results are mixed by `alpha`.  Forwards to sprite_v_template. */
static void sprite_v_double_onescale_c(uint8_t *dst,
                                       const uint8_t *src1a,
                                       const uint8_t *src1b, int offset1,
                                       const uint8_t *src2a, int alpha,
                                       int width)
{
    sprite_v_template(dst, src1a, src1b, offset1, 1,
                      src2a, NULL, 0, alpha, 1, width);
}
/* Vertical blend of two scaled sprites: each sprite is interpolated between
 * its own line pair (src1a/src1b by offset1, src2a/src2b by offset2), then
 * the two results are mixed by `alpha`.  Forwards to sprite_v_template.
 *
 * NOTE(review): the captured signature was truncated by extraction — the
 * src2a/src2b/offset2 parameters (used in the body) were missing; restored
 * to match the sprite_v_double_twoscale function-pointer prototype declared
 * in VC1DSPContext. */
static void sprite_v_double_twoscale_c(uint8_t *dst,
                                       const uint8_t *src1a,
                                       const uint8_t *src1b,
                                       int offset1,
                                       const uint8_t *src2a,
                                       const uint8_t *src2b,
                                       int offset2,
                                       int alpha, int width)
{
    sprite_v_template(dst, src1a, src1b, offset1, 1,
                      src2a, src2b, offset2, alpha, 2, width);
}
862 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, int16_t *block)
void(* vc1_h_s_overlap)(int16_t *left, int16_t *right)
static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
Do inverse transform on a 4x8 part of the block.
void(* vc1_inv_trans_4x8)(uint8_t *dest, int line_size, int16_t *block)
void(* sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, int alpha, int width)
static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
Do inverse transform on 8x8 block.
static void put_no_rnd_vc1_chroma_mc4_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y)
static av_always_inline int vc1_filter_line(uint8_t *src, int stride, int pq)
VC-1 in-loop deblocking filter for one line.
void(* vc1_v_loop_filter16)(uint8_t *src, int stride, int pq)
static void vc1_v_loop_filter16_c(uint8_t *src, int stride, int pq)
static void vc1_v_loop_filter4_c(uint8_t *src, int stride, int pq)
void(* vc1_v_overlap)(uint8_t *src, int stride)
void(* sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
void(* vc1_h_loop_filter16)(uint8_t *src, int stride, int pq)
void(* vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, int16_t *block)
void(* vc1_v_loop_filter4)(uint8_t *src, int stride, int pq)
vc1op_pixels_func put_vc1_mspel_pixels_tab[16]
#define VC1_MSPEL_FILTER_16B(DIR, TYPE)
Filter in case of 2 filters.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
static void vc1_h_loop_filter16_c(uint8_t *src, int stride, int pq)
void(* vc1_inv_trans_8x8)(int16_t *b)
static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y)
void(* vc1_h_loop_filter8)(uint8_t *src, int stride, int pq)
static void vc1_h_overlap_c(uint8_t *src, int stride)
Apply overlap transform to vertical edge.
static double alpha(void *priv, double x, double y)
void(* vc1_inv_trans_4x4)(uint8_t *dest, int line_size, int16_t *block)
static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y)
simple assert() macros that are a bit more flexible than ISO C assert().
void(* vc1_v_loop_filter8)(uint8_t *src, int stride, int pq)
static const uint8_t offset[127][2]
void(* vc1_inv_trans_8x8_dc)(uint8_t *dest, int line_size, int16_t *block)
static void vc1_loop_filter(uint8_t *src, int step, int stride, int len, int pq)
VC-1 in-loop deblocking filter.
static void vc1_inv_trans_8x8_c(int16_t block[64])
void ff_vc1dsp_init_x86(VC1DSPContext *dsp)
void(* vc1_h_overlap)(uint8_t *src, int stride)
static void vc1_v_s_overlap_c(int16_t *top, int16_t *bottom)
vc1op_pixels_func avg_vc1_mspel_pixels_tab[16]
void(* vc1_v_s_overlap)(int16_t *top, int16_t *bottom)
static void vc1_v_loop_filter8_c(uint8_t *src, int stride, int pq)
void(* vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, int16_t *block)
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]
#define PUT_VC1_MSPEL(a, b)
void(* sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
FIXME Range Coding of cr are mx and my are Motion Vector top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Predicton block[y][x] dc[1]
void(* vc1_inv_trans_8x4)(uint8_t *dest, int line_size, int16_t *block)
synthesis window for stochastic i
void(* sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, const uint8_t *src2b, int offset2, int alpha, int width)
static void vc1_h_s_overlap_c(int16_t *left, int16_t *right)
static void vc1_v_overlap_c(uint8_t *src, int stride)
Apply overlap transform to horizontal edge.
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]
av_cold void ff_vc1dsp_init_altivec(VC1DSPContext *dsp)
static void vc1_h_loop_filter8_c(uint8_t *src, int stride, int pq)
common internal and external API header
static double clip(void *opaque, double val)
Clip value val in the minval - maxval range.
static void vc1_h_loop_filter4_c(uint8_t *src, int stride, int pq)
static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, int16_t *block)
#define VC1_MSPEL_MC(OP, OP4, OPNAME)
Function used to do motion compensation with bicubic interpolation.
static av_always_inline int vc1_mspel_filter(const uint8_t *src, int stride, int mode, int r)
Filter used to interpolate fractional pel values.
else dst[i][x+y *dst_stride[i]]
void(* vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, int16_t *block)
void(* vc1_h_loop_filter4)(uint8_t *src, int stride, int pq)
static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
Do inverse transform on 8x4 part of block.
void(* sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
Do inverse transform on 4x4 part of block.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, int16_t *block)