#define RV40_LOWPASS(OPNAME, OP) \
static av_unused void OPNAME ## rv40_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                                     const int h, const int C1, const int C2, const int SHIFT){\
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < h; i++)\
    {\
        OP(dst[0], (src[-2] + src[ 3] - 5*(src[-1]+src[2]) + src[0]*C1 + src[1]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1], (src[-1] + src[ 4] - 5*(src[ 0]+src[3]) + src[1]*C1 + src[2]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2], (src[ 0] + src[ 5] - 5*(src[ 1]+src[4]) + src[2]*C1 + src[3]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3], (src[ 1] + src[ 6] - 5*(src[ 2]+src[5]) + src[3]*C1 + src[4]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4], (src[ 2] + src[ 7] - 5*(src[ 3]+src[6]) + src[4]*C1 + src[5]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5], (src[ 3] + src[ 8] - 5*(src[ 4]+src[7]) + src[5]*C1 + src[6]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6], (src[ 4] + src[ 9] - 5*(src[ 5]+src[8]) + src[6]*C1 + src[7]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7], (src[ 5] + src[10] - 5*(src[ 6]+src[9]) + src[7]*C1 + src[8]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst += dstStride;\
        src += srcStride;\
    }\
}\
\
static void OPNAME ## rv40_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                           const int w, const int C1, const int C2, const int SHIFT){\
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < w; i++)\
    {\
        const int srcB  = src[-2*srcStride];\
        const int srcA  = src[-1*srcStride];\
        const int src0  = src[0 *srcStride];\
        const int src1  = src[1 *srcStride];\
        const int src2  = src[2 *srcStride];\
        const int src3  = src[3 *srcStride];\
        const int src4  = src[4 *srcStride];\
        const int src5  = src[5 *srcStride];\
        const int src6  = src[6 *srcStride];\
        const int src7  = src[7 *srcStride];\
        const int src8  = src[8 *srcStride];\
        const int src9  = src[9 *srcStride];\
        const int src10 = src[10*srcStride];\
        OP(dst[0*dstStride], (srcB + src3  - 5*(srcA+src2) + src0*C1 + src1*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1*dstStride], (srcA + src4  - 5*(src0+src3) + src1*C1 + src2*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2*dstStride], (src0 + src5  - 5*(src1+src4) + src2*C1 + src3*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3*dstStride], (src1 + src6  - 5*(src2+src5) + src3*C1 + src4*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4*dstStride], (src2 + src7  - 5*(src3+src6) + src4*C1 + src5*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5*dstStride], (src3 + src8  - 5*(src4+src7) + src5*C1 + src6*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6*dstStride], (src4 + src9  - 5*(src5+src8) + src6*C1 + src7*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7*dstStride], (src5 + src10 - 5*(src6+src9) + src7*C1 + src8*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## rv40_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                            const int w, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, w-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, w-8, C1, C2, SHIFT);\
}\
\
static void OPNAME ## rv40_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                            const int h, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, h-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, h-8, C1, C2, SHIFT);\
}
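
/*
 * RV40_MC instantiates one motion compensation function per quarter-pel
 * position "mcXY" (X = horizontal, Y = vertical fraction). The
 * (C1, C2, SHIFT) triples passed to the lowpass helpers select the
 * filter phase: (52, 20, 6) for 1/4-pel, (20, 20, 5) for 1/2-pel and
 * (20, 52, 6) for 3/4-pel. Positions that need both passes filter
 * horizontally into the on-stack 'full' buffer first and then filter
 * that buffer vertically.
 */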
#define RV40_MC(OPNAME, SIZE) \
static void OPNAME ## rv40_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}
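
/*
 * op_put stores the filtered value clipped to 0..255 through the cm
 * crop table; op_avg additionally averages it with the pixel already in
 * dst, rounding up. Each operator instantiates a complete put_/avg_ set
 * of the qpel functions above.
 */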
#define op_avg(a, b)  a = (((a)+cm[b]+1)>>1)
#define op_put(a, b)  a = cm[b]

RV40_LOWPASS(put_       , op_put)
RV40_LOWPASS(avg_       , op_avg)

#undef op_avg
#undef op_put

RV40_MC(put_, 8)
RV40_MC(put_, 16)
RV40_MC(avg_, 8)
RV40_MC(avg_, 16)

static const int rv40_bias[4][4] = {
    {  0, 16, 32, 16 },
    { 32, 28, 32, 28 },
    {  0, 32, 16, 32 },
    { 32, 28, 32, 28 }
};

#define RV40_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## rv40_chroma_mc4_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y){\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
\
    av_assert2(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const int step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}\
\
static void OPNAME ## rv40_chroma_mc8_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y){\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
\
    av_assert2(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + bias));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + bias));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + bias));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const int step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            OP(dst[4], (A*src[4] + E*src[step+4] + bias));\
            OP(dst[5], (A*src[5] + E*src[step+5] + bias));\
            OP(dst[6], (A*src[6] + E*src[step+6] + bias));\
            OP(dst[7], (A*src[7] + E*src[step+7] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}
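
/*
 * The bilinear weights A, B, C and D above always sum to 64, so the
 * chroma operators below renormalize with a shift by 6; rv40_bias
 * supplies the RV40-specific rounding constant that is added to the
 * weighted sum beforehand.
 */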
#define op_avg(a, b) a = (((a)+((b)>>6)+1)>>1)
#define op_put(a, b) a = ((b)>>6)

RV40_CHROMA_MC(put_, op_put)
RV40_CHROMA_MC(avg_, op_avg)

/*
 * Biweight functions: rv40_weight_func_rnd_* expects prescaled weights
 * (each product is shifted down by 9 before the sum), while the nornd
 * variant applies the weights directly; both round with +0x10 and
 * normalize with >> 5.
 */
#define RV40_WEIGHT_FUNC(size) \
static void rv40_weight_func_rnd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
    int i, j;\
\
    for (j = 0; j < size; j++) {\
        for (i = 0; i < size; i++)\
            dst[i] = (((w2 * src1[i]) >> 9) + ((w1 * src2[i]) >> 9) + 0x10) >> 5;\
        src1 += stride;\
        src2 += stride;\
        dst  += stride;\
    }\
}\
static void rv40_weight_func_nornd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
    int i, j;\
\
    for (j = 0; j < size; j++) {\
        for (i = 0; i < size; i++)\
            dst[i] = (w2 * src1[i] + w1 * src2[i] + 0x10) >> 5;\
        src1 += stride;\
        src2 += stride;\
        dst  += stride;\
    }\
}

RV40_WEIGHT_FUNC(16)
RV40_WEIGHT_FUNC(8)
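
/*
 * The strong deblocking filter below rounds its 7-bit fixed-point
 * weighted averages with position-dependent dither values instead of a
 * constant bias: dmode selects a group of four entries and i walks the
 * four pixels of the filtered edge segment (rv40_dither_l/r[dmode + i]).
 */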

/**
 * dither values for deblocking filter - left/top values
 */
static const uint8_t rv40_dither_l[16] = {
    0x40, 0x50, 0x20, 0x60, 0x30, 0x50, 0x40, 0x30,
    0x50, 0x40, 0x50, 0x30, 0x60, 0x20, 0x50, 0x40
};

/**
 * dither values for deblocking filter - right/bottom values
 */
static const uint8_t rv40_dither_r[16] = {
    0x40, 0x30, 0x60, 0x20, 0x50, 0x30, 0x30, 0x40,
    0x40, 0x40, 0x50, 0x30, 0x20, 0x60, 0x30, 0x40
};

#define CLIP_SYMM(a, b) av_clip(a, -(b), b)
/**
 * weaker deblocking very similar to the one described in 4.4.2 of JVT-A003r1
 */
static av_always_inline void rv40_weak_loop_filter(uint8_t *src,
                                                   const int step,
                                                   const ptrdiff_t stride,
                                                   const int filter_p1,
                                                   const int filter_q1,
                                                   const int alpha,
                                                   const int beta,
                                                   const int lim_p0q0,
                                                   const int lim_q1,
                                                   const int lim_p1)
{
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int i, t, u, diff;

    for (i = 0; i < 4; i++, src += stride) {
        int diff_p1p0 = src[-2*step] - src[-1*step];
        int diff_q1q0 = src[ 1*step] - src[ 0*step];
        int diff_p1p2 = src[-2*step] - src[-3*step];
        int diff_q1q2 = src[ 1*step] - src[ 2*step];

        t = src[0*step] - src[-1*step];
        if (!t)
            continue;

        u = (alpha * FFABS(t)) >> 7;
        if (u > 3 - (filter_p1 && filter_q1))
            continue;

        t <<= 2;
        if (filter_p1 && filter_q1)
            t += diff_p1p0 - diff_q1q0;

        diff = CLIP_SYMM((t + 4) >> 3, lim_p0q0);
        src[-1*step] = cm[src[-1*step] + diff];
        src[ 0*step] = cm[src[ 0*step] - diff];

        if (filter_p1 && FFABS(diff_p1p2) <= beta) {
            t = (diff_p1p0 + diff_p1p2 - diff) >> 1;
            src[-2*step] = cm[src[-2*step] - CLIP_SYMM(t, lim_p1)];
        }

        if (filter_q1 && FFABS(diff_q1q2) <= beta) {
            t = (diff_q1q0 + diff_q1q2 + diff) >> 1;
            src[ 1*step] = cm[src[ 1*step] - CLIP_SYMM(t, lim_q1)];
        }
    }
}
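
/*
 * Thin wrappers around the weak filter: for a horizontal edge the
 * pixels across the edge are one row apart (step = stride) and the loop
 * walks along the row (stride = 1); the vertical variant swaps the two.
 */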
static void rv40_h_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                    const int filter_p1, const int filter_q1,
                                    const int alpha, const int beta,
                                    const int lim_p0q0, const int lim_q1,
                                    const int lim_p1)
{
    rv40_weak_loop_filter(src, stride, 1, filter_p1, filter_q1,
                          alpha, beta, lim_p0q0, lim_q1, lim_p1);
}

static void rv40_v_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                    const int filter_p1, const int filter_q1,
                                    const int alpha, const int beta,
                                    const int lim_p0q0, const int lim_q1,
                                    const int lim_p1)
{
    rv40_weak_loop_filter(src, 1, stride, filter_p1, filter_q1,
                          alpha, beta, lim_p0q0, lim_q1, lim_p1);
}
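
/*
 * Strong filtering smooths two pixels on each side of the edge (a third
 * one for luma) with five-tap 25/26-weighted averages; when sflag is
 * set, each result is clamped to +/-lims around the unfiltered sample.
 */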
static av_always_inline void rv40_strong_loop_filter(uint8_t *src,
                                                     const int step,
                                                     const ptrdiff_t stride,
                                                     const int alpha,
                                                     const int lims,
                                                     const int dmode,
                                                     const int chroma)
{
    int i;

    for(i = 0; i < 4; i++, src += stride){
        int sflag, p0, q0, p1, q1;
        int t = src[0*step] - src[-1*step];

        if (!t)
            continue;

        sflag = (alpha * FFABS(t)) >> 7;
        if (sflag > 1)
            continue;

        p0 = (25*src[-3*step] + 26*src[-2*step] + 26*src[-1*step] +
              26*src[ 0*step] + 25*src[ 1*step] +
              rv40_dither_l[dmode + i]) >> 7;

        q0 = (25*src[-2*step] + 26*src[-1*step] + 26*src[ 0*step] +
              26*src[ 1*step] + 25*src[ 2*step] +
              rv40_dither_r[dmode + i]) >> 7;

        if (sflag) {
            p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims);
            q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims);
        }

        p1 = (25*src[-4*step] + 26*src[-3*step] + 26*src[-2*step] + 26*p0 +
              25*src[ 0*step] + rv40_dither_l[dmode + i]) >> 7;
        q1 = (25*src[-1*step] + 26*q0 + 26*src[ 1*step] + 26*src[ 2*step] +
              25*src[ 3*step] + rv40_dither_r[dmode + i]) >> 7;

        if (sflag) {
            p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims);
            q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims);
        }

        src[-2*step] = p1;
        src[-1*step] = p0;
        src[ 0*step] = q0;
        src[ 1*step] = q1;

        if(!chroma){
            src[-3*step] = (25*src[-1*step] + 26*src[-2*step] +
                            51*src[-3*step] + 26*src[-4*step] + 64) >> 7;
            src[ 2*step] = (25*src[ 0*step] + 26*src[ 1*step] +
                            51*src[ 2*step] + 26*src[ 3*step] + 64) >> 7;
        }
    }
}

static void rv40_h_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                      const int alpha, const int lims,
                                      const int dmode, const int chroma)
{
    rv40_strong_loop_filter(src, stride, 1, alpha, lims, dmode, chroma);
}

static void rv40_v_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                      const int alpha, const int lims,
                                      const int dmode, const int chroma)
{
    rv40_strong_loop_filter(src, 1, stride, alpha, lims, dmode, chroma);
}
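
/*
 * Decides how a 4-pixel edge segment is filtered: *p1 and *q1 report
 * whether the second pixel away from the edge on each side may also be
 * filtered, and the return value selects the strong filter when both
 * second-difference sums additionally stay below beta2 (only evaluated
 * on macroblock edges, i.e. when 'edge' is set).
 */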
static av_always_inline int rv40_loop_filter_strength(uint8_t *src,
                                                      int step, ptrdiff_t stride,
                                                      int beta, int beta2,
                                                      int edge,
                                                      int *p1, int *q1)
{
    int sum_p1p0 = 0, sum_q1q0 = 0, sum_p1p2 = 0, sum_q1q2 = 0;
    int strong0 = 0, strong1 = 0;
    uint8_t *ptr;
    int i;

    for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
        sum_p1p0 += ptr[-2*step] - ptr[-1*step];
        sum_q1q0 += ptr[ 1*step] - ptr[ 0*step];
    }

    *p1 = FFABS(sum_p1p0) < (beta << 2);
    *q1 = FFABS(sum_q1q0) < (beta << 2);

    if (!*p1 && !*q1)
        return 0;

    if (!edge)
        return 0;

    for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
        sum_p1p2 += ptr[-2*step] - ptr[-3*step];
        sum_q1q2 += ptr[ 1*step] - ptr[ 2*step];
    }

    strong0 = *p1 && (FFABS(sum_p1p2) < beta2);
    strong1 = *q1 && (FFABS(sum_q1q2) < beta2);

    return strong0 && strong1;
}

static int rv40_h_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
                                       int beta, int beta2, int edge,
                                       int *p1, int *q1)
{
    return rv40_loop_filter_strength(src, stride, 1, beta, beta2, edge, p1, q1);
}

static int rv40_v_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
                                       int beta, int beta2, int edge,
                                       int *p1, int *q1)
{
    return rv40_loop_filter_strength(src, 1, stride, beta, beta2, edge, p1, q1);
}
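
/*
 * These kernels are registered in the RV34DSPContext by ff_rv40dsp_init()
 * (not part of this excerpt); index 0 of each loop-filter table is the
 * horizontal variant and index 1 the vertical one. A minimal sketch of
 * the assumed wiring:
 *
 *     c->rv40_weak_loop_filter[0]     = rv40_h_weak_loop_filter;
 *     c->rv40_weak_loop_filter[1]     = rv40_v_weak_loop_filter;
 *     c->rv40_strong_loop_filter[0]   = rv40_h_strong_loop_filter;
 *     c->rv40_strong_loop_filter[1]   = rv40_v_strong_loop_filter;
 *     c->rv40_loop_filter_strength[0] = rv40_h_loop_filter_strength;
 *     c->rv40_loop_filter_strength[1] = rv40_v_loop_filter_strength;
 */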