#define         LP(p)           *(uint32_t*)(p)
#define         LPC(p)          *(const uint32_t*)(p)

/* Split two packed-pixel words into a "high" part (each byte's top six bits,
   already divided by 4) and a "low" part (each byte's two carry bits), so
   four-tap averaging can be done without widening to 16 bits. */
#define         UNPACK(ph,pl,tt0,tt1) do { \
        uint32_t t0,t1; t0=tt0;t1=tt1; \
        ph = ( (t0 & ~BYTE_VEC32(0x03))>>2) + ( (t1 & ~BYTE_VEC32(0x03))>>2); \
        pl = (t0 & BYTE_VEC32(0x03)) + (t1 & BYTE_VEC32(0x03)); } while(0)

/* Recombine two UNPACKed pairs into the per-byte result (a+b+c+d+2)>>2
   (rounded) or (a+b+c+d+1)>>2 (no rounding). */
#define         rnd_PACK(ph,pl,nph,npl)    ph + nph + (((pl + npl + BYTE_VEC32(0x02))>>2) & BYTE_VEC32(0x03))
#define         no_rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x01))>>2) & BYTE_VEC32(0x03))

/* Build the unaligned 32-bit word that starts ofs bytes into the aligned
   words a and b (little-endian byte order). */
#define         MERGE1(a,b,ofs) (ofs==0)?a:( ((a)>>(8*ofs))|((b)<<(32-8*ofs)) )
#define         MERGE2(a,b,ofs) (ofs==3)?b:( ((a)>>(8*(ofs+1)))|((b)<<(32-8*(ofs+1))) )

/* The two store operators plugged in as OP: plain store, or rounded average
   with the destination. */
#define         put(d,s)        d = s
#define         avg(d,s)        d = rnd_avg32(s,d)

/* Inner stores of the 4-pixel-wide kernels: misaligned source re-assembled
   with MERGE1, and 32-bit-aligned source. */
                OP(LP(dest),MERGE1(LPC(ref),LPC(ref+4),ofs)); \
                OP(LP(dest),LPC(ref)); \

/* put_pixels4_c and avg_pixels4_c each dispatch on the source byte
   alignment; the aligned case 0 is elided in this listing. */
        case 1: OP_C4(1); return;
        case 2: OP_C4(2); return;
        case 3: OP_C4(3); return;

        case 1: OP_C4(1); return;
        case 2: OP_C4(2); return;
        case 3: OP_C4(3); return;
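The put/avg operators above rely on a byte-parallel (SWAR) average. Below is a minimal, self-contained sketch of the arithmetic identity an rnd_avg32-style helper typically uses; the demo names and the BYTE_VEC32 definition here are local assumptions for illustration, not this file's headers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BYTE_VEC32(c) ((c) * 0x01010101UL)   /* assumed byte-replication helper */

/* Rounded average of four packed bytes: per byte, (a + b + 1) >> 1.
 * Identity: (a + b + 1) >> 1 == (a | b) - ((a ^ b) >> 1); clearing each
 * byte's low bit before the shift keeps lanes from borrowing into their
 * neighbours.  (Illustrative re-derivation, not FFmpeg's own header.) */
static uint32_t rnd_avg32_demo(uint32_t a, uint32_t b)
{
    return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}

/* Truncating average: per byte, (a + b) >> 1 == (a & b) + ((a ^ b) >> 1). */
static uint32_t no_rnd_avg32_demo(uint32_t a, uint32_t b)
{
    return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}

int main(void)
{
    /* Check every byte pair against the scalar definitions. */
    for (unsigned a = 0; a < 256; a++) {
        for (unsigned b = 0; b < 256; b++) {
            uint32_t pa = (uint32_t)BYTE_VEC32(a), pb = (uint32_t)BYTE_VEC32(b);
            assert((rnd_avg32_demo(pa, pb)    & 0xff) == ((a + b + 1) >> 1));
            assert((no_rnd_avg32_demo(pa, pb) & 0xff) == ((a + b) >> 1));
        }
    }
    puts("byte-parallel averages match the scalar formulas");
    return 0;
}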
/* Full-pel copy/average, sz pixels wide, misaligned source. */
#define         OP_C(ofs,sz,avg2) \
                OP(LP(dest+0), MERGE1(t0,t1,ofs)); \
                OP(LP(dest+4), MERGE1(t1,t0,ofs)); \
                OP(LP(dest+8), MERGE1(t0,t1,ofs)); \
                OP(LP(dest+12), MERGE1(t1,t0,ofs)); \

/* Full-pel copy/average, aligned source. */
#define         OP_C0(sz,avg2) \
                OP(LP(dest+0), LPC(ref+0)); \
                OP(LP(dest+4), LPC(ref+4)); \
                OP(LP(dest+8), LPC(ref+8)); \
                OP(LP(dest+12), LPC(ref+12)); \

/* Horizontal half-pel: average each word with its one-byte-shifted neighbour. */
#define         OP_X(ofs,sz,avg2) \
                OP(LP(dest+0), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
                OP(LP(dest+4), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
                OP(LP(dest+8), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
                OP(LP(dest+12), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \

/* Vertical half-pel, aligned source: keep the previous row in t0..t3 and
   average it with the current one. */
#define         OP_Y0(sz,avg2) \
        uint32_t t0,t1,t2,t3,t; \
                OP(LP(dest+0), avg2(t0,t)); t0 = t; \
                OP(LP(dest+4), avg2(t1,t)); t1 = t; \
                OP(LP(dest+8), avg2(t2,t)); t2 = t; \
                OP(LP(dest+12), avg2(t3,t)); t3 = t; \

/* Vertical half-pel, misaligned source: same scheme, but each row is first
   re-assembled with MERGE1. */
#define         OP_Y(ofs,sz,avg2) \
        uint32_t t0,t1,t2,t3,t,w0,w1; \
        t0 = MERGE1(w0,w1,ofs); \
        t1 = MERGE1(w1,w0,ofs); \
        t2 = MERGE1(w0,w1,ofs); \
        t3 = MERGE1(w1,w0,ofs); \
                t = MERGE1(w0,w1,ofs); \
                OP(LP(dest+0), avg2(t0,t)); t0 = t; \
                t = MERGE1(w1,w0,ofs); \
                OP(LP(dest+4), avg2(t1,t)); t1 = t; \
                t = MERGE1(w0,w1,ofs); \
                OP(LP(dest+8), avg2(t2,t)); t2 = t; \
                t = MERGE1(w1,w0,ofs); \
                OP(LP(dest+12), avg2(t3,t)); t3 = t; \

#define OP_X0(sz,avg2)  OP_X(0,sz,avg2)
#define OP_XY0(sz,PACK) OP_XY(0,sz,PACK)

/* Diagonal half-pel: per-byte four-tap average of the previous row (held
   UNPACKed in a0..a7 at two horizontal phases) and the current row. */
#define         OP_XY(ofs,sz,PACK) \
        uint32_t        t2,t3,w0,w1; \
        uint32_t        a0,a1,a2,a3,a4,a5,a6,a7; \
        UNPACK(a0,a1,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
        UNPACK(a2,a3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
        UNPACK(a4,a5,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
        UNPACK(a6,a7,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
                UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
                OP(LP(dest+0),PACK(a0,a1,t2,t3)); \
                UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
                OP(LP(dest+4),PACK(a2,a3,t2,t3)); \
                UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
                OP(LP(dest+8),PACK(a4,a5,t2,t3)); \
                UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
                OP(LP(dest+12),PACK(a6,a7,t2,t3)); \

/* Generate one motion-compensation function: the name is built by token
   pasting, the body dispatches on the source byte alignment. */
#define         DEFFUNC(prefix, op, rnd, xy, sz, OP_N, avgfunc) \
prefix void op##_##rnd##_pixels##sz##_##xy(uint8_t *dest, const uint8_t *ref, \
                                           const ptrdiff_t stride, int height) \
        switch((int)ref&3) { \
        case 0:OP_N##0(sz,rnd##_##avgfunc); return; \
        case 1:OP_N(1,sz,rnd##_##avgfunc); return; \
        case 2:OP_N(2,sz,rnd##_##avgfunc); return; \
        case 3:OP_N(3,sz,rnd##_##avgfunc); return; \

/* Generated halfpel put functions */
DEFFUNC(static,put,no_rnd,x,8,OP_X,avg32)
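The xy path needs a four-input average per output byte, (a+b+c+d+2)>>2 rounded or (a+b+c+d+1)>>2 unrounded; UNPACK/PACK do this without widening past 32 bits by summing the top six bits and the bottom two bits of each byte separately. The following self-contained check re-states the UNPACK and rnd_PACK macros from the listing above and verifies the arithmetic; the test values are arbitrary and the BYTE_VEC32 definition is an assumption.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BYTE_VEC32(c) ((c) * 0x01010101UL)   /* assumed byte-replication helper */

/* Same shape as the UNPACK / rnd_PACK macros in the listing above. */
#define UNPACK(ph, pl, tt0, tt1) do { \
        uint32_t t0 = (tt0), t1 = (tt1); \
        ph = ((t0 & ~BYTE_VEC32(0x03)) >> 2) + ((t1 & ~BYTE_VEC32(0x03)) >> 2); \
        pl = (t0 & BYTE_VEC32(0x03)) + (t1 & BYTE_VEC32(0x03)); \
    } while (0)

#define rnd_PACK(ph, pl, nph, npl) \
        ((ph) + (nph) + ((((pl) + (npl) + BYTE_VEC32(0x02)) >> 2) & BYTE_VEC32(0x03)))

int main(void)
{
    /* Four packed rows of pixel bytes (arbitrary test values). */
    const uint32_t v[4] = { 0x00FF7F80, 0x01FE4040, 0xFF00FF00, 0x12345678 };
    uint32_t ph, pl, nph, npl;

    UNPACK(ph,  pl,  v[0], v[1]);                 /* first pair of inputs  */
    UNPACK(nph, npl, v[2], v[3]);                 /* second pair of inputs */
    uint32_t packed = (uint32_t)rnd_PACK(ph, pl, nph, npl);

    for (int i = 0; i < 4; i++) {
        unsigned a = (v[0] >> (8 * i)) & 0xff;
        unsigned b = (v[1] >> (8 * i)) & 0xff;
        unsigned c = (v[2] >> (8 * i)) & 0xff;
        unsigned d = (v[3] >> (8 * i)) & 0xff;
        unsigned want = (a + b + c + d + 2) >> 2;         /* scalar reference */
        unsigned got  = (packed >> (8 * i)) & 0xff;
        assert(got == want);
    }
    puts("rnd_PACK(UNPACK(...)) matches per-byte (a+b+c+d+2)>>2");
    return 0;
}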
DEFFUNC(static,put,no_rnd,y,8,OP_Y,avg32)
DEFFUNC(static,put,no_rnd,xy,8,OP_XY,PACK)

DEFFUNC(static,put,   rnd,x,16,OP_X,avg32)
DEFFUNC(static,put,no_rnd,x,16,OP_X,avg32)
DEFFUNC(static,put,   rnd,y,16,OP_Y,avg32)
DEFFUNC(static,put,no_rnd,y,16,OP_Y,avg32)
DEFFUNC(static,put,   rnd,xy,16,OP_XY,PACK)
DEFFUNC(static,put,no_rnd,xy,16,OP_XY,PACK)

/* Generated halfpel avg functions */
DEFFUNC(      ,ff_avg,rnd,o,8,OP_C,avg32)
DEFFUNC(static,avg,   rnd,x,8,OP_X,avg32)
DEFFUNC(static,avg,   rnd,y,8,OP_Y,avg32)
DEFFUNC(static,avg,   rnd,xy,8,OP_XY,PACK)
DEFFUNC(      ,ff_avg,rnd,o,16,OP_C,avg32)
DEFFUNC(static,avg,   rnd,x,16,OP_X,avg32)
DEFFUNC(static,avg,no_rnd,x,16,OP_X,avg32)
DEFFUNC(static,avg,   rnd,y,16,OP_Y,avg32)
DEFFUNC(static,avg,no_rnd,y,16,OP_Y,avg32)
DEFFUNC(static,avg,   rnd,xy,16,OP_XY,PACK)
DEFFUNC(static,avg,no_rnd,xy,16,OP_XY,PACK)

/* no_rnd full-pel ("o") entry points alias their rounded counterparts. */
#define         ff_put_no_rnd_pixels8_o     ff_put_rnd_pixels8_o
#define         ff_put_no_rnd_pixels16_o    ff_put_rnd_pixels16_o
#define         ff_avg_no_rnd_pixels16_o    ff_avg_rnd_pixels16_o
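For orientation, expanding one generator line by hand: DEFFUNC(static,put,no_rnd,x,8,OP_X,avg32) pastes together the name put_no_rnd_pixels8_x and selects no_rnd_avg32 as the averaging helper. In outline it produces the following shape (the OP_X0/OP_X bodies are omitted here, so this sketch is not compilable as shown):

static void put_no_rnd_pixels8_x(uint8_t *dest, const uint8_t *ref,
                                 const ptrdiff_t stride, int height)
{
        switch ((int)ref & 3) {                  /* source byte alignment */
        case 0: OP_X0(8, no_rnd_avg32);   return;
        case 1: OP_X(1, 8, no_rnd_avg32); return;
        case 2: OP_X(2, 8, no_rnd_avg32); return;
        case 3: OP_X(3, 8, no_rnd_avg32); return;
        }
}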
Macros:
    #define OP_C(ofs, sz, avg2)
    #define OP_X(ofs, sz, avg2)
    #define OP_Y(ofs, sz, avg2)
    #define OP_XY(ofs, sz, PACK)
    #define DEFFUNC(prefix, op, rnd, xy, sz, OP_N, avgfunc)
    #define ff_put_no_rnd_pixels8_o
    #define ff_put_no_rnd_pixels16_o
    #define ff_avg_no_rnd_pixels16_o

Functions:
    static void put_pixels4_c(uint8_t *dest, const uint8_t *ref, const int stride, int height)
    static void avg_pixels4_c(uint8_t *dest, const uint8_t *ref, const int stride, int height)
    void ff_put_rnd_pixels8_o(uint8_t *dest, const uint8_t *ref, const ptrdiff_t stride, int height)
    void ff_put_rnd_pixels16_o(uint8_t *dest, const uint8_t *ref, const ptrdiff_t stride, int height)
    void ff_avg_rnd_pixels8_o(uint8_t *dest, const uint8_t *ref, const ptrdiff_t stride, int height)
    void ff_avg_rnd_pixels16_o(uint8_t *dest, const uint8_t *ref, const ptrdiff_t stride, int height)
    av_cold void ff_hpeldsp_init_sh4(HpelDSPContext *c, int flags)

Variables:
    op_pixels_func put_pixels_tab[4][4]
        Halfpel motion compensation with rounding (a+b+1)>>1.
    op_pixels_func avg_pixels_tab[4][4]
        Halfpel motion compensation with rounding (a+b+1)>>1.
    op_pixels_func put_no_rnd_pixels_tab[4][4]
        Halfpel motion compensation with no rounding (a+b)>>1.
    op_pixels_func avg_no_rnd_pixels_tab[4]
        Halfpel motion compensation with no rounding (a+b)>>1.
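The index above lists ff_hpeldsp_init_sh4 and the function-pointer tables it fills. Below is a hedged sketch of how the generated functions could be wired into the context, assuming the common hpeldsp layout (first index: block width, 0 for 16 pixels and 1 for 8; second index: 0 = o, 1 = x, 2 = y, 3 = xy) and the declarations the file already pulls in from hpeldsp.h and attributes.h. This is illustrative only, not the file's actual init body.

av_cold void ff_hpeldsp_init_sh4(HpelDSPContext *c, int flags)
{
    /* 16-pixel-wide put entries, rounded and unrounded. */
    c->put_pixels_tab[0][0]        = ff_put_rnd_pixels16_o;
    c->put_pixels_tab[0][1]        = put_rnd_pixels16_x;
    c->put_pixels_tab[0][2]        = put_rnd_pixels16_y;
    c->put_pixels_tab[0][3]        = put_rnd_pixels16_xy;

    c->put_no_rnd_pixels_tab[0][0] = ff_put_no_rnd_pixels16_o;  /* alias of the rnd version */
    c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x;
    c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy;

    /* 8-pixel-wide avg entries, rounded. */
    c->avg_pixels_tab[1][0]        = ff_avg_rnd_pixels8_o;
    c->avg_pixels_tab[1][1]        = avg_rnd_pixels8_x;
    c->avg_pixels_tab[1][2]        = avg_rnd_pixels8_y;
    c->avg_pixels_tab[1][3]        = avg_rnd_pixels8_xy;
}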