#define LP(p) *(uint32_t*)(p)
#define LPC(p) *(const uint32_t*)(p)

#define UNPACK(ph,pl,tt0,tt1) do { \
    uint32_t t0,t1; t0=tt0;t1=tt1; \
    ph = ( (t0 & ~BYTE_VEC32(0x03))>>2) + ( (t1 & ~BYTE_VEC32(0x03))>>2); \
    pl = (t0 & BYTE_VEC32(0x03)) + (t1 & BYTE_VEC32(0x03)); } while(0)

#define rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x02))>>2) & BYTE_VEC32(0x03))
#define no_rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x01))>>2) & BYTE_VEC32(0x03))

#define MERGE1(a,b,ofs) (ofs==0)?a:( ((a)>>(8*ofs))|((b)<<(32-8*ofs)) )
#define MERGE2(a,b,ofs) (ofs==3)?b:( ((a)>>(8*(ofs+1)))|((b)<<(32-8*(ofs+1))) )

#define put(d,s) d = s
#define avg(d,s) d = rnd_avg32(s,d)

    OP(LP(dest),MERGE1(LPC(ref),LPC(ref+4),ofs)); \

    OP(LP(dest),LPC(ref)); \

#define OP_C(ofs,sz,avg2) \
    OP(LP(dest+0), MERGE1(t0,t1,ofs)); \
    OP(LP(dest+4), MERGE1(t1,t0,ofs)); \
    OP(LP(dest+8), MERGE1(t0,t1,ofs)); \
    OP(LP(dest+12), MERGE1(t1,t0,ofs)); \

#define OP_C0(sz,avg2) \
    OP(LP(dest+0), LPC(ref+0)); \
    OP(LP(dest+4), LPC(ref+4)); \
    OP(LP(dest+8), LPC(ref+8)); \
    OP(LP(dest+12), LPC(ref+12)); \

#define OP_X(ofs,sz,avg2) \
    OP(LP(dest+0), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
    OP(LP(dest+4), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
    OP(LP(dest+8), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
    OP(LP(dest+12), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \

#define OP_Y0(sz,avg2) \
    uint32_t t0,t1,t2,t3,t; \
    OP(LP(dest+0), avg2(t0,t)); t0 = t; \
    OP(LP(dest+4), avg2(t1,t)); t1 = t; \
    OP(LP(dest+8), avg2(t2,t)); t2 = t; \
    OP(LP(dest+12), avg2(t3,t)); t3 = t; \

#define OP_Y(ofs,sz,avg2) \
    uint32_t t0,t1,t2,t3,t,w0,w1; \
    t0 = MERGE1(w0,w1,ofs); \
    t1 = MERGE1(w1,w0,ofs); \
    t2 = MERGE1(w0,w1,ofs); \
    t3 = MERGE1(w1,w0,ofs); \
    t = MERGE1(w0,w1,ofs); \
    OP(LP(dest+0), avg2(t0,t)); t0 = t; \
    t = MERGE1(w1,w0,ofs); \
    OP(LP(dest+4), avg2(t1,t)); t1 = t; \
    t = MERGE1(w0,w1,ofs); \
    OP(LP(dest+8), avg2(t2,t)); t2 = t; \
    t = MERGE1(w1,w0,ofs); \
    OP(LP(dest+12), avg2(t3,t)); t3 = t; \

#define OP_X0(sz,avg2) OP_X(0,sz,avg2)
#define OP_XY0(sz,PACK) OP_XY(0,sz,PACK)
#define OP_XY(ofs,sz,PACK) \
    uint32_t t2,t3,w0,w1; \
    uint32_t a0,a1,a2,a3,a4,a5,a6,a7; \
    UNPACK(a0,a1,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
    UNPACK(a2,a3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
    UNPACK(a4,a5,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
    UNPACK(a6,a7,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
    UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
    OP(LP(dest+0),PACK(a0,a1,t2,t3)); \
    UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
    OP(LP(dest+4),PACK(a2,a3,t2,t3)); \
    UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
    OP(LP(dest+8),PACK(a4,a5,t2,t3)); \
    UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
    OP(LP(dest+12),PACK(a6,a7,t2,t3)); \

#define put_pixels8_c ff_put_rnd_pixels8_o
#define put_pixels16_c ff_put_rnd_pixels16_o
#define avg_pixels8_c ff_avg_rnd_pixels8_o
#define avg_pixels16_c ff_avg_rnd_pixels16_o
#define put_no_rnd_pixels8_c ff_put_rnd_pixels8_o
#define put_no_rnd_pixels16_c ff_put_rnd_pixels16_o
#define avg_no_rnd_pixels16_c ff_avg_rnd_pixels16_o

#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_sh4; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_sh4; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_sh4; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_sh4; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_sh4; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_sh4; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_sh4; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_sh4; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_sh4; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_sh4; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_sh4; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_sh4; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_sh4; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_sh4; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_sh4; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_sh4

    dspfunc(put_no_rnd_qpel, 0, 16);
    dspfunc(put_no_rnd_qpel, 1, 8);
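The UNPACK/PACK pair above implements a per-byte average of four pixels held in 32-bit words without letting carries spill between byte lanes: UNPACK sums the top six bits of each byte after a >>2 pre-shift and sums the low two bits separately, and rnd_PACK/no_rnd_PACK then add the rounding constant and merge the two parts back, giving (a+b+c+d+2)>>2 (or +1 for the no-rounding variant) in every lane. The following standalone sketch demonstrates the same trick; the function and variable names are illustrative, not taken from the file.

    #include <stdint.h>
    #include <stdio.h>

    #define BYTE_VEC32(c) ((c) * 0x01010101UL)   /* replicate a byte into all four lanes */

    /* Per-byte (a+b+c+d+2)>>2 on four packed bytes, carry-free: the top six bits
     * of each byte are summed after a >>2 pre-shift, the low two bits are summed
     * separately, rounded, and then merged back in. */
    static uint32_t rnd_avg4_packed(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
    {
        uint32_t ph0 = ((a & ~BYTE_VEC32(0x03)) >> 2) + ((b & ~BYTE_VEC32(0x03)) >> 2);
        uint32_t pl0 =  (a &  BYTE_VEC32(0x03))       +  (b &  BYTE_VEC32(0x03));
        uint32_t ph1 = ((c & ~BYTE_VEC32(0x03)) >> 2) + ((d & ~BYTE_VEC32(0x03)) >> 2);
        uint32_t pl1 =  (c &  BYTE_VEC32(0x03))       +  (d &  BYTE_VEC32(0x03));
        return ph0 + ph1 + (((pl0 + pl1 + BYTE_VEC32(0x02)) >> 2) & BYTE_VEC32(0x03));
    }

    int main(void)
    {
        uint32_t a = 0xFF7F0001, b = 0x01800002, c = 0x10203040, d = 0x50607080;
        uint32_t packed = rnd_avg4_packed(a, b, c, d);
        for (int i = 0; i < 4; i++) {   /* check each byte lane against the scalar formula */
            unsigned ref = (((a >> 8*i) & 0xFF) + ((b >> 8*i) & 0xFF) +
                            ((c >> 8*i) & 0xFF) + ((d >> 8*i) & 0xFF) + 2) >> 2;
            printf("lane %d: packed=%u ref=%u\n", i, (unsigned)((packed >> 8*i) & 0xFF), ref);
        }
        return 0;
    }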
void(* gmc1)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x16, int y16, int rounder)
translational global motion compensation.
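The fractional offsets x16 and y16 select a position in 1/16-pel units, so each output pixel is a bilinear blend of its four source neighbours, normalised by >>8 after adding rounder. The sketch below follows the generic C behaviour of this operation rather than the SH4 file itself; the function name and the fixed 8-pixel width are illustrative.

    #include <stdint.h>

    /* Sketch of 1/16-pel translational GMC: weight the four neighbouring source
     * pixels by the fractional offsets x16, y16 (0..15), add 'rounder', and
     * normalise by >>8 (the weights sum to 256). */
    static void gmc1_bilinear_sketch(uint8_t *dst, const uint8_t *src, int stride,
                                     int h, int x16, int y16, int rounder)
    {
        const int A = (16 - x16) * (16 - y16);
        const int B = (     x16) * (16 - y16);
        const int C = (16 - x16) * (     y16);
        const int D = (     x16) * (     y16);

        for (int i = 0; i < h; i++) {
            for (int x = 0; x < 8; x++)
                dst[x] = (A * src[x]          + B * src[x + 1] +
                          C * src[x + stride] + D * src[x + stride + 1] + rounder) >> 8;
            dst += stride;
            src += stride;
        }
    }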
static void put_mspel8_mc22_sh4(uint8_t *dst, uint8_t *src, int stride)
static void put_mspel8_mc20_sh4(uint8_t *dst, uint8_t *src, int stride)
static void put_mspel8_mc32_sh4(uint8_t *dst, uint8_t *src, int stride)
static void put_mspel8_mc30_sh4(uint8_t *dst, uint8_t *src, int stride)
qpel_mc_func put_mspel_pixels_tab[8]
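Callers use this table by calling through the entry for the wanted sub-pel position. The sketch below is a hypothetical call site: the typedef mirrors the signatures listed here, but the index encoding and the names mspel_dispatch_sketch/tab/dxy are illustrative, not taken from this file.

    #include <stdint.h>

    typedef void (*qpel_mc_func)(uint8_t *dst, uint8_t *src, int stride);

    /* Hypothetical dispatch through an mspel-style table: entry 0 is the
     * full-pel copy (mc00) and the remaining entries are sub-pel variants.
     * 'tab' stands in for put_mspel_pixels_tab; 'dxy' is an assumed index. */
    static void mspel_dispatch_sketch(qpel_mc_func tab[8], int dxy,
                                      uint8_t *dst, uint8_t *src, int stride)
    {
        tab[dxy & 7](dst, src, stride);
    }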
static void put_mspel8_mc02_sh4(uint8_t *dst, uint8_t *src, int stride)
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
static void put_mspel8_mc10_sh4(uint8_t *dst, uint8_t *src, int stride)
av_cold void ff_dsputil_init_align(DSPContext *c, AVCodecContext *avctx)
static void put_mspel8_mc12_sh4(uint8_t *dst, uint8_t *src, int stride)
#define dspfunc(PFX, IDX, NUM)
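As used in the initialisation code above, dspfunc(put_no_rnd_qpel, 0, 16) expands to sixteen table assignments of the form:

    c->put_no_rnd_qpel_pixels_tab[0][ 0] = put_no_rnd_qpel16_mc00_sh4;
    c->put_no_rnd_qpel_pixels_tab[0][ 1] = put_no_rnd_qpel16_mc10_sh4;
    /* ... entries 2..14 follow the same pattern ... */
    c->put_no_rnd_qpel_pixels_tab[0][15] = put_no_rnd_qpel16_mc33_sh4;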
static void put_mspel8_mc00_sh4(uint8_t *dst, uint8_t *src, int stride)