/* Forward declarations for the assembly halfpel primitives (the ff_put_* /
 * ff_avg_* MMXEXT and 3DNow! functions cross-referenced at the end of this
 * section).  They all share the same signature:
 *
 *     void ff_..._pixels..._...(uint8_t *block, const uint8_t *pixels,
 *                               ptrdiff_t line_size, int h);
 */
#define JUMPALIGN()     __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)

#define MOVQ_BFE(regd)                                  \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "paddb   %%"#regd", %%"#regd"   \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
#else
/* for shared libraries it is better to build the constants in registers
 * instead of loading them from memory */
#define MOVQ_BONE(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd  %%"#regd", %%"#regd"  \n\t"           \
        "psrlw    $15,       %%"#regd"  \n\t"           \
        "packuswb %%"#regd", %%"#regd"  \n\t" ::)

#define MOVQ_WTWO(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "psrlw   $15,       %%"#regd"   \n\t"           \
        "psllw   $1,        %%"#regd"   \n\t" ::)
#endif

/* regr is used as a temporary and receives the result; rega is left
 * unmodified, regb is trashed; regfe must contain 0xfefefefefefefefe */
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe)       \
    "movq  "#rega", "#regr"             \n\t"           \
    "pand  "#regb", "#regr"             \n\t"           \
    "pxor  "#rega", "#regb"             \n\t"           \
    "pand  "#regfe", "#regb"            \n\t"           \
    "psrlq $1,      "#regb"             \n\t"           \
    "paddb "#regb", "#regr"             \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe)              \
    "movq  "#rega", "#regr"             \n\t"           \
    "por   "#regb", "#regr"             \n\t"           \
    "pxor  "#rega", "#regb"             \n\t"           \
    "pand  "#regfe", "#regb"            \n\t"           \
    "psrlq $1,      "#regb"             \n\t"           \
    "psubb "#regb", "#regr"             \n\t"

/* same as above, for two register pairs at once; %%mm6 must hold the
 * 0xfefefefefefefefe mask set up by MOVQ_BFE */
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq  "#rega", "#regr"             \n\t"           \
    "movq  "#regc", "#regp"             \n\t"           \
    "pand  "#regb", "#regr"             \n\t"           \
    "pand  "#regd", "#regp"             \n\t"           \
    "pxor  "#rega", "#regb"             \n\t"           \
    "pxor  "#regc", "#regd"             \n\t"           \
    "pand  %%mm6,   "#regb"             \n\t"           \
    "pand  %%mm6,   "#regd"             \n\t"           \
    "psrlq $1,      "#regb"             \n\t"           \
    "psrlq $1,      "#regd"             \n\t"           \
    "paddb "#regb", "#regr"             \n\t"           \
    "paddb "#regd", "#regp"             \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp)  \
    "movq  "#rega", "#regr"             \n\t"           \
    "movq  "#regc", "#regp"             \n\t"           \
    "por   "#regb", "#regr"             \n\t"           \
    "por   "#regd", "#regp"             \n\t"           \
    "pxor  "#rega", "#regb"             \n\t"           \
    "pxor  "#regc", "#regd"             \n\t"           \
    "pand  %%mm6,   "#regb"             \n\t"           \
    "pand  %%mm6,   "#regd"             \n\t"           \
    "psrlq $1,      "#regd"             \n\t"           \
    "psrlq $1,      "#regb"             \n\t"           \
    "psubb "#regb", "#regr"             \n\t"           \
    "psubb "#regd", "#regp"             \n\t"

#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)       PAVGB_MMX(a, b, c, e)

#include "hpeldsp_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX(a, b, c, e)

#include "hpeldsp_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

#define ff_put_pixels8_mmx ff_put_pixels8_mmxext

/* 3DNow! specific */
#define DEF(x) x ## _3dnow

#include "hpeldsp_avg_template.c"

#undef DEF

/* MMXEXT specific */
#define DEF(x) x ## _mmxext

#include "hpeldsp_avg_template.c"

#undef DEF

#define put_no_rnd_pixels16_mmx    put_pixels16_mmx
#define put_no_rnd_pixels8_mmx     put_pixels8_mmx
#define put_pixels16_mmxext        put_pixels16_mmx
#define put_pixels8_mmxext         put_pixels8_mmx
#define put_pixels4_mmxext         put_pixels4_mmx
#define put_no_rnd_pixels16_mmxext put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmxext  put_no_rnd_pixels8_mmx

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq  %%mm0,    (%2)           \n\t"
        "movq  %%mm1,    (%2, %3)       \n\t"
        "add   %%"REG_a", %1            \n\t"
        "add   %%"REG_a", %2            \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq  %%mm0,    (%2)           \n\t"
        "movq  %%mm1,    (%2, %3)       \n\t"
        "add   %%"REG_a", %1            \n\t"
        "add   %%"REG_a", %2            \n\t"
        "subl  $4, %0                   \n\t"
        "jnz   1b                       \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory");
}
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq   (%1    ), %%mm0         \n\t"
        "movq  8(%1    ), %%mm4         \n\t"
        "movq   (%1, %3), %%mm1         \n\t"
        "movq  8(%1, %3), %%mm5         \n\t"
        "movq  %%mm0,     (%2)          \n\t"
        "movq  %%mm4,    8(%2)          \n\t"
        "movq  %%mm1,     (%2, %3)      \n\t"
        "movq  %%mm5,    8(%2, %3)      \n\t"
        "add   %%"REG_a", %1            \n\t"
        "add   %%"REG_a", %2            \n\t"
        "movq   (%1    ), %%mm0         \n\t"
        "movq  8(%1    ), %%mm4         \n\t"
        "movq   (%1, %3), %%mm1         \n\t"
        "movq  8(%1, %3), %%mm5         \n\t"
        "movq  %%mm0,     (%2)          \n\t"
        "movq  %%mm4,    8(%2)          \n\t"
        "movq  %%mm1,     (%2, %3)      \n\t"
        "movq  %%mm5,    8(%2, %3)      \n\t"
        "add   %%"REG_a", %1            \n\t"
        "add   %%"REG_a", %2            \n\t"
        "subl  $4, %0                   \n\t"
        "jnz   1b                       \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory");
}
/* Two further assembly prototypes with the common signature:
 *     void ff_..._pixels...(uint8_t *block, const uint8_t *pixels,
 *                           ptrdiff_t line_size, int h);
 */

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU)                                     \
    do {                                                                        \
        c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _     ## CPU; \
        c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_  ## CPU; \
        c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_  ## CPU; \
        c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
    } while (0)

Referenced functions, tables and macros:

void ff_put_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_put_no_rnd_pixels8_y2_exact_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
void ff_avg_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_put_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_put_no_rnd_pixels8_y2_exact_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_avg_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
#define AV_CPU_FLAG_MMXEXT
SSE integer functions or AMD MMX ext.
void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_put_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
#define AV_CPU_FLAG_SSE2SLOW
SSE2 supported, but usually not faster.
void ff_put_pixels16_x2_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags)
void ff_put_no_rnd_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags, int mm_flags)
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU)
void ff_avg_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_put_no_rnd_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
#define AV_CPU_FLAG_3DNOW
AMD 3DNOW.
void ff_put_pixels16_x2_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
#define AV_CPU_FLAG_MMX
standard MMX
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
void ff_put_no_rnd_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_put_no_rnd_pixels8_x2_exact_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
static void hpeldsp_init_sse2(HpelDSPContext *c, int flags, int mm_flags)
void ff_avg_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
#define AV_CPU_FLAG_SSE2
PIV SSE2 functions.
#define CONFIG_VP3_DECODER
void ff_put_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags, int mm_flags)
static void hpeldsp_init_mmx(HpelDSPContext *c, int flags, int mm_flags)
void ff_avg_pixels8_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_put_no_rnd_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_put_no_rnd_pixels8_x2_exact_3dnow(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void ff_avg_pixels8_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
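The *_pixels_tab arrays documented above are indexed by halfpel position: entry [0] is the full-pel copy, [1] the horizontal half position (x2), [2] the vertical one (y2) and [3] the diagonal one (xy2), which is the order SET_HPEL_FUNCS fills them in. For example, a call like SET_HPEL_FUNCS(put, [0], 16, mmx) would expand to the four c->put_pixels_tab[0][0..3] assignments of put_pixels16_mmx, put_pixels16_x2_mmx, put_pixels16_y2_mmx and put_pixels16_xy2_mmx, and ff_hpeldsp_init_x86() dispatches to hpeldsp_init_mmx/mmxext/3dnow/sse2 according to the CPU flags returned by av_get_cpu_flags(). As a rough scalar sketch of the rounded x2 and xy2 cases these routines accelerate (an assumed reference behaviour, not code from this file; the _sketch names are made up):

static void put_pixels8_x2_sketch(uint8_t *block, const uint8_t *pixels,
                                  ptrdiff_t line_size, int h)
{
    /* halfpel position [1]: horizontal average with rounding, (a + b + 1) >> 1 */
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)
            block[x] = (pixels[x] + pixels[x + 1] + 1) >> 1;
        block  += line_size;
        pixels += line_size;
    }
}

static void put_pixels8_xy2_sketch(uint8_t *block, const uint8_t *pixels,
                                   ptrdiff_t line_size, int h)
{
    /* halfpel position [3]: rounded average of the 2x2 neighbourhood */
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)
            block[x] = (pixels[x]             + pixels[x + 1]             +
                        pixels[x + line_size] + pixels[x + line_size + 1] + 2) >> 2;
        block  += line_size;
        pixels += line_size;
    }
}

The no_rnd variants listed above drop the rounding bias, i.e. they use (a + b) >> 1 and (a + b + c + d + 1) >> 2 instead.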