/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev
 */

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/hpeldsp.h"
#include "dsputil_mmx.h"

/*
 * Prototypes for the half-pel put/avg primitives wired into the
 * HpelDSPContext tables below under HAVE_YASM.  All share one signature:
 * copy/average h rows from pixels into block, advancing by line_size per
 * row.  NOTE(review): the definitions are not in this file — presumably
 * they live in the external x86 assembly sources; confirm against the
 * yasm files when editing signatures.
 */
void ff_put_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_mmxext(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_3dnow(uint8_t *block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_mmxext(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_3dnow(uint8_t *block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size, int h);
void ff_avg_pixels8_3dnow(uint8_t *block, const uint8_t *pixels,
                          ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);

#if HAVE_INLINE_ASM

/* Emit an 8-byte alignment directive for the following jump target. */
#define JUMPALIGN() __asm__ volatile (".p2align 3"::)
/* Zero an MMX register. */
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)

/* Load 0xfefefefefefefefe into regd: all-ones (pcmpeqd) doubled bytewise. */
#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
        "pcmpeqd %%"#regd", %%"#regd" \n\t" \
        "paddb %%"#regd", %%"#regd" \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
#else
// for shared library it's better to use this way for accessing constants
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
        "pcmpeqd %%"#regd", %%"#regd" \n\t" \
        "psrlw $15, %%"#regd" \n\t" \
        "packuswb %%"#regd", %%"#regd" \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
        "pcmpeqd %%"#regd", %%"#regd" \n\t" \
        "psrlw $15, %%"#regd" \n\t" \
        "psllw $1, %%"#regd" \n\t"::)

#endif

// Bytewise average of two MMX registers, rounding down:
// (a & b) + (((a ^ b) & 0xfe) >> 1)
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq "#rega", "#regr" \n\t" \
    "pand "#regb", "#regr" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pand "#regfe", "#regb" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "paddb "#regb", "#regr" \n\t"

// Bytewise average rounding up:
// (a | b) - (((a ^ b) & 0xfe) >> 1); same register conventions as above.
#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq "#rega", "#regr" \n\t" \
    "por "#regb", "#regr" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pand "#regfe", "#regb" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "psubb "#regb", "#regr" \n\t"

// Paired (two-result) variants of the averages above.
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq "#rega", "#regr" \n\t" \
    "movq "#regc", "#regp" \n\t" \
    "pand "#regb", "#regr" \n\t" \
    "pand "#regd", "#regp" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pxor "#regc", "#regd" \n\t" \
    "pand %%mm6, "#regb" \n\t" \
    "pand %%mm6, "#regd" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "psrlq $1, "#regd" \n\t" \
    "paddb "#regb", "#regr" \n\t" \
    "paddb "#regd", "#regp" \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq "#rega", "#regr" \n\t" \
    "movq "#regc", "#regp" \n\t" \
    "por "#regb", "#regr" \n\t" \
    "por "#regd", "#regp" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pxor "#regc", "#regd" \n\t" \
    "pand %%mm6, "#regb" \n\t" \
    "pand %%mm6, "#regd" \n\t" \
    "psrlq $1, "#regd" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "psubb "#regb", "#regr" \n\t" \
    "psubb "#regd", "#regp" \n\t"

/***********************************/
/* MMX no rounding */
/* Instantiate the rounding template with the truncating (no-rnd)
 * averaging macros; generated names end in _no_rnd_..._mmx. */
#define NO_RND 1
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "hpeldsp_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef NO_RND
/***********************************/
/* MMX rounding */
/* Second instantiation: rounding averages; names end in _..._mmx. */
#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "hpeldsp_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

#endif /* HAVE_INLINE_ASM */


#if HAVE_YASM
/* The avg template references a put_pixels8 helper; alias it to the
 * mmxext implementation declared above. */
#define ff_put_pixels8_mmx ff_put_pixels8_mmxext

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow

#include "hpeldsp_avg_template.c"

#undef DEF

/***********************************/
/* MMXEXT specific */

#define DEF(x) x ## _mmxext

#include "hpeldsp_avg_template.c"

#undef DEF

#endif /* HAVE_YASM */


#if HAVE_INLINE_ASM
/* Full-pel copies are shared: the no-rnd and mmxext "put" entries are
 * plain aliases of the MMX copy routines defined below. */
#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmxext put_pixels16_mmx
#define put_pixels8_mmxext put_pixels8_mmx
#define put_pixels4_mmxext put_pixels4_mmx
#define put_no_rnd_pixels16_mmxext put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmxext put_no_rnd_pixels8_mmx

/**
 * Copy an 8-byte-wide, h-row block from pixels to block, both strided by
 * line_size.  The loop moves four rows per iteration ("subl $4"), so h
 * must be a positive multiple of 4.
 */
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea (%3, %3), %%"REG_a" \n\t"  /* REG_a = 2 * line_size */
        ".p2align 3 \n\t"
        "1: \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

/**
 * Copy a 16-byte-wide, h-row block from pixels to block (two movq per
 * row).  As above, four rows per iteration: h must be a multiple of 4.
 */
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea (%3, %3), %%"REG_a" \n\t"  /* REG_a = 2 * line_size */
        ".p2align 3 \n\t"
        "1: \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq 8(%1 ), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq 8(%1 ), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
#endif /* HAVE_INLINE_ASM */

void ff_put_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
                          ptrdiff_t line_size, int h);
void ff_avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
                          ptrdiff_t line_size, int h);

/* Fill all four slots of one pixels_tab row for a given prefix/size/CPU:
 * slot [0] is the plain function, [1] the _x2, [2] the _y2 and [3] the
 * _xy2 variant.  IDX is the (possibly empty) table index, e.g. "[0]". */
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
    do { \
        c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
        c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
        c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
        c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
    } while (0)

/* Baseline MMX table setup (inline-asm implementations). */
static void hpeldsp_init_mmx(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_INLINE_ASM
    SET_HPEL_FUNCS(put, [0], 16, mmx);
    SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
    SET_HPEL_FUNCS(avg, [0], 16, mmx);
    SET_HPEL_FUNCS(avg_no_rnd, , 16, mmx);  /* avg_no_rnd tab is not indexed */
    SET_HPEL_FUNCS(put, [1], 8, mmx);
    SET_HPEL_FUNCS(put_no_rnd, [1], 8, mmx);
    SET_HPEL_FUNCS(avg, [1], 8, mmx);
#endif /* HAVE_INLINE_ASM */
}

/* Overlay MMXEXT implementations on top of the MMX table entries.
 * The non-bitexact no-rnd functions are skipped under CODEC_FLAG_BITEXACT;
 * the _exact variants are installed instead when VP3 support is built. */
static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_YASM
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
    c->put_pixels_tab[0][2] = ff_put_pixels16_y2_mmxext;

    c->avg_pixels_tab[0][0] = ff_avg_pixels16_mmxext;
    c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_mmxext;
    c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_mmxext;

    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;

    c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmxext;
    c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmxext;
    c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;

    if (!(flags & CODEC_FLAG_BITEXACT)) {
        c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_mmxext;
        c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_mmxext;
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;

        c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_mmxext;
        c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
    }

    if (flags & CODEC_FLAG_BITEXACT && CONFIG_VP3_DECODER) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_mmxext;
    }
#endif /* HAVE_YASM */
}

/* Overlay 3DNow! implementations; mirrors hpeldsp_init_mmxext exactly,
 * including the BITEXACT / VP3 _exact handling. */
static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_YASM
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
    c->put_pixels_tab[0][2] = ff_put_pixels16_y2_3dnow;

    c->avg_pixels_tab[0][0] = ff_avg_pixels16_3dnow;
    c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_3dnow;
    c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_3dnow;

    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;

    c->avg_pixels_tab[1][0] = ff_avg_pixels8_3dnow;
    c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_3dnow;
    c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;

    if (!(flags & CODEC_FLAG_BITEXACT)){
        c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_3dnow;
        c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_3dnow;
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;

        c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_3dnow;
        c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
    }

    if (flags & CODEC_FLAG_BITEXACT && CONFIG_VP3_DECODER) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_3dnow;
    }
#endif /* HAVE_YASM */
}

/* Overlay SSE2 full-pel 16-wide functions, skipped on CPUs flagged as
 * having slow SSE2. */
static void hpeldsp_init_sse2(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_YASM
    if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        // these functions are slower than mmx on AMD, but faster on Intel
        c->put_pixels_tab[0][0] = ff_put_pixels16_sse2;
        c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
        c->avg_pixels_tab[0][0] = ff_avg_pixels16_sse2;
    }
#endif /* HAVE_YASM */
}

/**
 * Entry point: probe CPU flags once and run the per-ISA initializers in
 * increasing capability order, so later ones overwrite the table entries
 * of earlier ones where they provide a replacement.
 */
void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags)
{
    int mm_flags = av_get_cpu_flags();

    if (HAVE_MMX && mm_flags & AV_CPU_FLAG_MMX)
        hpeldsp_init_mmx(c, flags, mm_flags);

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        hpeldsp_init_mmxext(c, flags, mm_flags);

    if (mm_flags & AV_CPU_FLAG_3DNOW)
        hpeldsp_init_3dnow(c, flags, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        hpeldsp_init_sse2(c, flags, mm_flags);
}