annotate ffmpeg/libavcodec/x86/rv40dsp_init.c @ 13:844d341cf643 tip

Back up before ISMIR
author Yading Song <yading.song@eecs.qmul.ac.uk>
date Thu, 31 Oct 2013 13:17:06 +0000
parents 6840f77b83aa
children
rev   line source
yading@10 1 /*
yading@10 2 * RV40 decoder motion compensation functions x86-optimised
yading@10 3 * Copyright (c) 2008 Konstantin Shishkov
yading@10 4 *
yading@10 5 * This file is part of FFmpeg.
yading@10 6 *
yading@10 7 * FFmpeg is free software; you can redistribute it and/or
yading@10 8 * modify it under the terms of the GNU Lesser General Public
yading@10 9 * License as published by the Free Software Foundation; either
yading@10 10 * version 2.1 of the License, or (at your option) any later version.
yading@10 11 *
yading@10 12 * FFmpeg is distributed in the hope that it will be useful,
yading@10 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
yading@10 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
yading@10 15 * Lesser General Public License for more details.
yading@10 16 *
yading@10 17 * You should have received a copy of the GNU Lesser General Public
yading@10 18 * License along with FFmpeg; if not, write to the Free Software
yading@10 19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
yading@10 20 */
yading@10 21
yading@10 22 /**
yading@10 23 * @file
yading@10 24 * RV40 decoder motion compensation functions x86-optimised
yading@10 25 * 2,0 and 0,2 have h264 equivalents.
yading@10 26 * 3,3 is bugged in the rv40 format and maps to _xy2 version
yading@10 27 */
yading@10 28
yading@10 29 #include "libavcodec/rv34dsp.h"
yading@10 30 #include "libavutil/attributes.h"
yading@10 31 #include "libavutil/mem.h"
yading@10 32 #include "libavutil/x86/cpu.h"
yading@10 33 #include "dsputil_mmx.h"
yading@10 34
yading@10 35 #if HAVE_YASM
/* Prototypes for the chroma motion-compensation routines implemented in
 * assembly: mc8 handles 8-pixel-wide blocks, mc4 handles 4-pixel-wide
 * blocks, over h rows. NOTE(review): x/y presumably select the chroma
 * subpel position — confirm against x86/rv40dsp.asm. */
yading@10 36 void ff_put_rv40_chroma_mc8_mmx (uint8_t *dst, uint8_t *src,
yading@10 37 int stride, int h, int x, int y);
yading@10 38 void ff_avg_rv40_chroma_mc8_mmxext(uint8_t *dst, uint8_t *src,
yading@10 39 int stride, int h, int x, int y);
yading@10 40 void ff_avg_rv40_chroma_mc8_3dnow(uint8_t *dst, uint8_t *src,
yading@10 41 int stride, int h, int x, int y);
yading@10 42
yading@10 43 void ff_put_rv40_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
yading@10 44 int stride, int h, int x, int y);
yading@10 45 void ff_avg_rv40_chroma_mc4_mmxext(uint8_t *dst, uint8_t *src,
yading@10 46 int stride, int h, int x, int y);
yading@10 47 void ff_avg_rv40_chroma_mc4_3dnow(uint8_t *dst, uint8_t *src,
yading@10 48 int stride, int h, int x, int y);
yading@10 49
/* Declare the four weighted-prediction helpers for one instruction-set
 * suffix: rounded / non-rounded variants at widths 16 and 8. */
yading@10 50 #define DECLARE_WEIGHT(opt) \
yading@10 51 void ff_rv40_weight_func_rnd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
yading@10 52 int w1, int w2, ptrdiff_t stride); \
yading@10 53 void ff_rv40_weight_func_rnd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
yading@10 54 int w1, int w2, ptrdiff_t stride); \
yading@10 55 void ff_rv40_weight_func_nornd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
yading@10 56 int w1, int w2, ptrdiff_t stride); \
yading@10 57 void ff_rv40_weight_func_nornd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
yading@10 58 int w1, int w2, ptrdiff_t stride);
/* Instantiate the prototypes for each ISA that has weight functions. */
yading@10 59 DECLARE_WEIGHT(mmxext)
yading@10 60 DECLARE_WEIGHT(sse2)
yading@10 61 DECLARE_WEIGHT(ssse3)
yading@10 62
yading@10 63 /** @{ */
yading@10 64 /**
yading@10 65 * Define one qpel function.
yading@10 66 * LOOPSIZE must be already set to the number of pixels processed per
yading@10 67 * iteration in the inner loop of the called functions.
yading@10 68 * COFF(x) must be already defined so as to provide the offset into any
yading@10 69 * array of coeffs used by the called function for the qpel position x.
yading@10 70 */
yading@10 71 #define QPEL_FUNC_DECL(OP, SIZE, PH, PV, OPT) \
yading@10 72 static void OP ## rv40_qpel ##SIZE ##_mc ##PH ##PV ##OPT(uint8_t *dst, \
yading@10 73 uint8_t *src, \
yading@10 74 ptrdiff_t stride) \
yading@10 75 { \
yading@10 76 int i; \
yading@10 77 if (PH && PV) { \
yading@10 78 DECLARE_ALIGNED(16, uint8_t, tmp)[SIZE * (SIZE + 5)]; \
yading@10 79 uint8_t *tmpptr = tmp + SIZE * 2; \
yading@10 80 src -= stride * 2; \
yading@10 81 \
yading@10 82 for (i = 0; i < SIZE; i += LOOPSIZE) \
yading@10 83 ff_put_rv40_qpel_h ##OPT(tmp + i, SIZE, src + i, stride, \
yading@10 84 SIZE + 5, HCOFF(PH)); \
yading@10 85 for (i = 0; i < SIZE; i += LOOPSIZE) \
yading@10 86 ff_ ##OP ##rv40_qpel_v ##OPT(dst + i, stride, tmpptr + i, \
yading@10 87 SIZE, SIZE, VCOFF(PV)); \
yading@10 88 } else if (PV) { \
yading@10 89 for (i = 0; i < SIZE; i += LOOPSIZE) \
yading@10 90 ff_ ##OP ##rv40_qpel_v ## OPT(dst + i, stride, src + i, \
yading@10 91 stride, SIZE, VCOFF(PV)); \
yading@10 92 } else { \
yading@10 93 for (i = 0; i < SIZE; i += LOOPSIZE) \
yading@10 94 ff_ ##OP ##rv40_qpel_h ## OPT(dst + i, stride, src + i, \
yading@10 95 stride, SIZE, HCOFF(PH)); \
yading@10 96 } \
yading@10 97 };
/* PH and PV are literal constants at macro-expansion time, so the if/else
 * chain above should fold to a single code path per generated function.
 * In the combined H+V case the horizontal pass always uses the put_
 * variant (output goes to a SIZE x (SIZE+5) temp buffer, offset by two
 * extra rows for the filter's vertical context); only the vertical pass
 * applies OP. */
yading@10 98
yading@10 99 /** Declare functions for sizes 8 and 16 and given operations
yading@10 100 * and qpel position. */
yading@10 101 #define QPEL_FUNCS_DECL(OP, PH, PV, OPT) \
yading@10 102 QPEL_FUNC_DECL(OP, 8, PH, PV, OPT) \
yading@10 103 QPEL_FUNC_DECL(OP, 16, PH, PV, OPT)
yading@10 104
/* NOTE(review): positions (0,0), (2,0), (0,2) and (3,3) are intentionally
 * absent from the list below — per the file header, (2,0)/(0,2) have H.264
 * equivalents and (3,3) maps to the _xy2 version; (0,0) is presumably a
 * plain copy. */
yading@10 105 /** Declare all functions for all sizes and qpel positions */
yading@10 106 #define QPEL_MC_DECL(OP, OPT) \
yading@10 107 void ff_ ##OP ##rv40_qpel_h ##OPT(uint8_t *dst, ptrdiff_t dstStride, \
yading@10 108 const uint8_t *src, \
yading@10 109 ptrdiff_t srcStride, \
yading@10 110 int len, int m); \
yading@10 111 void ff_ ##OP ##rv40_qpel_v ##OPT(uint8_t *dst, ptrdiff_t dstStride, \
yading@10 112 const uint8_t *src, \
yading@10 113 ptrdiff_t srcStride, \
yading@10 114 int len, int m); \
yading@10 115 QPEL_FUNCS_DECL(OP, 0, 1, OPT) \
yading@10 116 QPEL_FUNCS_DECL(OP, 0, 3, OPT) \
yading@10 117 QPEL_FUNCS_DECL(OP, 1, 0, OPT) \
yading@10 118 QPEL_FUNCS_DECL(OP, 1, 1, OPT) \
yading@10 119 QPEL_FUNCS_DECL(OP, 1, 2, OPT) \
yading@10 120 QPEL_FUNCS_DECL(OP, 1, 3, OPT) \
yading@10 121 QPEL_FUNCS_DECL(OP, 2, 1, OPT) \
yading@10 122 QPEL_FUNCS_DECL(OP, 2, 2, OPT) \
yading@10 123 QPEL_FUNCS_DECL(OP, 2, 3, OPT) \
yading@10 124 QPEL_FUNCS_DECL(OP, 3, 0, OPT) \
yading@10 125 QPEL_FUNCS_DECL(OP, 3, 1, OPT) \
yading@10 126 QPEL_FUNCS_DECL(OP, 3, 2, OPT)
yading@10 127 /** @} */
yading@10 128
/* SSSE3: 8 pixels per inner-loop iteration; coefficient-table stride of
 * 32 per subpel position (units defined by the asm — not visible here). */
yading@10 129 #define LOOPSIZE 8
yading@10 130 #define HCOFF(x) (32 * (x - 1))
yading@10 131 #define VCOFF(x) (32 * (x - 1))
yading@10 132 QPEL_MC_DECL(put_, _ssse3)
yading@10 133 QPEL_MC_DECL(avg_, _ssse3)
yading@10 134
/* SSE2: still 8 pixels per iteration, but a coefficient stride of 64. */
yading@10 135 #undef LOOPSIZE
yading@10 136 #undef HCOFF
yading@10 137 #undef VCOFF
yading@10 138 #define LOOPSIZE 8
yading@10 139 #define HCOFF(x) (64 * (x - 1))
yading@10 140 #define VCOFF(x) (64 * (x - 1))
yading@10 141 QPEL_MC_DECL(put_, _sse2)
yading@10 142 QPEL_MC_DECL(avg_, _sse2)
yading@10 143
/* MMX/MMXEXT/3DNow! variants exist only on 32-bit x86; 4 pixels per
 * iteration, same 64-unit coefficient stride as SSE2. */
yading@10 144 #if ARCH_X86_32
yading@10 145 #undef LOOPSIZE
yading@10 146 #undef HCOFF
yading@10 147 #undef VCOFF
yading@10 148 #define LOOPSIZE 4
yading@10 149 #define HCOFF(x) (64 * (x - 1))
yading@10 150 #define VCOFF(x) (64 * (x - 1))
yading@10 151
yading@10 152 QPEL_MC_DECL(put_, _mmx)
yading@10 153
/* No dedicated put_ helpers exist for MMXEXT/3DNow!; alias them to the
 * MMX implementations so the avg_ instantiations below (whose generated
 * wrappers call ff_put_rv40_qpel_h##OPT for the intermediate pass) link
 * against real symbols. */
yading@10 154 #define ff_put_rv40_qpel_h_mmxext ff_put_rv40_qpel_h_mmx
yading@10 155 #define ff_put_rv40_qpel_v_mmxext ff_put_rv40_qpel_v_mmx
yading@10 156 QPEL_MC_DECL(avg_, _mmxext)
yading@10 157
yading@10 158 #define ff_put_rv40_qpel_h_3dnow ff_put_rv40_qpel_h_mmx
yading@10 159 #define ff_put_rv40_qpel_v_3dnow ff_put_rv40_qpel_v_mmx
yading@10 160 QPEL_MC_DECL(avg_, _3dnow)
yading@10 161 #endif
yading@10 162
yading@10 163 /** @{ */
yading@10 164 /** Set one function */
/* Index math: pixels_tab[2 - SIZE / 8] selects the size row (0 for 16x16,
 * 1 for 8x8); [4 * PV + PH] indexes the qpel subpel position. */
yading@10 165 #define QPEL_FUNC_SET(OP, SIZE, PH, PV, OPT) \
yading@10 166 c-> OP ## pixels_tab[2 - SIZE / 8][4 * PV + PH] = OP ## rv40_qpel ##SIZE ## _mc ##PH ##PV ##OPT;
yading@10 167
yading@10 168 /** Set functions put and avg for sizes 8 and 16 and a given qpel position */
yading@10 169 #define QPEL_FUNCS_SET(OP, PH, PV, OPT) \
yading@10 170 QPEL_FUNC_SET(OP, 8, PH, PV, OPT) \
yading@10 171 QPEL_FUNC_SET(OP, 16, PH, PV, OPT)
yading@10 172
/* The (PH, PV) list below matches the declarations above; positions served
 * by other code paths — (2,0)/(0,2) via H.264 and (3,3) via _xy2, per the
 * file header — are deliberately skipped. */
yading@10 173 /** Set all functions for all sizes and qpel positions */
yading@10 174 #define QPEL_MC_SET(OP, OPT) \
yading@10 175 QPEL_FUNCS_SET (OP, 0, 1, OPT) \
yading@10 176 QPEL_FUNCS_SET (OP, 0, 3, OPT) \
yading@10 177 QPEL_FUNCS_SET (OP, 1, 0, OPT) \
yading@10 178 QPEL_FUNCS_SET (OP, 1, 1, OPT) \
yading@10 179 QPEL_FUNCS_SET (OP, 1, 2, OPT) \
yading@10 180 QPEL_FUNCS_SET (OP, 1, 3, OPT) \
yading@10 181 QPEL_FUNCS_SET (OP, 2, 1, OPT) \
yading@10 182 QPEL_FUNCS_SET (OP, 2, 2, OPT) \
yading@10 183 QPEL_FUNCS_SET (OP, 2, 3, OPT) \
yading@10 184 QPEL_FUNCS_SET (OP, 3, 0, OPT) \
yading@10 185 QPEL_FUNCS_SET (OP, 3, 1, OPT) \
yading@10 186 QPEL_FUNCS_SET (OP, 3, 2, OPT)
yading@10 187 /** @} */
yading@10 188
yading@10 189 #endif /* HAVE_YASM */
yading@10 190
yading@10 191 av_cold void ff_rv40dsp_init_x86(RV34DSPContext *c)
yading@10 192 {
yading@10 193 #if HAVE_YASM
yading@10 194 int mm_flags = av_get_cpu_flags();
yading@10 195
yading@10 196 if (EXTERNAL_MMX(mm_flags)) {
yading@10 197 c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_mmx;
yading@10 198 c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_mmx;
yading@10 199 #if HAVE_MMX_INLINE
yading@10 200 c->put_pixels_tab[0][15] = ff_put_rv40_qpel16_mc33_mmx;
yading@10 201 c->put_pixels_tab[1][15] = ff_put_rv40_qpel8_mc33_mmx;
yading@10 202 c->avg_pixels_tab[0][15] = ff_avg_rv40_qpel16_mc33_mmx;
yading@10 203 c->avg_pixels_tab[1][15] = ff_avg_rv40_qpel8_mc33_mmx;
yading@10 204 #endif /* HAVE_MMX_INLINE */
yading@10 205 #if ARCH_X86_32
yading@10 206 QPEL_MC_SET(put_, _mmx)
yading@10 207 #endif
yading@10 208 }
yading@10 209 if (EXTERNAL_MMXEXT(mm_flags)) {
yading@10 210 c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_mmxext;
yading@10 211 c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_mmxext;
yading@10 212 c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_mmxext;
yading@10 213 c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_mmxext;
yading@10 214 c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_mmxext;
yading@10 215 c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_mmxext;
yading@10 216 #if ARCH_X86_32
yading@10 217 QPEL_MC_SET(avg_, _mmxext)
yading@10 218 #endif
yading@10 219 } else if (EXTERNAL_AMD3DNOW(mm_flags)) {
yading@10 220 c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_3dnow;
yading@10 221 c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow;
yading@10 222 #if ARCH_X86_32
yading@10 223 QPEL_MC_SET(avg_, _3dnow)
yading@10 224 #endif
yading@10 225 }
yading@10 226 if (EXTERNAL_SSE2(mm_flags)) {
yading@10 227 c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_sse2;
yading@10 228 c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_sse2;
yading@10 229 c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_sse2;
yading@10 230 c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_sse2;
yading@10 231 QPEL_MC_SET(put_, _sse2)
yading@10 232 QPEL_MC_SET(avg_, _sse2)
yading@10 233 }
yading@10 234 if (EXTERNAL_SSSE3(mm_flags)) {
yading@10 235 c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_ssse3;
yading@10 236 c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_ssse3;
yading@10 237 c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_ssse3;
yading@10 238 c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_ssse3;
yading@10 239 QPEL_MC_SET(put_, _ssse3)
yading@10 240 QPEL_MC_SET(avg_, _ssse3)
yading@10 241 }
yading@10 242 #endif /* HAVE_YASM */
yading@10 243 }