#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

/* Instantiate the 16x16 lowpass helpers twice from the shared template:
 * once writing the result (put) and once averaging it into dst (avg). */
#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264qpel_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264qpel_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

/*
 * H264_MC() emits the 16 quarter-pel motion-compensation entry points
 * (mcXY = horizontal offset X, vertical offset Y, in quarter pixels).
 * Full- and half-pel positions call a lowpass filter directly; the other
 * positions average two intermediate predictions (source pixels and/or
 * lowpass-filtered planes) through the *_pixels*_l2 helpers defined below.
 */
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    ff_ ## OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}

static inline void put_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    /* src2 keeps the same alignment for every row, so one permute mask suffices */
    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {
        /* possibly unaligned 16-byte load of one src1 row */
        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        /* src2 rows are packed with a stride of 16 */
        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        /* read dst so the unaligned store below can preserve the edge bytes */
        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        /* rounded average of the two predictions */
        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0, dst);

        dst += dst_stride;
    }
}
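/*
 * Note on the load/store idiom above: vec_ld/vec_st only touch 16-byte
 * aligned addresses, so a possibly misaligned row is read as two aligned
 * loads merged through vec_perm() with a vec_lvsl() mask, and the store is
 * a read-modify-write: the bytes already in dst ("edges") are re-combined
 * with the result using a vec_lvsr() mask before the two vec_st() calls.
 * As used in this file, src2 always points at one of the DECLARE_ALIGNED
 * scratch buffers with a row stride of 16, which is why a single mask_
 * computed before the loop is enough for it.
 */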
static inline void avg_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {
        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        /* average the two predictions, then blend with what is already in dst */
        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0, dst);

        dst += dst_stride;
    }
}
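/*
 * The excerpt above omits the point where H264_MC is actually expanded; the
 * AltiVec entry points referenced by the init table below come from
 * instantiating the macro for both prediction types:
 */
H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)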
av_cold void ff_h264qpel_init_ppc(H264QpelContext *c, int bit_depth)
{
    const int high_bit_depth = bit_depth > 8;

    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    if (!high_bit_depth) {
#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

        dspfunc(put_h264_qpel, 0, 16);
        dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc
    }
}
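/*
 * Usage sketch (not part of this file; assumes the generic h264qpel API from
 * libavcodec/h264qpel.h): the portable ff_h264qpel_init() dispatches to
 * ff_h264qpel_init_ppc() on PowerPC, after which the decoder indexes the
 * table by block size and quarter-pel offset (mx & 3) + 4 * (my & 3), e.g.
 *
 *     H264QpelContext qpel;
 *     ff_h264qpel_init(&qpel, 8);    // 8-bit depth
 *     // 16x16 block, quarter-pel offset (1,1) -> index 1 + 4*1 = 5 (mc11)
 *     qpel.put_h264_qpel_pixels_tab[0][5](dst, src, stride);
 */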