/*
 * GMC (Global Motion Compensation)
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "dsputil_altivec.h"

/*
 * AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8
 * to preserve proper dst alignment.
 */
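/*
 * For reference, a minimal scalar sketch of what this routine computes per
 * 8-pixel row (an illustrative approximation, not FFmpeg's actual C fallback;
 * A, B, C, D are the bilinear weights defined below):
 *
 *     for (i = 0; i < 8; i++)
 *         dst[i] = (A * src[i]          + B * src[i + 1] +
 *                   C * src[i + stride] + D * src[i + stride + 1] +
 *                   rounder) >> 8;
 */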
void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder)
{
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] =
        {
            (16-x16)*(16-y16), /* A */
            (   x16)*(16-y16), /* B */
            (16-x16)*(   y16), /* C */
            (   x16)*(   y16), /* D */
            0, 0, 0, 0         /* padding */
        };
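    /*
     * These are the usual bilinear weights for a 1/16-pel offset (x16, y16);
     * they always sum to 16 * 16 = 256, which is why the accumulated result
     * in the loop is normalized with a >> 8.
     * Worked example: x16 = y16 = 8 gives A = B = C = D = 64.
     */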
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    /* byte offsets of dst and src within their 16-byte lines (0..15) */
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;

    tempA = vec_ld(0, (const unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_splat((vec_u16)vec_lde(0, &rounder_a), 0);

    // We will be able to pick up our 9 char elements at src from those
    // 32 bytes. We load the first batch here, as inside the loop we can
    // reuse 'src + stride' from one iteration as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src + 1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
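    /*
     * vec_lvsl(off, src) yields a permute mask that, applied to the two
     * aligned 16-byte loads, extracts 16 consecutive bytes starting at
     * src + off (the classic AltiVec unaligned-load idiom). The special case
     * handles src & 0xF == 0xF, where src + 1 starts exactly at the second
     * aligned line, so src_1 already holds the wanted bytes.
     * vec_mergeh with vczero then zero-extends the first 8 bytes of each
     * vector into 8 unsigned shorts (big-endian), ready for vec_mladd.
     */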

    for (i = 0; i < h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // We will be able to pick up our 9 char elements at src + stride
        // from those 32 bytes, then reuse the resulting two vectors srcvC
        // and srcvD as the next srcvA and srcvB.
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F) {
            // if src & 0xF == 0xF, then (src + 1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // These four instructions replace 32 int muls and 32 int adds.
        // Isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
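        /*
         * Each vec_mladd computes, per 16-bit lane, a * b + c (low 16 bits),
         * so the chain accumulates A*srcvA + B*srcvB + C*srcvC + D*srcvD +
         * rounder for 8 pixels at once. No overflow: the weights sum to 256
         * and the sources are 8-bit, so with the small gmc1 rounder the sum
         * stays within 16 bits.
         */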

        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }
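        /*
         * dst is only guaranteed 8-byte aligned, so do an aligned
         * read-modify-write of the whole 16-byte line: keep the untouched
         * half of dstv and splice in the 8 freshly computed pixels (the low
         * bytes of tempD after the >> 8, packed by vec_pack above).
         */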

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
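
/*
 * Usage note (assumption, not shown in this file): this routine is meant to
 * be installed as the AltiVec gmc1 implementation from the PPC DSP init
 * code, roughly along the lines of
 *     c->gmc1 = ff_gmc1_altivec;
 * once runtime AltiVec support has been detected.
 */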