// main steps of 8x8 transform
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
\
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
\
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
\
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
\
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
\
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
} while (0)

// rounding after the first pass: >> 3
#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
} while (0)

// rounding after the second pass: >> 7, with an extra +1 on outputs 4-7
#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
} while (0)

// main steps of the 4-point transform
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
} while (0)

#define SHIFT_HOR4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3);

#define SHIFT_VERT4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7);

/** Do inverse transform on 8x8 block
 */
static void vc1_inv_trans_8x8_altivec(int16_t block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);
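    /*
     * STEP8 builds every constant multiplication from shifts and adds;
     * per 32-bit lane it computes, in scalar terms (sketch for reference
     * only, not part of the macros themselves):
     *
     *   even part:  t0 = 12*(s0 + s4) + rnd;  t1 = 12*(s0 - s4) + rnd;
     *               t2 = 16*s2 +  6*s6;       t3 =  6*s2 - 16*s6;
     *               t4 = t0 + t2;  t5 = t1 + t3;  t6 = t1 - t3;  t7 = t0 - t2;
     *   odd part:   t0 = 16*s1 + 15*s3 +  9*s5 +  4*s7;
     *               t1 = 15*s1 -  4*s3 - 16*s5 -  9*s7;
     *               t2 =  9*s1 - 16*s3 +  4*s5 + 15*s7;
     *               t3 =  4*s1 -  9*s3 + 15*s5 - 16*s7;
     *   outputs:    s0 = t4 + t0, ..., s7 = t4 - t0 (butterfly of both parts).
     *
     * Note also that vec_splat_s32() only takes immediates in -16..15,
     * which is why the 64 used as second-pass rounder is constructed as
     * 4 << 4 (vec_64 above).
     */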
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);
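    /* vec_ld ignores the low four bits of its effective address, so the
     * loads above assume block is 16-byte aligned, which FFmpeg's
     * aligned coefficient-block allocation provides. */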
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
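    /* Each row of eight 16-bit coefficients is sign-extended into two
     * vectors of four 32-bit lanes (low halves in s0-s7, high halves in
     * s8-sF): with inputs scaled by factors up to 16, the intermediate
     * sums would overflow 16-bit arithmetic. */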
    // first pass: rounder +4, >> 3
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
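    /* TRANSPOSE8 (FFmpeg's PPC util_altivec.h) transposes eight vectors
     * of 16-bit elements, which is why the 32-bit results are packed
     * back into shorts first: after the (x + 4) >> 3 pass the values of
     * a conformant stream fit in 16 bits again, so the truncating
     * vec_pack is lossless here. */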
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    // second pass: rounder +64, >> 7 (+1 extra on outputs 4-7)
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
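    /* The (x + 64) >> 7 pass brings conformant results back into 16-bit
     * range, so the truncating vec_pack above is again safe; the
     * transformed coefficients are now written over the input block. */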
    vec_st(src0,  0, block);
    vec_st(src1, 16, block);
    vec_st(src2, 32, block);
    vec_st(src3, 48, block);
    vec_st(src4, 64, block);
    vec_st(src5, 80, block);
    vec_st(src6, 96, block);
    vec_st(src7,112, block);
}
/** Do inverse transform on 8x4 part of block
 */
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
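    /* Unlike the 8x8 function, the block is transposed up front so that
     * the 8-point pass below runs along the pixel rows; a second
     * transpose then restores row order before the 4-point vertical
     * pass over the four rows that exist in an 8x4 block. */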
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    // horizontal pass: 8-point transform, rounder +4, >> 3
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
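    /* Per lane, STEP4 computes the VC-1 4-point transform (scalar sketch
     * for reference only):
     *
     *   t0 = 17*(s0 + s2) + rnd;   t1 = 17*(s0 - s2) + rnd;
     *   t2 = 22*s1 + 10*s3;        t3 = 22*s3 - 10*s1;
     *   s0 = t0 + t2;  s1 = t1 - t3;  s2 = t1 + t3;  s3 = t0 - t2;
     *
     * where 17*x is (x << 4) + x and 10*x is built as 2*x + 8*x. */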
    // vertical pass: 4-point transform, rounder +64, >> 7
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);
    p0 = vec_lvsl(0, dest);
    p1 = vec_lvsl(stride, dest);
    p  = vec_splat_u8(-1);
    perm0 = vec_mergeh(p, p0);
    perm1 = vec_mergeh(p, p1);
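    /* perm0/perm1 cover the two possible alignments of dest and
     * dest + stride.  Merging with 0xFF bytes (p) makes every other
     * vec_perm index select, via its low five bits, a byte of the zero
     * vector passed to vec_perm in ADD below, so each destination pixel
     * is zero-extended into a 16-bit lane.  dest is assumed at least
     * 8-byte aligned, so a row never straddles the 16-byte load, and
     * vec_packsu duplicating the result into both halves lets the two
     * vec_ste stores hit the correct words for either alignment. */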
#define ADD(dest,src,perm)                                              \
    /* *dest += src */                                                  \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm);  \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest);        \
    vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest);

    ADD (dest, src0, perm0)      dest += stride;
    ADD (dest, src1, perm1)      dest += stride;
    ADD (dest, src2, perm0)      dest += stride;
    ADD (dest, src3, perm1)
}
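/* The "no rounding" VC-1 chroma MC functions are instantiated from
 * FFmpeg's H.264 chroma MC template: OP_U8_ALTIVEC selects the store
 * operation (plain store for put, vec_avg for avg) and
 * PREFIX_no_rnd_vc1_chroma_mc8_altivec names the generated function.
 * The includes below assume the template file is h264chroma_template.c,
 * as in current FFmpeg trees; older trees used a different name. */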
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   put_no_rnd_vc1_chroma_mc8_altivec
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   avg_no_rnd_vc1_chroma_mc8_altivec
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec