/*
 * ARM NEON IDCT
 *
 * Copyright (c) 2008 Mans Rullgard
 *
 * Based on Simple IDCT
 * Copyright (c) 2001 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

#define W1  22725   //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define W2  21407   //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define W3  19266   //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define W4  16383   //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define W5  12873   //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define W6  8867    //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define W7  4520    //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define W4c ((1<<(COL_SHIFT-1))/W4)
#define ROW_SHIFT 11
#define COL_SHIFT 20

#define w1 d0[0]
#define w2 d0[1]
#define w3 d0[2]
#define w4 d0[3]
#define w5 d1[0]
#define w6 d1[1]
#define w7 d1[2]
#define w4c d1[3]

        .macro idct_col4_top
        vmull.s16       q7,  d6,  w2    /* q7   = W2 * col[2] */
        vmull.s16       q8,  d6,  w6    /* q8   = W6 * col[2] */
        vmull.s16       q9,  d4,  w1    /* q9   = W1 * col[1] */
        vadd.i32        q11, q15, q7
        vmull.s16       q10, d4,  w3    /* q10  = W3 * col[1] */
        vadd.i32        q12, q15, q8
        vmull.s16       q5,  d4,  w5    /* q5   = W5 * col[1] */
        vsub.i32        q13, q15, q8
        vmull.s16       q6,  d4,  w7    /* q6   = W7 * col[1] */
        vsub.i32        q14, q15, q7

        vmlal.s16       q9,  d8,  w3    /* q9  += W3 * col[3] */
        vmlsl.s16       q10, d8,  w7    /* q10 -= W7 * col[3] */
        vmlsl.s16       q5,  d8,  w1    /* q5  -= W1 * col[3] */
        vmlsl.s16       q6,  d8,  w5    /* q6  -= W5 * col[3] */
        .endm

        .text
        .align 6

/* Prefetch the output lines starting at r0 (stride r1), then fall
 * through to idct_row4_neon below. */
function idct_row4_pld_neon
        pld             [r0]
        add             r3,  r0,  r1,  lsl #2
        pld             [r0, r1]
        pld             [r0, r1, lsl #1]
A       pld             [r3, -r1]
        pld             [r3]
        pld             [r3, r1]
        add             r3,  r3,  r1,  lsl #1
        pld             [r3]
        pld             [r3, r1]
endfunc

function idct_row4_neon
        vmov.i32        q15, #(1<<(ROW_SHIFT-1))
        vld1.64         {d2-d5},  [r2,:128]!
        vmlal.s16       q15, d2,  w4    /* q15 += W4 * col[0] */
        vld1.64         {d6, d7}, [r2,:128]!
        vorr            d10, d3,  d5
        vld1.64         {d8, d9}, [r2,:128]!
        add             r2,  r2,  #-64  /* rewind to the start of these four rows */

        vorr            d11, d7,  d9
        vorr            d10, d10, d11   /* d10 = col[4] | col[5] | col[6] | col[7] */
        vmov            r3,  r4,  d10

        idct_col4_top

        orrs            r3,  r3,  r4    /* col[4..7] all zero? */
        beq             1f

        vmull.s16       q7,  d3,  w4    /* q7   = W4 * col[4] */
        vmlal.s16       q9,  d5,  w5    /* q9  += W5 * col[5] */
        vmlsl.s16       q10, d5,  w1    /* q10 -= W1 * col[5] */
        vmull.s16       q8,  d7,  w2    /* q8   = W2 * col[6] */
        vmlal.s16       q5,  d5,  w7    /* q5  += W7 * col[5] */
        vadd.i32        q11, q11, q7
        vsub.i32        q12, q12, q7
        vsub.i32        q13, q13, q7
        vadd.i32        q14, q14, q7
        vmlal.s16       q6,  d5,  w3    /* q6  += W3 * col[5] */
        vmull.s16       q7,  d7,  w6    /* q7   = W6 * col[6] */
        vmlal.s16       q9,  d9,  w7    /* q9  += W7 * col[7] */
        vmlsl.s16       q10, d9,  w5    /* q10 -= W5 * col[7] */
        vmlal.s16       q5,  d9,  w3    /* q5  += W3 * col[7] */
        vmlsl.s16       q6,  d9,  w1    /* q6  -= W1 * col[7] */
        vadd.i32        q11, q11, q7
        vsub.i32        q12, q12, q8
        vadd.i32        q13, q13, q8
        vsub.i32        q14, q14, q7

1:      vadd.i32        q3,  q11, q9
        vadd.i32        q4,  q12, q10
        vshrn.i32       d2,  q3,  #ROW_SHIFT
        vshrn.i32       d4,  q4,  #ROW_SHIFT
        vadd.i32        q7,  q13, q5
        vadd.i32        q8,  q14, q6
        vtrn.16         d2,  d4
        vshrn.i32       d6,  q7,  #ROW_SHIFT
        vshrn.i32       d8,  q8,  #ROW_SHIFT
        vsub.i32        q14, q14, q6
        vsub.i32        q11, q11, q9
        vtrn.16         d6,  d8
        vsub.i32        q13, q13, q5
        vshrn.i32       d3,  q14, #ROW_SHIFT
        vtrn.32         d2,  d6
        vsub.i32        q12, q12, q10
        vtrn.32         d4,  d8
        vshrn.i32       d5,  q13, #ROW_SHIFT
        vshrn.i32       d7,  q12, #ROW_SHIFT
        vshrn.i32       d9,  q11, #ROW_SHIFT

        vtrn.16         d3,  d5
        vtrn.16         d7,  d9
        vtrn.32         d3,  d7
        vtrn.32         d5,  d9

        vst1.64         {d2-d5},  [r2,:128]!
        vst1.64         {d6-d9},  [r2,:128]!

        bx              lr
endfunc

function idct_col4_neon
        mov             ip,  #16
        vld1.64         {d2}, [r2,:64], ip /* d2 = col[0] */
        vdup.16         d30, w4c
        vld1.64         {d4}, [r2,:64], ip /* d4 = col[1] */
        vadd.i16        d30, d30, d2
        vld1.64         {d6}, [r2,:64], ip /* d6 = col[2] */
        vmull.s16       q15, d30, w4       /* q15 = W4*(col[0]+(1<<(COL_SHIFT-1))/W4) */
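        /* Note (editorial): W4c is defined above as (1<<(COL_SHIFT-1))/W4, so
         * W4 * W4c is approximately 1 << (COL_SHIFT-1).  Adding W4c to col[0]
         * before this multiply folds the column-pass rounding bias into the
         * W4*col[0] product, avoiding a separate bias add before the final
         * shift right by COL_SHIFT. */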