/*
 * Copyright (c) 2008 Mans Rullgard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"

/* H.264 loop filter */
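@ Calling convention, as implied by the loads in the functions below:
@ r0 = pixels, r1 = stride, r2 = alpha, r3 = beta, with a pointer to
@ the four per-edge tc0 values passed on the stack.
@
@ h264_loop_filter_start fetches the tc0 bytes into d24[0] and returns
@ to the caller early when no filtering is needed: when alpha or beta
@ is zero, or when all four tc0 values are negative (the shift-and-AND
@ sequence ANDs the sign bits of all four bytes together, leaving a
@ negative result only in that case).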
.macro  h264_loop_filter_start
        ldr             r12, [sp]
        tst             r2,  r2
        ldr             r12, [r12]
        it              ne
        tstne           r3,  r3
        vmov.32         d24[0], r12
        and             r12, r12, r12, lsl #16
        it              eq
        bxeq            lr
        ands            r12, r12, r12, lsl #8
        it              lt
        bxlt            lr
.endm

.macro  h264_loop_filter_luma
        vdup.8          q11, r2         @ alpha
        vmovl.u8        q12, d24
        vabd.u8         q6,  q8,  q0    @ abs(p0 - q0)
        vmovl.u16       q12, d24
        vabd.u8         q14, q9,  q8    @ abs(p1 - p0)
        vsli.16         q12, q12, #8
        vabd.u8         q15, q1,  q0    @ abs(q1 - q0)
        vsli.32         q12, q12, #16
        vclt.u8         q6,  q6,  q11   @ < alpha
        vdup.8          q11, r3         @ beta
        vclt.s8         q7,  q12, #0
        vclt.u8         q14, q14, q11   @ < beta
        vclt.u8         q15, q15, q11   @ < beta
        vbic            q6,  q6,  q7
        vabd.u8         q4,  q10, q8    @ abs(p2 - p0)
        vand            q6,  q6,  q14
        vabd.u8         q5,  q2,  q0    @ abs(q2 - q0)
        vclt.u8         q4,  q4,  q11   @ < beta
        vand            q6,  q6,  q15
        vclt.u8         q5,  q5,  q11   @ < beta
        vand            q4,  q4,  q6
        vand            q5,  q5,  q6
        vand            q12, q12, q6
        vrhadd.u8       q14, q8,  q0
        vsub.i8         q6,  q12, q4
        vqadd.u8        q7,  q9,  q12
        vhadd.u8        q10, q10, q14
        vsub.i8         q6,  q6,  q5
        vhadd.u8        q14, q2,  q14
        vmin.u8         q7,  q7,  q10
        vqsub.u8        q11, q9,  q12
        vqadd.u8        q2,  q1,  q12
        vmax.u8         q7,  q7,  q11
        vqsub.u8        q11, q1,  q12
        vmin.u8         q14, q2,  q14
        vmovl.u8        q2,  d0
        vmax.u8         q14, q14, q11
        vmovl.u8        q10, d1
        vsubw.u8        q2,  q2,  d16
        vsubw.u8        q10, q10, d17
        vshl.i16        q2,  q2,  #2
        vshl.i16        q10, q10, #2
        vaddw.u8        q2,  q2,  d18
        vaddw.u8        q10, q10, d19
        vsubw.u8        q2,  q2,  d2
        vsubw.u8        q10, q10, d3
        vrshrn.i16      d4,  q2,  #3
        vrshrn.i16      d5,  q10, #3
        vbsl            q4,  q7,  q9
        vbsl            q5,  q14, q1
        vneg.s8         q7,  q6
        vmovl.u8        q14, d16
        vmin.s8         q2,  q2,  q6
        vmovl.u8        q6,  d17
        vmax.s8         q2,  q2,  q7
        vmovl.u8        q11, d0
        vmovl.u8        q12, d1
        vaddw.s8        q14, q14, d4
        vaddw.s8        q6,  q6,  d5
        vsubw.s8        q11, q11, d4
        vsubw.s8        q12, q12, d5
        vqmovun.s16     d16, q14
        vqmovun.s16     d17, q6
        vqmovun.s16     d0,  q11
        vqmovun.s16     d1,  q12
.endm

function ff_h264_v_loop_filter_luma_neon, export=1
        h264_loop_filter_start

        vld1.8          {d0, d1},  [r0,:128], r1
        vld1.8          {d2, d3},  [r0,:128], r1
        vld1.8          {d4, d5},  [r0,:128], r1
        sub             r0,  r0,  r1,  lsl #2
        sub             r0,  r0,  r1,  lsl #1
        vld1.8          {d20,d21}, [r0,:128], r1
        vld1.8          {d18,d19}, [r0,:128], r1
        vld1.8          {d16,d17}, [r0,:128], r1

        vpush           {d8-d15}

        h264_loop_filter_luma

        sub             r0,  r0,  r1,  lsl #1
        vst1.8          {d8, d9},  [r0,:128], r1
        vst1.8          {d16,d17}, [r0,:128], r1
        vst1.8          {d0, d1},  [r0,:128], r1
        vst1.8          {d10,d11}, [r0,:128]

        vpop            {d8-d15}
        bx              lr
endfunc

function ff_h264_h_loop_filter_luma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  #4
        vld1.8          {d6},  [r0], r1
        vld1.8          {d20}, [r0], r1
        vld1.8          {d18}, [r0], r1
        vld1.8          {d16}, [r0], r1
        vld1.8          {d0},  [r0], r1
        vld1.8          {d2},  [r0], r1
        vld1.8          {d4},  [r0], r1
        vld1.8          {d26}, [r0], r1
        vld1.8          {d7},  [r0], r1
        vld1.8          {d21}, [r0], r1
        vld1.8          {d19}, [r0], r1
        vld1.8          {d17}, [r0], r1
        vld1.8          {d1},  [r0], r1
        vld1.8          {d3},  [r0], r1
        vld1.8          {d5},  [r0], r1
        vld1.8          {d27}, [r0], r1

        transpose_8x8   q3, q10, q9, q8, q0, q1, q2, q13

        vpush           {d8-d15}

        h264_loop_filter_luma

        transpose_4x4   q4, q8, q0, q5

        sub             r0,  r0,  r1,  lsl #4
        add             r0,  r0,  #2
        vst1.32         {d8[0]},  [r0], r1
        vst1.32         {d16[0]}, [r0], r1
        vst1.32         {d0[0]},  [r0], r1
        vst1.32         {d10[0]}, [r0], r1
        vst1.32         {d8[1]},  [r0], r1
        vst1.32         {d16[1]}, [r0], r1
        vst1.32         {d0[1]},  [r0], r1
        vst1.32         {d10[1]}, [r0], r1
        vst1.32         {d9[0]},  [r0], r1
        vst1.32         {d17[0]}, [r0], r1
        vst1.32         {d1[0]},  [r0], r1
        vst1.32         {d11[0]}, [r0], r1
        vst1.32         {d9[1]},  [r0], r1
        vst1.32         {d17[1]}, [r0], r1
        vst1.32         {d1[1]},  [r0], r1
        vst1.32         {d11[1]}, [r0], r1

        vpop            {d8-d15}
        bx              lr
endfunc
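@ Normal-strength chroma filter for one 8-pixel edge. On entry d18/d16
@ hold p1/p0 and d0/d2 hold q0/q1 (rows for the vertical filter, the
@ transposed columns for the horizontal one). The macro computes
@ delta = clip((((q0 - p0) << 2) + p1 - q1 + 4) >> 3, -tc0, tc0),
@ zeroes it in lanes failing the |p0-q0| < alpha, |p1-p0| < beta and
@ |q1-q0| < beta tests, then adds it to p0 and subtracts it from q0.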
.macro  h264_loop_filter_chroma
        vdup.8          d22, r2         @ alpha
        vmovl.u8        q12, d24
        vabd.u8         d26, d16, d0    @ abs(p0 - q0)
        vmovl.u8        q2,  d0
        vabd.u8         d28, d18, d16   @ abs(p1 - p0)
        vsubw.u8        q2,  q2,  d16
        vsli.16         d24, d24, #8
        vshl.i16        q2,  q2,  #2
        vabd.u8         d30, d2,  d0    @ abs(q1 - q0)
        vaddw.u8        q2,  q2,  d18
        vclt.u8         d26, d26, d22   @ < alpha
        vsubw.u8        q2,  q2,  d2
        vdup.8          d22, r3         @ beta
        vrshrn.i16      d4,  q2,  #3
        vclt.u8         d28, d28, d22   @ < beta
        vclt.u8         d30, d30, d22   @ < beta
        vmin.s8         d4,  d4,  d24
        vneg.s8         d25, d24
        vand            d26, d26, d28
        vmax.s8         d4,  d4,  d25
        vand            d26, d26, d30
        vmovl.u8        q11, d0
        vand            d4,  d4,  d26
        vmovl.u8        q14, d16
        vaddw.s8        q14, q14, d4
        vsubw.s8        q11, q11, d4
        vqmovun.s16     d16, q14
        vqmovun.s16     d0,  q11
.endm

function ff_h264_v_loop_filter_chroma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  r1,  lsl #1
        vld1.8          {d18}, [r0,:64], r1
        vld1.8          {d16}, [r0,:64], r1
        vld1.8          {d0},  [r0,:64], r1
        vld1.8          {d2},  [r0,:64]

        h264_loop_filter_chroma

        sub             r0,  r0,  r1,  lsl #1
        vst1.8          {d16}, [r0,:64], r1
        vst1.8          {d0},  [r0,:64], r1

        bx              lr
endfunc

function ff_h264_h_loop_filter_chroma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  #2
        vld1.32         {d18[0]}, [r0], r1
        vld1.32         {d16[0]}, [r0], r1
        vld1.32         {d0[0]},  [r0], r1
        vld1.32         {d2[0]},  [r0], r1
        vld1.32         {d18[1]}, [r0], r1
        vld1.32         {d16[1]}, [r0], r1
        vld1.32         {d0[1]},  [r0], r1
        vld1.32         {d2[1]},  [r0], r1

        vtrn.16         d18, d0
        vtrn.16         d16, d2
        vtrn.8          d18, d16
        vtrn.8          d0,  d2

        h264_loop_filter_chroma

        vtrn.16         d18, d0
        vtrn.16         d16, d2
        vtrn.8          d18, d16
        vtrn.8          d0,  d2

        sub             r0,  r0,  r1,  lsl #3
        vst1.32         {d18[0]}, [r0], r1
        vst1.32         {d16[0]}, [r0], r1
        vst1.32         {d0[0]},  [r0], r1
        vst1.32         {d2[0]},  [r0], r1
        vst1.32         {d18[1]}, [r0], r1
        vst1.32         {d16[1]}, [r0], r1
        vst1.32         {d0[1]},  [r0], r1
        vst1.32         {d2[1]},  [r0], r1

        bx              lr
endfunc

@ Biweighted prediction
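@ ff_biweight_h264_pixels_{16,8,4}_neon take (dst, src, stride, height)
@ in r0-r3, with log2_denom, weightd, weights and offset on the stack
@ (see the prologue in biweight_func below). Following the C reference,
@ they compute
@   dst = clip_u8((dst * weightd + src * weights
@                  + (((offset + 1) | 1) << log2_denom)) >> (log2_denom + 1))
@ q8 is preloaded with the combined offset/rounding term and q9 with
@ the negated shift count (vshl.s16 by a negative count shifts right).
@ Since vmlal.u8/vmlsl.u8 only accept unsigned operands, biweight_func
@ dispatches on the sign bits of the two weights, negating each weight
@ as needed and pairing it with the matching multiply-accumulate or
@ multiply-subtract variant (macs scales the src term, macd the dst
@ term).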
.macro  biweight_16     macs, macd
        vdup.8          d0,  r4
        vdup.8          d1,  r5
        vmov            q2,  q8
        vmov            q3,  q8
1:      subs            r3,  r3,  #2
        vld1.8          {d20-d21},[r0,:128], r2
        \macd           q2,  d0,  d20
        pld             [r0]
        \macd           q3,  d0,  d21
        vld1.8          {d22-d23},[r1,:128], r2
        \macs           q2,  d1,  d22
        pld             [r1]
        \macs           q3,  d1,  d23
        vmov            q12, q8
        vld1.8          {d28-d29},[r0,:128], r2
        vmov            q13, q8
        \macd           q12, d0,  d28
        pld             [r0]
        \macd           q13, d0,  d29
        vld1.8          {d30-d31},[r1,:128], r2
        \macs           q12, d1,  d30
        pld             [r1]
        \macs           q13, d1,  d31
        vshl.s16        q2,  q2,  q9
        vshl.s16        q3,  q3,  q9
        vqmovun.s16     d4,  q2
        vqmovun.s16     d5,  q3
        vshl.s16        q12, q12, q9
        vshl.s16        q13, q13, q9
        vqmovun.s16     d24, q12
        vqmovun.s16     d25, q13
        vmov            q3,  q8
        vst1.8          {d4- d5}, [r6,:128], r2
        vmov            q2,  q8
        vst1.8          {d24-d25},[r6,:128], r2
        bne             1b
        pop             {r4-r6, pc}
.endm

.macro  biweight_8      macs, macd
        vdup.8          d0,  r4
        vdup.8          d1,  r5
        vmov            q1,  q8
        vmov            q10, q8
1:      subs            r3,  r3,  #2
        vld1.8          {d4},[r0,:64], r2
        \macd           q1,  d0,  d4
        pld             [r0]
        vld1.8          {d5},[r1,:64], r2
        \macs           q1,  d1,  d5
        pld             [r1]
        vld1.8          {d6},[r0,:64], r2
        \macd           q10, d0,  d6
        pld             [r0]
        vld1.8          {d7},[r1,:64], r2
        \macs           q10, d1,  d7
        pld             [r1]
        vshl.s16        q1,  q1,  q9
        vqmovun.s16     d2,  q1
        vshl.s16        q10, q10, q9
        vqmovun.s16     d4,  q10
        vmov            q10, q8
        vst1.8          {d2},[r6,:64], r2
        vmov            q1,  q8
        vst1.8          {d4},[r6,:64], r2
        bne             1b
        pop             {r4-r6, pc}
.endm

.macro  biweight_4      macs, macd
        vdup.8          d0,  r4
        vdup.8          d1,  r5
        vmov            q1,  q8
        vmov            q10, q8
1:      subs            r3,  r3,  #4
        vld1.32         {d4[0]},[r0,:32], r2
        vld1.32         {d4[1]},[r0,:32], r2
        \macd           q1,  d0,  d4
        pld             [r0]
        vld1.32         {d5[0]},[r1,:32], r2
        vld1.32         {d5[1]},[r1,:32], r2
        \macs           q1,  d1,  d5
        pld             [r1]
        blt             2f
        vld1.32         {d6[0]},[r0,:32], r2
        vld1.32         {d6[1]},[r0,:32], r2
        \macd           q10, d0,  d6
        pld             [r0]
        vld1.32         {d7[0]},[r1,:32], r2
        vld1.32         {d7[1]},[r1,:32], r2
        \macs           q10, d1,  d7
        pld             [r1]
        vshl.s16        q1,  q1,  q9
        vqmovun.s16     d2,  q1
        vshl.s16        q10, q10, q9
        vqmovun.s16     d4,  q10
        vmov            q10, q8
        vst1.32         {d2[0]},[r6,:32], r2
        vst1.32         {d2[1]},[r6,:32], r2
        vmov            q1,  q8
        vst1.32         {d4[0]},[r6,:32], r2
        vst1.32         {d4[1]},[r6,:32], r2
        bne             1b
        pop             {r4-r6, pc}
2:      vshl.s16        q1,  q1,  q9
        vqmovun.s16     d2,  q1
        vst1.32         {d2[0]},[r6,:32], r2
        vst1.32         {d2[1]},[r6,:32], r2
        pop             {r4-r6, pc}
.endm

.macro  biweight_func   w
function ff_biweight_h264_pixels_\w\()_neon, export=1
        push            {r4-r6, lr}
        ldr             r12, [sp, #16]
        add             r4,  sp,  #20
        ldm             r4,  {r4-r6}
        lsr             lr,  r4,  #31
        add             r6,  r6,  #1
        eors            lr,  lr,  r5,  lsr #30
        orr             r6,  r6,  #1
        vdup.16         q9,  r12
        lsl             r6,  r6,  r12
        vmvn            q9,  q9
        vdup.16         q8,  r6
        mov             r6,  r0
        beq             10f
        subs            lr,  lr,  #1
        beq             20f
        subs            lr,  lr,  #1
        beq             30f
        b               40f
10:     biweight_\w     vmlal.u8, vmlal.u8
20:     rsb             r4,  r4,  #0
        biweight_\w     vmlal.u8, vmlsl.u8
30:     rsb             r4,  r4,  #0
        rsb             r5,  r5,  #0
        biweight_\w     vmlsl.u8, vmlsl.u8
40:     rsb             r5,  r5,  #0
        biweight_\w     vmlsl.u8, vmlal.u8
endfunc
.endm

        biweight_func   16
        biweight_func   8
        biweight_func   4

@ Weighted prediction
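@ ff_weight_h264_pixels_{16,8,4}_neon take (block, stride, height,
@ log2_denom) in r0-r3, with weight and offset on the stack, and
@ compute
@   block = clip_u8((block * weight + (offset << log2_denom)
@                    + rounding) >> log2_denom)
@ A negative weight is negated so that vmull.u8 can be used, and the
@ product is then subtracted from the offset term in q8 instead of
@ added (\add becomes vhsub/vsub). For log2_denom > 1 a halving
@ add/sub folds one bit of the shift into the offset step; the rest is
@ a rounding right shift done by vrshl.s16 with the negative count held
@ in q9.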
.macro  weight_16       add
        vdup.8          d0,  r12
1:      subs            r2,  r2,  #2
        vld1.8          {d20-d21},[r0,:128], r1
        vmull.u8        q2,  d0,  d20
        pld             [r0]
        vmull.u8        q3,  d0,  d21
        vld1.8          {d28-d29},[r0,:128], r1
        vmull.u8        q12, d0,  d28
        pld             [r0]
        vmull.u8        q13, d0,  d29
        \add            q2,  q8,  q2
        vrshl.s16       q2,  q2,  q9
        \add            q3,  q8,  q3
        vrshl.s16       q3,  q3,  q9
        vqmovun.s16     d4,  q2
        vqmovun.s16     d5,  q3
        \add            q12, q8,  q12
        vrshl.s16       q12, q12, q9
        \add            q13, q8,  q13
        vrshl.s16       q13, q13, q9
        vqmovun.s16     d24, q12
        vqmovun.s16     d25, q13
        vst1.8          {d4- d5}, [r4,:128], r1
        vst1.8          {d24-d25},[r4,:128], r1
        bne             1b
        pop             {r4, pc}
.endm

.macro  weight_8        add
        vdup.8          d0,  r12
1:      subs            r2,  r2,  #2
        vld1.8          {d4},[r0,:64], r1
        vmull.u8        q1,  d0,  d4
        pld             [r0]
        vld1.8          {d6},[r0,:64], r1
        vmull.u8        q10, d0,  d6
        \add            q1,  q8,  q1
        pld             [r0]
        vrshl.s16       q1,  q1,  q9
        vqmovun.s16     d2,  q1
        \add            q10, q8,  q10
        vrshl.s16       q10, q10, q9
        vqmovun.s16     d4,  q10
        vst1.8          {d2},[r4,:64], r1
        vst1.8          {d4},[r4,:64], r1
        bne             1b
        pop             {r4, pc}
.endm

.macro  weight_4        add
        vdup.8          d0,  r12
        vmov            q1,  q8
        vmov            q10, q8
1:      subs            r2,  r2,  #4
        vld1.32         {d4[0]},[r0,:32], r1
        vld1.32         {d4[1]},[r0,:32], r1
        vmull.u8        q1,  d0,  d4
        pld             [r0]
        blt             2f
        vld1.32         {d6[0]},[r0,:32], r1
        vld1.32         {d6[1]},[r0,:32], r1
        vmull.u8        q10, d0,  d6
        pld             [r0]
        \add            q1,  q8,  q1
        vrshl.s16       q1,  q1,  q9
        vqmovun.s16     d2,  q1
        \add            q10, q8,  q10
        vrshl.s16       q10, q10, q9
        vqmovun.s16     d4,  q10
        vmov            q10, q8
        vst1.32         {d2[0]},[r4,:32], r1
        vst1.32         {d2[1]},[r4,:32], r1
        vmov            q1,  q8
        vst1.32         {d4[0]},[r4,:32], r1
        vst1.32         {d4[1]},[r4,:32], r1
        bne             1b
        pop             {r4, pc}
2:      \add            q1,  q8,  q1
        vrshl.s16       q1,  q1,  q9
        vqmovun.s16     d2,  q1
        vst1.32         {d2[0]},[r4,:32], r1
        vst1.32         {d2[1]},[r4,:32], r1
        pop             {r4, pc}
.endm

.macro  weight_func     w
function ff_weight_h264_pixels_\w\()_neon, export=1
        push            {r4, lr}
        ldr             r12, [sp, #8]
        ldr             r4,  [sp, #12]
        cmp             r3,  #1
        lsl             r4,  r4,  r3
        vdup.16         q8,  r4
        mov             r4,  r0
        ble             20f
        rsb             lr,  r3,  #1
        vdup.16         q9,  lr
        cmp             r12, #0
        blt             10f
        weight_\w       vhadd.s16
10:     rsb             r12, r12, #0
        weight_\w       vhsub.s16
20:     rsb             lr,  r3,  #0
        vdup.16         q9,  lr
        cmp             r12, #0
        blt             10f
        weight_\w       vadd.s16
10:     rsb             r12, r12, #0
        weight_\w       vsub.s16
endfunc
.endm

        weight_func     16
        weight_func     8
        weight_func     4