/*
 * Copyright (c) 2008 Siarhei Siamashka <ssvb@users.sourceforge.net>
 *
 * This file is part of FFmpeg
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "asm.S"

/**
 * ARM VFP optimized implementation of the 'vector_fmul_c' function.
 * Assume that len is a positive number and is a multiple of 8.
 */
@ void ff_vector_fmul_vfp(float *dst, const float *src0, const float *src1, int len)
28 function ff_vector_fmul_vfp, export=1
|
yading@11
|
29 vpush {d8-d15}
|
yading@11
|
30 fmrx r12, fpscr
|
yading@11
|
31 orr r12, r12, #(3 << 16) /* set vector size to 4 */
|
yading@11
|
32 fmxr fpscr, r12
|
yading@11
|
33
|
yading@11
|
34 vldmia r1!, {s0-s3}
|
yading@11
|
35 vldmia r2!, {s8-s11}
|
yading@11
|
36 vldmia r1!, {s4-s7}
|
yading@11
|
37 vldmia r2!, {s12-s15}
|
yading@11
|
38 vmul.f32 s8, s0, s8
|
yading@11
|
39 1:
|
yading@11
|
40 subs r3, r3, #16
|
yading@11
|
41 vmul.f32 s12, s4, s12
|
yading@11
|
42 itttt ge
|
yading@11
|
43 vldmiage r1!, {s16-s19}
|
yading@11
|
44 vldmiage r2!, {s24-s27}
|
yading@11
|
45 vldmiage r1!, {s20-s23}
|
yading@11
|
46 vldmiage r2!, {s28-s31}
|
yading@11
|
47 it ge
|
yading@11
|
48 vmulge.f32 s24, s16, s24
|
yading@11
|
49 vstmia r0!, {s8-s11}
|
yading@11
|
50 vstmia r0!, {s12-s15}
|
yading@11
|
51 it ge
|
yading@11
|
52 vmulge.f32 s28, s20, s28
|
yading@11
|
53 itttt gt
|
yading@11
|
54 vldmiagt r1!, {s0-s3}
|
yading@11
|
55 vldmiagt r2!, {s8-s11}
|
yading@11
|
56 vldmiagt r1!, {s4-s7}
|
yading@11
|
57 vldmiagt r2!, {s12-s15}
|
yading@11
|
58 ittt ge
|
yading@11
|
59 vmulge.f32 s8, s0, s8
|
yading@11
|
60 vstmiage r0!, {s24-s27}
|
yading@11
|
61 vstmiage r0!, {s28-s31}
|
yading@11
|
62 bgt 1b
|
yading@11
|
63
|
yading@11
|
64 bic r12, r12, #(7 << 16) /* set vector size back to 1 */
|
yading@11
|
65 fmxr fpscr, r12
|
yading@11
|
66 vpop {d8-d15}
|
yading@11
|
67 bx lr
|
yading@11
|
68 endfunc

/**
 * ARM VFP optimized implementation of the 'vector_fmul_reverse_c' function.
 * Assume that len is a positive number and is a multiple of 8.
 */
@ void ff_vector_fmul_reverse_vfp(float *dst, const float *src0,
@                                 const float *src1, int len)
76 function ff_vector_fmul_reverse_vfp, export=1
|
yading@11
|
77 vpush {d8-d15}
|
yading@11
|
78 add r2, r2, r3, lsl #2
|
yading@11
|
79 vldmdb r2!, {s0-s3}
|
yading@11
|
80 vldmia r1!, {s8-s11}
|
yading@11
|
81 vldmdb r2!, {s4-s7}
|
yading@11
|
82 vldmia r1!, {s12-s15}
|
yading@11
|
83 vmul.f32 s8, s3, s8
|
yading@11
|
84 vmul.f32 s9, s2, s9
|
yading@11
|
85 vmul.f32 s10, s1, s10
|
yading@11
|
86 vmul.f32 s11, s0, s11
|
yading@11
|
87 1:
|
yading@11
|
88 subs r3, r3, #16
|
yading@11
|
89 it ge
|
yading@11
|
90 vldmdbge r2!, {s16-s19}
|
yading@11
|
91 vmul.f32 s12, s7, s12
|
yading@11
|
92 it ge
|
yading@11
|
93 vldmiage r1!, {s24-s27}
|
yading@11
|
94 vmul.f32 s13, s6, s13
|
yading@11
|
95 it ge
|
yading@11
|
96 vldmdbge r2!, {s20-s23}
|
yading@11
|
97 vmul.f32 s14, s5, s14
|
yading@11
|
98 it ge
|
yading@11
|
99 vldmiage r1!, {s28-s31}
|
yading@11
|
100 vmul.f32 s15, s4, s15
|
yading@11
|
101 it ge
|
yading@11
|
102 vmulge.f32 s24, s19, s24
|
yading@11
|
103 it gt
|
yading@11
|
104 vldmdbgt r2!, {s0-s3}
|
yading@11
|
105 it ge
|
yading@11
|
106 vmulge.f32 s25, s18, s25
|
yading@11
|
107 vstmia r0!, {s8-s13}
|
yading@11
|
108 it ge
|
yading@11
|
109 vmulge.f32 s26, s17, s26
|
yading@11
|
110 it gt
|
yading@11
|
111 vldmiagt r1!, {s8-s11}
|
yading@11
|
112 itt ge
|
yading@11
|
113 vmulge.f32 s27, s16, s27
|
yading@11
|
114 vmulge.f32 s28, s23, s28
|
yading@11
|
115 it gt
|
yading@11
|
116 vldmdbgt r2!, {s4-s7}
|
yading@11
|
117 it ge
|
yading@11
|
118 vmulge.f32 s29, s22, s29
|
yading@11
|
119 vstmia r0!, {s14-s15}
|
yading@11
|
120 ittt ge
|
yading@11
|
121 vmulge.f32 s30, s21, s30
|
yading@11
|
122 vmulge.f32 s31, s20, s31
|
yading@11
|
123 vmulge.f32 s8, s3, s8
|
yading@11
|
124 it gt
|
yading@11
|
125 vldmiagt r1!, {s12-s15}
|
yading@11
|
126 itttt ge
|
yading@11
|
127 vmulge.f32 s9, s2, s9
|
yading@11
|
128 vmulge.f32 s10, s1, s10
|
yading@11
|
129 vstmiage r0!, {s24-s27}
|
yading@11
|
130 vmulge.f32 s11, s0, s11
|
yading@11
|
131 it ge
|
yading@11
|
132 vstmiage r0!, {s28-s31}
|
yading@11
|
133 bgt 1b
|
yading@11
|
134
|
yading@11
|
135 vpop {d8-d15}
|
yading@11
|
136 bx lr
|
yading@11
|
137 endfunc
|