/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/swresample_internal.h"

int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_int16_ssse3(struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);

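/* Rounding bias preloaded into the accumulator of both cores below:
 * 0x4000 = 1 << 14 rounds the 32-bit dot-product sum to nearest when it
 * is later shifted right by 15 (psrad $15).  Only the low element is
 * nonzero, so the remaining accumulator lanes start at zero. */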
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};

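/* MMX2 core of the int16 resampler: multiply-accumulate c->filter_length
 * source samples against the filter with pmaddwd (4 int16 per iteration),
 * combine the two partial dword sums with pshufw+paddd, round, shift right
 * by 15, saturate to int16 and store one output sample.  The expansion
 * site is expected to define src, filter, dst, sample_index and dst_index. */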
#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
    "1:                           \n\t"\
    "movq    (%1, %0), %%mm1      \n\t"\
    "pmaddwd (%2, %0), %%mm1      \n\t"\
    "paddd    %%mm1, %%mm0        \n\t"\
    "add         $8, %0           \n\t"\
    " js 1b                       \n\t"\
    "pshufw   $0x0E, %%mm0, %%mm1 \n\t"\
    "paddd    %%mm1, %%mm0        \n\t"\
    "psrad      $15, %%mm0        \n\t"\
    "packssdw %%mm0, %%mm0        \n\t"\
    "movd     %%mm0, (%3)         \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
);

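/* SSSE3 variant of the same core: 8 int16 samples per pmaddwd iteration on
 * unaligned 16-byte loads, with the four partial dword sums reduced by two
 * phaddd instructions before the final shift, pack and store. */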
#define COMMON_CORE_INT16_SSSE3 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
    "1:                           \n\t"\
    "movdqu  (%1, %0), %%xmm1     \n\t"\
    "pmaddwd (%2, %0), %%xmm1     \n\t"\
    "paddd   %%xmm1, %%xmm0       \n\t"\
    "add        $16, %0           \n\t"\
    " js 1b                       \n\t"\
    "phaddd  %%xmm0, %%xmm0       \n\t"\
    "phaddd  %%xmm0, %%xmm0       \n\t"\
    "psrad      $15, %%xmm0       \n\t"\
    "packssdw %%xmm0, %%xmm0      \n\t"\
    "movd    %%xmm0, (%3)         \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
);
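
/* For reference, a scalar sketch of what either core computes per output
 * sample (illustrative only, under the same assumption the macros make,
 * namely that src, filter, dst, sample_index and dst_index are defined at
 * the expansion site):
 *
 *     int i;
 *     int64_t v = 0x4000;                       // rounding bias, 1 << 14
 *     for (i = 0; i < c->filter_length; i++)
 *         v += src[sample_index + i] * (int64_t)filter[i];
 *     dst[dst_index] = av_clip_int16(v >> 15);  // packssdw saturates likewise
 */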