/*
 * Copyright (c) 2010 Alexander Strange <astrange@ithinksw.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
|
yading@11
|
20
|
yading@11
|
21 #ifndef AVUTIL_X86_INTREADWRITE_H
|
yading@11
|
22 #define AVUTIL_X86_INTREADWRITE_H
|
yading@11
|
23
|
yading@11
|
24 #include <stdint.h>
|
yading@11
|
25 #include "config.h"
|
yading@11
|
26 #include "libavutil/attributes.h"
|
yading@11
|
27
|
yading@11
|
28 #if HAVE_MMX
|
yading@11
|
29
|
yading@11
|
30 #if !HAVE_FAST_64BIT && defined(__MMX__)
|
yading@11
|
31
|
yading@11
|
#define AV_COPY64 AV_COPY64
/**
 * Copy 8 bytes from s to d through an MMX register.
 *
 * Both pointers are accessed as uint64_t, so d and s are presumably
 * expected to be suitably aligned for a 64-bit access — TODO confirm
 * against the generic intreadwrite.h contract.
 *
 * Clobbers mm0 (declared in the clobber list). No emms is issued here;
 * NOTE(review): the caller is presumably responsible for restoring the
 * x87/MMX state afterwards — confirm with FFmpeg's MMX usage rules.
 */
static av_always_inline void AV_COPY64(void *d, const void *s)
{
    __asm__("movq %1, %%mm0 \n\t"
            "movq %%mm0, %0 \n\t"
            : "=m"(*(uint64_t*)d)
            : "m" (*(const uint64_t*)s)
            : "mm0");
}
|
yading@11
|
41
|
yading@11
|
#define AV_SWAP64 AV_SWAP64
/**
 * Exchange the 8-byte contents of a and b through two MMX registers.
 *
 * Both operands are read and written ("+m" constraints), accessed as
 * uint64_t. Clobbers mm0 and mm1. As with the other helpers here, no
 * emms is issued; NOTE(review): caller presumably handles MMX state
 * cleanup — confirm.
 */
static av_always_inline void AV_SWAP64(void *a, void *b)
{
    __asm__("movq %1, %%mm0 \n\t"
            "movq %0, %%mm1 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm1, %1 \n\t"
            : "+m"(*(uint64_t*)a), "+m"(*(uint64_t*)b)
            ::"mm0", "mm1");
}
|
yading@11
|
52
|
yading@11
|
#define AV_ZERO64 AV_ZERO64
/**
 * Zero 8 bytes at d by storing an mm register cleared with pxor.
 *
 * d is written as a uint64_t ("=m" output). Clobbers mm0; no emms is
 * issued (see the note on AV_COPY64 above regarding MMX state).
 */
static av_always_inline void AV_ZERO64(void *d)
{
    __asm__("pxor %%mm0, %%mm0 \n\t"
            "movq %%mm0, %0 \n\t"
            : "=m"(*(uint64_t*)d)
            :: "mm0");
}
|
yading@11
|
61
|
yading@11
|
62 #endif /* !HAVE_FAST_64BIT && defined(__MMX__) */
|
yading@11
|
63
|
yading@11
|
64 #ifdef __SSE__
|
yading@11
|
65
|
yading@11
|
#define AV_COPY128 AV_COPY128
/**
 * Copy 16 bytes from s to d through an SSE register.
 *
 * The local struct v ({uint64_t v[2];}) exists only to give the "m"
 * constraints a 16-byte-wide memory operand, so the compiler knows the
 * full range read/written.
 *
 * Uses movaps, which requires 16-byte-aligned memory operands, so both
 * d and s must be 16-byte aligned (misalignment faults at runtime).
 * Clobbers xmm0.
 */
static av_always_inline void AV_COPY128(void *d, const void *s)
{
    struct v {uint64_t v[2];};

    __asm__("movaps %1, %%xmm0 \n\t"
            "movaps %%xmm0, %0 \n\t"
            : "=m"(*(struct v*)d)
            : "m" (*(const struct v*)s)
            : "xmm0");
}
|
yading@11
|
77
|
yading@11
|
78 #endif /* __SSE__ */
|
yading@11
|
79
|
yading@11
|
80 #ifdef __SSE2__
|
yading@11
|
81
|
yading@11
|
#define AV_ZERO128 AV_ZERO128
/**
 * Zero 16 bytes at d by storing an xmm register cleared with pxor.
 *
 * The local struct v ({uint64_t v[2];}) gives the "=m" output a
 * 16-byte-wide memory operand, matching AV_COPY128 above.
 *
 * Uses movdqa, which requires a 16-byte-aligned memory operand, so d
 * must be 16-byte aligned. Clobbers xmm0.
 */
static av_always_inline void AV_ZERO128(void *d)
{
    struct v {uint64_t v[2];};

    __asm__("pxor %%xmm0, %%xmm0 \n\t"
            "movdqa %%xmm0, %0 \n\t"
            : "=m"(*(struct v*)d)
            :: "xmm0");
}
|
yading@11
|
92
|
yading@11
|
93 #endif /* __SSE2__ */
|
yading@11
|
94
|
yading@11
|
95 #endif /* HAVE_MMX */
|
yading@11
|
96
|
yading@11
|
97 #endif /* AVUTIL_X86_INTREADWRITE_H */
|