/* Copyright (c) 2014, Cisco Systems, INC
   Written by XiangMingZhu WeiZhou MinPeng YanWang

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
cannam@154: */ cannam@154: cannam@154: #if !defined(X86CPU_H) cannam@154: # define X86CPU_H cannam@154: cannam@154: # if defined(OPUS_X86_MAY_HAVE_SSE) cannam@154: # define MAY_HAVE_SSE(name) name ## _sse cannam@154: # else cannam@154: # define MAY_HAVE_SSE(name) name ## _c cannam@154: # endif cannam@154: cannam@154: # if defined(OPUS_X86_MAY_HAVE_SSE2) cannam@154: # define MAY_HAVE_SSE2(name) name ## _sse2 cannam@154: # else cannam@154: # define MAY_HAVE_SSE2(name) name ## _c cannam@154: # endif cannam@154: cannam@154: # if defined(OPUS_X86_MAY_HAVE_SSE4_1) cannam@154: # define MAY_HAVE_SSE4_1(name) name ## _sse4_1 cannam@154: # else cannam@154: # define MAY_HAVE_SSE4_1(name) name ## _c cannam@154: # endif cannam@154: cannam@154: # if defined(OPUS_X86_MAY_HAVE_AVX) cannam@154: # define MAY_HAVE_AVX(name) name ## _avx cannam@154: # else cannam@154: # define MAY_HAVE_AVX(name) name ## _c cannam@154: # endif cannam@154: cannam@154: # if defined(OPUS_HAVE_RTCD) cannam@154: int opus_select_arch(void); cannam@154: # endif cannam@154: cannam@154: /*gcc appears to emit MOVDQA's to load the argument of an _mm_cvtepi8_epi32() cannam@154: or _mm_cvtepi16_epi32() when optimizations are disabled, even though the cannam@154: actual PMOVSXWD instruction takes an m32 or m64. Unlike a normal memory cannam@154: reference, these require 16-byte alignment and load a full 16 bytes (instead cannam@154: of 4 or 8), possibly reading out of bounds. cannam@154: cannam@154: We can insert an explicit MOVD or MOVQ using _mm_cvtsi32_si128() or cannam@154: _mm_loadl_epi64(), which should have the same semantics as an m32 or m64 cannam@154: reference in the PMOVSXWD instruction itself, but gcc is not smart enough to cannam@154: optimize this out when optimizations ARE enabled. 
cannam@154: cannam@154: Clang, in contrast, requires us to do this always for _mm_cvtepi8_epi32 cannam@154: (which is fair, since technically the compiler is always allowed to do the cannam@154: dereference before invoking the function implementing the intrinsic). cannam@154: However, it is smart enough to eliminate the extra MOVD instruction. cannam@154: For _mm_cvtepi16_epi32, it does the right thing, though does *not* optimize out cannam@154: the extra MOVQ if it's specified explicitly */ cannam@154: cannam@154: # if defined(__clang__) || !defined(__OPTIMIZE__) cannam@154: # define OP_CVTEPI8_EPI32_M32(x) \ cannam@154: (_mm_cvtepi8_epi32(_mm_cvtsi32_si128(*(int *)(x)))) cannam@154: # else cannam@154: # define OP_CVTEPI8_EPI32_M32(x) \ cannam@154: (_mm_cvtepi8_epi32(*(__m128i *)(x))) cannam@154: #endif cannam@154: cannam@154: /* similar reasoning about the instruction sequence as in the 32-bit macro above, cannam@154: */ cannam@154: # if defined(__clang__) || !defined(__OPTIMIZE__) cannam@154: # define OP_CVTEPI16_EPI32_M64(x) \ cannam@154: (_mm_cvtepi16_epi32(_mm_loadl_epi64((__m128i *)(x)))) cannam@154: # else cannam@154: # define OP_CVTEPI16_EPI32_M64(x) \ cannam@154: (_mm_cvtepi16_epi32(*(__m128i *)(x))) cannam@154: # endif cannam@154: cannam@154: #endif