;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2003-2013 Michael Niedermayer
;* Copyright (c) 2013 Daniel Kang
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

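; Write-back helpers: the "avg" variants average the computed row with the
; bytes already present in dst (pavgb rounding average) before storing, while
; the "put" variants store it directly. The *h variants access only the low
; half of the register (4 bytes under MMX) via movh; op_avgh additionally
; takes a scratch register as its third argument.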
%macro op_avgh 3
    movh   %3, %2
    pavgb  %1, %3
    movh   %2, %1
%endmacro

%macro op_avg 2
    pavgb  %1, %2
    mova   %2, %1
%endmacro

%macro op_puth 2-3
    movh   %2, %1
%endmacro

%macro op_put 2
    mova   %2, %1
%endmacro

; void pixels4_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
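; Each output row is the rounded average of a row of src1 (stride src1Stride)
; and a row of the packed src2 buffer (fixed stride 4), written ("put") or
; averaged ("avg") into dst (stride dstStride). An odd h is handled with one
; row up front; the main loop then processes four rows per iteration.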
%macro PIXELS4_L2 1
%define OP op_%1h
cglobal %1_pixels4_l2, 6,6
    movsxdifnidn r3, r3d
    movsxdifnidn r4, r4d
    test         r5d, 1
    je .loop
    ; h is odd: handle one row before entering the 4-row loop
    movd         m0, [r1]
    movd         m1, [r2]
    add          r1, r4
    add          r2, 4
    pavgb        m0, m1
    OP           m0, [r0], m3
    add          r0, r3
    dec          r5d
.loop:
    ; first two rows of this iteration
    mova         m0, [r1]
    mova         m1, [r1+r4]
    lea          r1, [r1+2*r4]
    pavgb        m0, [r2]
    pavgb        m1, [r2+4]
    OP           m0, [r0], m3
    OP           m1, [r0+r3], m3
    lea          r0, [r0+2*r3]
    ; second two rows
    mova         m0, [r1]
    mova         m1, [r1+r4]
    lea          r1, [r1+2*r4]
    pavgb        m0, [r2+8]
    pavgb        m1, [r2+12]
    OP           m0, [r0], m3
    OP           m1, [r0+r3], m3
    lea          r0, [r0+2*r3]
    add          r2, 16
    sub          r5d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PIXELS4_L2 put
PIXELS4_L2 avg

; void pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
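; 8-pixel-wide variant: same structure as pixels4_l2, with src2 packed at
; stride 8 and a full 8-byte MMX register stored per row. The main loop again
; processes four rows per iteration.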
%macro PIXELS8_L2 1
%define OP op_%1
cglobal %1_pixels8_l2, 6,6
    movsxdifnidn r3, r3d
    movsxdifnidn r4, r4d
    test         r5d, 1
    je .loop
    ; h is odd: handle one row before entering the 4-row loop
    mova         m0, [r1]
    mova         m1, [r2]
    add          r1, r4
    add          r2, 8
    pavgb        m0, m1
    OP           m0, [r0]
    add          r0, r3
    dec          r5d
.loop:
    ; first two rows of this iteration
    mova         m0, [r1]
    mova         m1, [r1+r4]
    lea          r1, [r1+2*r4]
    pavgb        m0, [r2]
    pavgb        m1, [r2+8]
    OP           m0, [r0]
    OP           m1, [r0+r3]
    lea          r0, [r0+2*r3]
    ; second two rows
    mova         m0, [r1]
    mova         m1, [r1+r4]
    lea          r1, [r1+2*r4]
    pavgb        m0, [r2+16]
    pavgb        m1, [r2+24]
    OP           m0, [r0]
    OP           m1, [r0+r3]
    lea          r0, [r0+2*r3]
    add          r2, 32
    sub          r5d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PIXELS8_L2 put
PIXELS8_L2 avg

; void pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
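; 16-pixel-wide variant: each row is handled as two 8-byte MMX halves, src2 is
; packed at stride 16, and the main loop processes two rows per iteration.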
%macro PIXELS16_L2 1
%define OP op_%1
cglobal %1_pixels16_l2, 6,6
    movsxdifnidn r3, r3d
    movsxdifnidn r4, r4d
    test         r5d, 1
    je .loop
    ; h is odd: handle one row before entering the 2-row loop
    mova         m0, [r1]
    mova         m1, [r1+8]
    pavgb        m0, [r2]
    pavgb        m1, [r2+8]
    add          r1, r4
    add          r2, 16
    OP           m0, [r0]
    OP           m1, [r0+8]
    add          r0, r3
    dec          r5d
.loop:
    ; first row (two 8-byte halves)
    mova         m0, [r1]
    mova         m1, [r1+8]
    add          r1, r4
    pavgb        m0, [r2]
    pavgb        m1, [r2+8]
    OP           m0, [r0]
    OP           m1, [r0+8]
    add          r0, r3
    ; second row
    mova         m0, [r1]
    mova         m1, [r1+8]
    add          r1, r4
    pavgb        m0, [r2+16]
    pavgb        m1, [r2+24]
    OP           m0, [r0]
    OP           m1, [r0+8]
    add          r0, r3
    add          r2, 32
    sub          r5d, 2
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PIXELS16_L2 put
PIXELS16_L2 avg