annotate ffmpeg/libavcodec/x86/pngdsp.asm @ 13:844d341cf643 tip

Back up before ISMIR
author Yading Song <yading.song@eecs.qmul.ac.uk>
date Thu, 31 Oct 2013 13:17:06 +0000
parents 6840f77b83aa
children
;******************************************************************************
;* x86 optimizations for PNG decoding
;*
;* Copyright (c) 2008 Loren Merritt <lorenm@u.washington.edu>
;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

cextern pw_255

SECTION_TEXT

; %1 = nr. of xmm registers used
%macro ADD_BYTES_FN 1
cglobal add_bytes_l2, 4, 6, %1, dst, src1, src2, wa, w, i
%if ARCH_X86_64
    movsxd  waq, wad
%endif
    xor     iq, iq

    ; vector loop
    mov     wq, waq
    and     waq, ~(mmsize*2-1)
    jmp     .end_v
.loop_v:
    mova    m0, [src1q+iq]
    mova    m1, [src1q+iq+mmsize]
    paddb   m0, [src2q+iq]
    paddb   m1, [src2q+iq+mmsize]
    mova    [dstq+iq], m0
    mova    [dstq+iq+mmsize], m1
    add     iq, mmsize*2
.end_v:
    cmp     iq, waq
    jl      .loop_v

%if mmsize == 16
    ; MMX loop: 8 bytes at a time for the tail left by the 16-byte loop
    mov     waq, wq
    and     waq, ~7
    jmp     .end_l
.loop_l:
    movq    mm0, [src1q+iq]
    paddb   mm0, [src2q+iq]
    movq    [dstq+iq], mm0
    add     iq, 8
.end_l:
    cmp     iq, waq
    jl      .loop_l
%endif

    ; scalar loop for leftover
    jmp     .end_s
.loop_s:
    mov     wab, [src1q+iq]
    add     wab, [src2q+iq]
    mov     [dstq+iq], wab
    inc     iq
.end_s:
    cmp     iq, wq
    jl      .loop_s
    REP_RET
%endmacro

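; For reference, a minimal C sketch of what add_bytes_l2 computes: a wrapping
; per-byte sum of two rows. The prototype is assumed from the corresponding
; libavcodec C code, not taken from this file:
;
;     #include <stdint.h>
;
;     static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1,
;                                uint8_t *src2, int w)
;     {
;         for (int i = 0; i < w; i++)
;             dst[i] = src1[i] + src2[i];   /* mod-256 addition per byte */
;     }
;
; The macro above does the same work mmsize*2 bytes, then 8 bytes, then one
; byte at a time.
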
%if ARCH_X86_32
INIT_MMX mmx
ADD_BYTES_FN 0
%endif

INIT_XMM sse2
ADD_BYTES_FN 2

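; %1 = nr. of xmm registers used
; Per byte: dst[x] = src[x] + paeth(dst[x-bpp], top[x], top[x-bpp]), i.e. the
; PNG Paeth reconstruction with the predictor chosen from the left, top and
; top-left neighbours.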
%macro ADD_PAETH_PRED_FN 1
cglobal add_png_paeth_prediction, 5, 7, %1, dst, src, top, w, bpp, end, cntr
%if ARCH_X86_64
    movsxd  bppq, bppd
    movsxd  wq, wd
%endif
    lea     endq, [dstq+wq-(mmsize/2-1)]
    ; make topq/srcq offsets relative to dstq; point dstq one pixel back,
    ; so [dstq] is the "left" neighbour while we write at dstq+bppq
    sub     topq, dstq
    sub     srcq, dstq
    sub     dstq, bppq
    pxor    m7, m7                  ; zero register for byte<->word unpacking

    PUSH    dstq
    ; cntr = number of (mmsize/2)-byte groups per pixel, minus one
    lea     cntrq, [bppq-1]
    shr     cntrq, 2 + mmsize/16
.bpp_loop:
    lea     dstq, [dstq+cntrq*(mmsize/2)]
    movh    m0, [dstq]              ; m0 = left bytes (a)
    movh    m1, [topq+dstq]         ; m1 = top-left bytes (c)
    punpcklbw m0, m7
    punpcklbw m1, m7
    add     dstq, bppq
.loop:
    mova    m2, m1                  ; m2 = c (top-left)
    movh    m1, [topq+dstq]         ; m1 = b (top)
    mova    m3, m2
    punpcklbw m1, m7
    mova    m4, m2
    psubw   m3, m1                  ; m3 = c - b
    psubw   m4, m0                  ; m4 = c - a
    mova    m5, m3
    paddw   m5, m4                  ; m5 = 2c - a - b
%if cpuflag(ssse3)
    pabsw   m3, m3                  ; pa = |b - c|
    pabsw   m4, m4                  ; pb = |a - c|
    pabsw   m5, m5                  ; pc = |a + b - 2c|
%else ; !cpuflag(ssse3): emulate absolute value as max(x, -x)
    psubw   m7, m5
    pmaxsw  m5, m7
    pxor    m6, m6
    pxor    m7, m7
    psubw   m6, m3
    psubw   m7, m4
    pmaxsw  m3, m6
    pmaxsw  m4, m7
    pxor    m7, m7
%endif ; cpuflag(ssse3)
    ; branchless select: prefer a (left) if pa <= min(pb, pc),
    ; else b (top) if pb <= pc, else c (top-left)
    mova    m6, m4
    pminsw  m6, m5                  ; m6 = min(pb, pc)
    pcmpgtw m3, m6                  ; m3 = pa > min(pb, pc) -> don't take a
    pcmpgtw m4, m5                  ; m4 = pb > pc           -> prefer c over b
    mova    m6, m4
    pand    m4, m3                  ; mask for c
    pandn   m6, m3                  ; mask for b
    pandn   m3, m0                  ; a where a is chosen
    movh    m0, [srcq+dstq]
    pand    m6, m1                  ; b where b is chosen
    pand    m2, m4                  ; c where c is chosen
    punpcklbw m0, m7
    paddw   m0, m6
    paddw   m3, m2
    paddw   m0, m3                  ; m0 = src + selected predictor
    pand    m0, [pw_255]            ; keep the low byte of each word
    mova    m3, m0
    packuswb m3, m3
    movh    [dstq], m3
    add     dstq, bppq
    cmp     dstq, endq
    jle     .loop

    mov     dstq, [rsp]             ; rewind dst for the next byte group
    dec     cntrq
    jge     .bpp_loop
    POP     dstq
    RET
%endmacro

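; For reference, a minimal C sketch of the PNG Paeth reconstruction that the
; loop above implements branchlessly. The names, the prototype and the
; assumption that the caller handles the first bpp bytes of the row are
; illustrative, not taken from this file:
;
;     #include <stdint.h>
;     #include <stdlib.h>
;
;     static int paeth_predict(int a, int b, int c)    /* left, top, top-left */
;     {
;         int p  = a + b - c;
;         int pa = abs(p - a);                          /* = |b - c|          */
;         int pb = abs(p - b);                          /* = |a - c|          */
;         int pc = abs(p - c);                          /* = |a + b - 2c|     */
;         if (pa <= pb && pa <= pc)
;             return a;
;         return pb <= pc ? b : c;
;     }
;
;     static void add_paeth_c(uint8_t *dst, const uint8_t *src,
;                             const uint8_t *top, int w, int bpp)
;     {
;         for (int x = 0; x < w; x++)      /* x - bpp < 0 handled by caller */
;             dst[x] = src[x] + paeth_predict(dst[x - bpp], top[x],
;                                             top[x - bpp]);
;     }
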
INIT_MMX mmxext
ADD_PAETH_PRED_FN 0

; SSSE3 build: same MMX-register loop, but pabsw replaces the
; psubw/pmaxsw absolute-value emulation above
INIT_MMX ssse3
ADD_PAETH_PRED_FN 0
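
; Note: with x86inc's cglobal, these instantiations are expected to export
; ff_add_bytes_l2_{mmx,sse2} and ff_add_png_paeth_prediction_{mmxext,ssse3};
; the C init code that hooks them up lives outside this file.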