;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Oskar Arvidsson <oskar@irock.se>
;*          Loren Merritt <lorenm@u.washington.edu>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

pw_pixel_max: times 8 dw ((1 << 10)-1)

SECTION .text

cextern pw_2
cextern pw_3
cextern pw_4

; out: %4 = |%1-%2|-%3
; clobbers: %5
%macro ABS_SUB 5
    psubusw %5, %2, %1
    psubusw %4, %1, %2
    por %4, %5
    psubw %4, %3
%endmacro
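; Note (added, not in the x264 original): one of the two saturating
; subtractions is always zero, so OR-ing them yields |%1-%2| without a
; compare; the final psubw leaves a value that is negative exactly when
; |%1-%2| < %3.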

; out: %4 = |%1-%2|<%3
%macro DIFF_LT 5
    psubusw %4, %2, %1
    psubusw %5, %1, %2
    por %5, %4 ; |%1-%2|
    pxor %4, %4
    psubw %5, %3 ; |%1-%2|-%3
    pcmpgtw %4, %5 ; 0 > |%1-%2|-%3
%endmacro
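; Note (added): unlike ABS_SUB, DIFF_LT returns a ready-made per-word mask
; in %4: all ones where |%1-%2| < %3, zero elsewhere.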

%macro LOAD_AB 4
    movd %1, %3
    movd %2, %4
    SPLATW %1, %1
    SPLATW %2, %2
%endmacro

; in: %2=tc reg
; out: %1=splatted tc
%macro LOAD_TC 2
    movd %1, [%2]
    punpcklbw %1, %1
%if mmsize == 8
    pshufw %1, %1, 0
%else
    pshuflw %1, %1, 01010000b
    pshufd %1, %1, 01010000b
%endif
    psraw %1, 6
%endmacro
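; Note (added): the byte duplication followed by "psraw 6" effectively
; multiplies each int8 tc0 value by 4 (the 10-bit scale factor) while
; keeping tc0 = -1 (filtering disabled) negative; each tc0 value is
; splatted over the four luma pixels it applies to.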

; in: %1=p1, %2=p0, %3=q0, %4=q1
; %5=alpha, %6=beta, %7-%9=tmp
; out: %7=mask
%macro LOAD_MASK 9
    ABS_SUB %2, %3, %5, %8, %7 ; |p0-q0| - alpha
    ABS_SUB %1, %2, %6, %9, %7 ; |p1-p0| - beta
    pand %8, %9
    ABS_SUB %3, %4, %6, %9, %7 ; |q1-q0| - beta
    pxor %7, %7
    pand %8, %9
    pcmpgtw %7, %8
%endmacro
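; Note (added): each ABS_SUB result has its sign bit set exactly when its
; condition holds, so AND-ing the three results and testing the sign with
; pcmpgtw yields an all-ones word wherever |p0-q0| < alpha, |p1-p0| < beta
; and |q1-q0| < beta all hold at once.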

; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
; out: %1=p0', %2=q0'
%macro DEBLOCK_P0_Q0 7
    psubw %3, %4
    pxor %7, %7
    paddw %3, [pw_4]
    psubw %7, %5
    psubw %6, %2, %1
    psllw %6, 2
    paddw %3, %6
    psraw %3, 3
    mova %6, [pw_pixel_max]
    CLIPW %3, %7, %5
    pxor %7, %7
    paddw %1, %3
    psubw %2, %3
    CLIPW %1, %7, %6
    CLIPW %2, %7, %6
%endmacro
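; Note (added): this corresponds to the normal (bS < 4) H.264 p0/q0 update,
;   delta = clip3(-tc, tc, ((q0-p0)*4 + (p1-q1) + 4) >> 3)
;   p0'   = clip(p0 + delta, 0, pixel_max)
;   q0'   = clip(q0 - delta, 0, pixel_max)
; where %5 holds the per-pixel tc, already zeroed where filtering is off.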

; in: %1=x2, %2=x1, %3=p0, %4=q0 %5=mask&tc, %6=tmp
%macro LUMA_Q1 6
    pavgw %6, %3, %4 ; (p0+q0+1)>>1
    paddw %1, %6
    pxor %6, %6
    psraw %1, 1
    psubw %6, %5
    psubw %1, %2
    CLIPW %1, %6, %5
    paddw %1, %2
%endmacro
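; Note (added): implements the p1/q1 update of the normal filter,
;   x1' = x1 + clip3(-tc, tc, (x2 + ((p0+q0+1)>>1) - 2*x1) >> 1)
; with %5 holding tc masked by the |p2-p0| < beta (resp. |q2-q0| < beta) test.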

%macro LUMA_DEBLOCK_ONE 3
    DIFF_LT m5, %1, bm, m4, m6
    pxor m6, m6
    mova %3, m4
    pcmpgtw m6, tcm
    pand m4, tcm
    pandn m6, m7
    pand m4, m6
    LUMA_Q1 m5, %2, m1, m2, m4, m6
%endmacro

%macro LUMA_H_STORE 2
%if mmsize == 8
    movq [r0-4], m0
    movq [r0+r1-4], m1
    movq [r0+r1*2-4], m2
    movq [r0+%2-4], m3
%else
    movq [r0-4], m0
    movhps [r0+r1-4], m0
    movq [r0+r1*2-4], m1
    movhps [%1-4], m1
    movq [%1+r1-4], m2
    movhps [%1+r1*2-4], m2
    movq [%1+%2-4], m3
    movhps [%1+r1*4-4], m3
%endif
%endmacro

%macro DEBLOCK_LUMA 0
;-----------------------------------------------------------------------------
; void deblock_v_luma( uint16_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_v_luma_10, 5,5,8*(mmsize/16)
%assign pad 5*mmsize+12-(stack_offset&15)
%define tcm [rsp]
%define ms1 [rsp+mmsize]
%define ms2 [rsp+mmsize*2]
%define am [rsp+mmsize*3]
%define bm [rsp+mmsize*4]
    SUB rsp, pad
    shl r2d, 2
    shl r3d, 2
    LOAD_AB m4, m5, r2d, r3d
    mov r3, 32/mmsize
    mov r2, r0
    sub r0, r1
    mova am, m4
    sub r0, r1
    mova bm, m5
    sub r0, r1
.loop:
    mova m0, [r0+r1]
    mova m1, [r0+r1*2]
    mova m2, [r2]
    mova m3, [r2+r1]

    LOAD_MASK m0, m1, m2, m3, am, bm, m7, m4, m6
    LOAD_TC m6, r4
    mova tcm, m6

    mova m5, [r0]
    LUMA_DEBLOCK_ONE m1, m0, ms1
    mova [r0+r1], m5

    mova m5, [r2+r1*2]
    LUMA_DEBLOCK_ONE m2, m3, ms2
    mova [r2+r1], m5

    pxor m5, m5
    mova m6, tcm
    pcmpgtw m5, tcm
    psubw m6, ms1
    pandn m5, m7
    psubw m6, ms2
    pand m5, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
    mova [r0+r1*2], m1
    mova [r2], m2

    add r0, mmsize
    add r2, mmsize
    add r4, mmsize/8
    dec r3
    jg .loop
    ADD rsp, pad
    RET

cglobal deblock_h_luma_10, 5,6,8*(mmsize/16)
%assign pad 7*mmsize+12-(stack_offset&15)
%define tcm [rsp]
%define ms1 [rsp+mmsize]
%define ms2 [rsp+mmsize*2]
%define p1m [rsp+mmsize*3]
%define p2m [rsp+mmsize*4]
%define am [rsp+mmsize*5]
%define bm [rsp+mmsize*6]
    SUB rsp, pad
    shl r2d, 2
    shl r3d, 2
    LOAD_AB m4, m5, r2d, r3d
    mov r3, r1
    mova am, m4
    add r3, r1
    mov r5, 32/mmsize
    mova bm, m5
    add r3, r1
%if mmsize == 16
    mov r2, r0
    add r2, r3
%endif
.loop:
%if mmsize == 8
    movq m2, [r0-8] ; y q2 q1 q0
    movq m7, [r0+0]
    movq m5, [r0+r1-8]
    movq m3, [r0+r1+0]
    movq m0, [r0+r1*2-8]
    movq m6, [r0+r1*2+0]
    movq m1, [r0+r3-8]
    TRANSPOSE4x4W 2, 5, 0, 1, 4
    SWAP 2, 7
    movq m7, [r0+r3]
    TRANSPOSE4x4W 2, 3, 6, 7, 4
%else
    movu m5, [r0-8] ; y q2 q1 q0 p0 p1 p2 x
    movu m0, [r0+r1-8]
    movu m2, [r0+r1*2-8]
    movu m3, [r2-8]
    TRANSPOSE4x4W 5, 0, 2, 3, 6
    mova tcm, m3

    movu m4, [r2+r1-8]
    movu m1, [r2+r1*2-8]
    movu m3, [r2+r3-8]
    movu m7, [r2+r1*4-8]
    TRANSPOSE4x4W 4, 1, 3, 7, 6

    mova m6, tcm
    punpcklqdq m6, m7
    punpckhqdq m5, m4
    SBUTTERFLY qdq, 0, 1, 7
    SBUTTERFLY qdq, 2, 3, 7
%endif

    mova p2m, m6
    LOAD_MASK m0, m1, m2, m3, am, bm, m7, m4, m6
    LOAD_TC m6, r4
    mova tcm, m6

    LUMA_DEBLOCK_ONE m1, m0, ms1
    mova p1m, m5

    mova m5, p2m
    LUMA_DEBLOCK_ONE m2, m3, ms2
    mova p2m, m5

    pxor m5, m5
    mova m6, tcm
    pcmpgtw m5, tcm
    psubw m6, ms1
    pandn m5, m7
    psubw m6, ms2
    pand m5, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
    mova m0, p1m
    mova m3, p2m
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    LUMA_H_STORE r2, r3

    add r4, mmsize/8
    lea r0, [r0+r1*(mmsize/2)]
    lea r2, [r2+r1*(mmsize/2)]
    dec r5
    jg .loop
    ADD rsp, pad
    RET
%endmacro

%if ARCH_X86_64
; in: m0=p1, m1=p0, m2=q0, m3=q1, m8=p2, m9=q2
; m12=alpha, m13=beta
; out: m0=p1', m3=q1', m1=p0', m2=q0'
; clobbers: m4, m5, m6, m7, m10, m11, m14
%macro DEBLOCK_LUMA_INTER_SSE2 0
    LOAD_MASK m0, m1, m2, m3, m12, m13, m7, m4, m6
    LOAD_TC m6, r4
    DIFF_LT m8, m1, m13, m10, m4
    DIFF_LT m9, m2, m13, m11, m4
    pand m6, m7

    mova m14, m6
    pxor m4, m4
    pcmpgtw m6, m4
    pand m6, m14

    mova m5, m10
    pand m5, m6
    LUMA_Q1 m8, m0, m1, m2, m5, m4

    mova m5, m11
    pand m5, m6
    LUMA_Q1 m9, m3, m1, m2, m5, m4

    pxor m4, m4
    psubw m6, m10
    pcmpgtw m4, m14
    pandn m4, m7
    psubw m6, m11
    pand m4, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m4, m5, m6

    SWAP 0, 8
    SWAP 3, 9
%endmacro

%macro DEBLOCK_LUMA_64 0
cglobal deblock_v_luma_10, 5,5,15
%define p2 m8
%define p1 m0
%define p0 m1
%define q0 m2
%define q1 m3
%define q2 m9
%define mask0 m7
%define mask1 m10
%define mask2 m11
    shl r2d, 2
    shl r3d, 2
    LOAD_AB m12, m13, r2d, r3d
    mov r2, r0
    sub r0, r1
    sub r0, r1
    sub r0, r1
    mov r3, 2
.loop:
    mova p2, [r0]
    mova p1, [r0+r1]
    mova p0, [r0+r1*2]
    mova q0, [r2]
    mova q1, [r2+r1]
    mova q2, [r2+r1*2]
    DEBLOCK_LUMA_INTER_SSE2
    mova [r0+r1], p1
    mova [r0+r1*2], p0
    mova [r2], q0
    mova [r2+r1], q1
    add r0, mmsize
    add r2, mmsize
    add r4, 2
    dec r3
    jg .loop
    REP_RET

cglobal deblock_h_luma_10, 5,7,15
    shl r2d, 2
    shl r3d, 2
    LOAD_AB m12, m13, r2d, r3d
    mov r2, r1
    add r2, r1
    add r2, r1
    mov r5, r0
    add r5, r2
    mov r6, 2
.loop:
    movu m8, [r0-8] ; y q2 q1 q0 p0 p1 p2 x
    movu m0, [r0+r1-8]
    movu m2, [r0+r1*2-8]
    movu m9, [r5-8]
    movu m5, [r5+r1-8]
    movu m1, [r5+r1*2-8]
    movu m3, [r5+r2-8]
    movu m7, [r5+r1*4-8]

    TRANSPOSE4x4W 8, 0, 2, 9, 10
    TRANSPOSE4x4W 5, 1, 3, 7, 10

    punpckhqdq m8, m5
    SBUTTERFLY qdq, 0, 1, 10
    SBUTTERFLY qdq, 2, 3, 10
    punpcklqdq m9, m7

    DEBLOCK_LUMA_INTER_SSE2

    TRANSPOSE4x4W 0, 1, 2, 3, 4
    LUMA_H_STORE r5, r2
    add r4, 2
    lea r0, [r0+r1*8]
    lea r5, [r5+r1*8]
    dec r6
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
DEBLOCK_LUMA_64
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA_64
%endif
%endif

%macro SWAPMOVA 2
%ifid %1
    SWAP %1, %2
%else
    mova %1, %2
%endif
%endmacro

; in: t0-t2: tmp registers
; %1=p0 %2=p1 %3=p2 %4=p3 %5=q0 %6=q1 %7=mask0
; %8=mask1p %9=2 %10=p0' %11=p1' %12=p2'
%macro LUMA_INTRA_P012 12 ; p0..p3 in memory
%if ARCH_X86_64
    paddw t0, %3, %2
    mova t2, %4
    paddw t2, %3
%else
    mova t0, %3
    mova t2, %4
    paddw t0, %2
    paddw t2, %3
%endif
    paddw t0, %1
    paddw t2, t2
    paddw t0, %5
    paddw t2, %9
    paddw t0, %9 ; (p2 + p1 + p0 + q0 + 2)
    paddw t2, t0 ; (2*p3 + 3*p2 + p1 + p0 + q0 + 4)

    psrlw t2, 3
    psrlw t1, t0, 2
    psubw t2, %3
    psubw t1, %2
    pand t2, %8
    pand t1, %8
    paddw t2, %3
    paddw t1, %2
    SWAPMOVA %11, t1

    psubw t1, t0, %3
    paddw t0, t0
    psubw t1, %5
    psubw t0, %3
    paddw t1, %6
    paddw t1, %2
    paddw t0, %6
    psrlw t1, 2 ; (2*p1 + p0 + q1 + 2)/4
    psrlw t0, 3 ; (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4)>>3

    pxor t0, t1
    pxor t1, %1
    pand t0, %8
    pand t1, %7
    pxor t0, t1
    pxor t0, %1
    SWAPMOVA %10, t0
    SWAPMOVA %12, t2
%endmacro

%macro LUMA_INTRA_INIT 1
%xdefine pad %1*mmsize+((gprsize*3) % mmsize)-(stack_offset&15)
%define t0 m4
%define t1 m5
%define t2 m6
%define t3 m7
%assign i 4
%rep %1
    CAT_XDEFINE t, i, [rsp+mmsize*(i-4)]
%assign i i+1
%endrep
    SUB rsp, pad
%endmacro

; in: %1-%3=tmp, %4=p2, %5=q2
%macro LUMA_INTRA_INTER 5
    LOAD_AB t0, t1, r2d, r3d
    mova %1, t0
    LOAD_MASK m0, m1, m2, m3, %1, t1, t0, t2, t3
%if ARCH_X86_64
    mova %2, t0 ; mask0
    psrlw t3, %1, 2
%else
    mova t3, %1
    mova %2, t0 ; mask0
    psrlw t3, 2
%endif
    paddw t3, [pw_2] ; alpha/4+2
    DIFF_LT m1, m2, t3, t2, t0 ; t2 = |p0-q0| < alpha/4+2
    pand t2, %2
    mova t3, %5 ; q2
    mova %1, t2 ; mask1
    DIFF_LT t3, m2, t1, t2, t0 ; t2 = |q2-q0| < beta
    pand t2, %1
    mova t3, %4 ; p2
    mova %3, t2 ; mask1q
    DIFF_LT t3, m1, t1, t2, t0 ; t2 = |p2-p0| < beta
    pand t2, %1
    mova %1, t2 ; mask1p
%endmacro

%macro LUMA_H_INTRA_LOAD 0
%if mmsize == 8
    movu t0, [r0-8]
    movu t1, [r0+r1-8]
    movu m0, [r0+r1*2-8]
    movu m1, [r0+r4-8]
    TRANSPOSE4x4W 4, 5, 0, 1, 2
    mova t4, t0 ; p3
    mova t5, t1 ; p2

    movu m2, [r0]
    movu m3, [r0+r1]
    movu t0, [r0+r1*2]
    movu t1, [r0+r4]
    TRANSPOSE4x4W 2, 3, 4, 5, 6
    mova t6, t0 ; q2
    mova t7, t1 ; q3
%else
    movu t0, [r0-8]
    movu t1, [r0+r1-8]
    movu m0, [r0+r1*2-8]
    movu m1, [r0+r5-8]
    movu m2, [r4-8]
    movu m3, [r4+r1-8]
    movu t2, [r4+r1*2-8]
    movu t3, [r4+r5-8]
    TRANSPOSE8x8W 4, 5, 0, 1, 2, 3, 6, 7, t4, t5
    mova t4, t0 ; p3
    mova t5, t1 ; p2
    mova t6, t2 ; q2
    mova t7, t3 ; q3
%endif
%endmacro

; in: %1=q3 %2=q2' %3=q1' %4=q0' %5=p0' %6=p1' %7=p2' %8=p3 %9=tmp
%macro LUMA_H_INTRA_STORE 9
%if mmsize == 8
    TRANSPOSE4x4W %1, %2, %3, %4, %9
    movq [r0-8], m%1
    movq [r0+r1-8], m%2
    movq [r0+r1*2-8], m%3
    movq [r0+r4-8], m%4
    movq m%1, %8
    TRANSPOSE4x4W %5, %6, %7, %1, %9
    movq [r0], m%5
    movq [r0+r1], m%6
    movq [r0+r1*2], m%7
    movq [r0+r4], m%1
%else
    TRANSPOSE2x4x4W %1, %2, %3, %4, %9
    movq [r0-8], m%1
    movq [r0+r1-8], m%2
    movq [r0+r1*2-8], m%3
    movq [r0+r5-8], m%4
    movhps [r4-8], m%1
    movhps [r4+r1-8], m%2
    movhps [r4+r1*2-8], m%3
    movhps [r4+r5-8], m%4
%ifnum %8
    SWAP %1, %8
%else
    mova m%1, %8
%endif
    TRANSPOSE2x4x4W %5, %6, %7, %1, %9
    movq [r0], m%5
    movq [r0+r1], m%6
    movq [r0+r1*2], m%7
    movq [r0+r5], m%1
    movhps [r4], m%5
    movhps [r4+r1], m%6
    movhps [r4+r1*2], m%7
    movhps [r4+r5], m%1
%endif
%endmacro

%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
%macro DEBLOCK_LUMA_INTRA_64 0
cglobal deblock_v_luma_intra_10, 4,7,16
%define t0 m1
%define t1 m2
%define t2 m4
%define p2 m8
%define p1 m9
%define p0 m10
%define q0 m11
%define q1 m12
%define q2 m13
%define aa m5
%define bb m14
    lea r4, [r1*4]
    lea r5, [r1*3] ; 3*stride
    neg r4
    add r4, r0 ; pix-4*stride
    mov r6, 2
    mova m0, [pw_2]
    shl r2d, 2
    shl r3d, 2
    LOAD_AB aa, bb, r2d, r3d
.loop:
    mova p2, [r4+r1]
    mova p1, [r4+2*r1]
    mova p0, [r4+r5]
    mova q0, [r0]
    mova q1, [r0+r1]
    mova q2, [r0+2*r1]

    LOAD_MASK p1, p0, q0, q1, aa, bb, m3, t0, t1
    mova t2, aa
    psrlw t2, 2
    paddw t2, m0 ; alpha/4+2
    DIFF_LT p0, q0, t2, m6, t0 ; m6 = |p0-q0| < alpha/4+2
    DIFF_LT p2, p0, bb, t1, t0 ; t1 = |p2-p0| < beta
    DIFF_LT q2, q0, bb, m7, t0 ; m7 = |q2-q0| < beta
    pand m6, m3
    pand m7, m6
    pand m6, t1
    LUMA_INTRA_P012 p0, p1, p2, [r4], q0, q1, m3, m6, m0, [r4+r5], [r4+2*r1], [r4+r1]
    LUMA_INTRA_P012 q0, q1, q2, [r0+r5], p0, p1, m3, m7, m0, [r0], [r0+r1], [r0+2*r1]
    add r0, mmsize
    add r4, mmsize
    dec r6
    jg .loop
    REP_RET

;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,16
%define t0 m15
%define t1 m14
%define t2 m2
%define q3 m5
%define q2 m8
%define q1 m9
%define q0 m10
%define p0 m11
%define p1 m12
%define p2 m13
%define p3 m4
%define spill [rsp]
%assign pad 24-(stack_offset&15)
    SUB rsp, pad
    lea r4, [r1*4]
    lea r5, [r1*3] ; 3*stride
    add r4, r0 ; pix+4*stride
    mov r6, 2
    mova m0, [pw_2]
    shl r2d, 2
    shl r3d, 2
.loop:
    movu q3, [r0-8]
    movu q2, [r0+r1-8]
    movu q1, [r0+r1*2-8]
    movu q0, [r0+r5-8]
    movu p0, [r4-8]
    movu p1, [r4+r1-8]
    movu p2, [r4+r1*2-8]
    movu p3, [r4+r5-8]
    TRANSPOSE8x8W 5, 8, 9, 10, 11, 12, 13, 4, 1

    LOAD_AB m1, m2, r2d, r3d
    LOAD_MASK q1, q0, p0, p1, m1, m2, m3, t0, t1
    psrlw m1, 2
    paddw m1, m0 ; alpha/4+2
    DIFF_LT p0, q0, m1, m6, t0 ; m6 = |p0-q0| < alpha/4+2
    DIFF_LT q2, q0, m2, t1, t0 ; t1 = |q2-q0| < beta
    DIFF_LT p0, p2, m2, m7, t0 ; m7 = |p2-p0| < beta
    pand m6, m3
    pand m7, m6
    pand m6, t1

    mova spill, q3
    LUMA_INTRA_P012 q0, q1, q2, q3, p0, p1, m3, m6, m0, m5, m1, q2
    LUMA_INTRA_P012 p0, p1, p2, p3, q0, q1, m3, m7, m0, p0, m6, p2
    mova m7, spill

    LUMA_H_INTRA_STORE 7, 8, 1, 5, 11, 6, 13, 4, 14

    lea r0, [r0+r1*8]
    lea r4, [r4+r1*8]
    dec r6
    jg .loop
    ADD rsp, pad
    RET
%endmacro

INIT_XMM sse2
DEBLOCK_LUMA_INTRA_64
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA_INTRA_64
%endif

%endif

%macro DEBLOCK_LUMA_INTRA 0
;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_v_luma_intra_10, 4,7,8*(mmsize/16)
    LUMA_INTRA_INIT 3
    lea r4, [r1*4]
    lea r5, [r1*3]
    neg r4
    add r4, r0
    mov r6, 32/mmsize
    shl r2d, 2
    shl r3d, 2
.loop:
    mova m0, [r4+r1*2] ; p1
    mova m1, [r4+r5] ; p0
    mova m2, [r0] ; q0
    mova m3, [r0+r1] ; q1
    LUMA_INTRA_INTER t4, t5, t6, [r4+r1], [r0+r1*2]
    LUMA_INTRA_P012 m1, m0, t3, [r4], m2, m3, t5, t4, [pw_2], [r4+r5], [r4+2*r1], [r4+r1]
    mova t3, [r0+r1*2] ; q2
    LUMA_INTRA_P012 m2, m3, t3, [r0+r5], m1, m0, t5, t6, [pw_2], [r0], [r0+r1], [r0+2*r1]
    add r0, mmsize
    add r4, mmsize
    dec r6
    jg .loop
    ADD rsp, pad
    RET

;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,8*(mmsize/16)
    LUMA_INTRA_INIT 8
%if mmsize == 8
    lea r4, [r1*3]
    mov r5, 32/mmsize
%else
    lea r4, [r1*4]
    lea r5, [r1*3] ; 3*stride
    add r4, r0 ; pix+4*stride
    mov r6, 32/mmsize
%endif
    shl r2d, 2
    shl r3d, 2
.loop:
    LUMA_H_INTRA_LOAD
    LUMA_INTRA_INTER t8, t9, t10, t5, t6

    LUMA_INTRA_P012 m1, m0, t3, t4, m2, m3, t9, t8, [pw_2], t8, t5, t11
    mova t3, t6 ; q2
    LUMA_INTRA_P012 m2, m3, t3, t7, m1, m0, t9, t10, [pw_2], m4, t6, m5

    mova m2, t4
    mova m0, t11
    mova m1, t5
    mova m3, t8
    mova m6, t6

    LUMA_H_INTRA_STORE 2, 0, 1, 3, 4, 6, 5, t7, 7

    lea r0, [r0+r1*(mmsize/2)]
%if mmsize == 8
    dec r5
%else
    lea r4, [r4+r1*(mmsize/2)]
    dec r6
%endif
    jg .loop
    ADD rsp, pad
    RET
%endmacro

%if ARCH_X86_64 == 0
INIT_MMX mmxext
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
INIT_XMM sse2
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
%endif
%endif

; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
; out: %1=p0', %2=q0'
%macro CHROMA_DEBLOCK_P0_Q0_INTRA 7
    mova %6, [pw_2]
    paddw %6, %3
    paddw %6, %4
    paddw %7, %6, %2
    paddw %6, %1
    paddw %6, %3
    paddw %7, %4
    psraw %6, 2
    psraw %7, 2
    psubw %6, %1
    psubw %7, %2
    pand %6, %5
    pand %7, %5
    paddw %1, %6
    paddw %2, %7
%endmacro
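; Note (added): this matches the chroma intra (bS = 4) filter,
;   p0' = (2*p1 + p0 + q1 + 2) >> 2
;   q0' = (2*q1 + q0 + p1 + 2) >> 2
; applied only where the mask in %5 is set.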

%macro CHROMA_V_LOAD 1
    mova m0, [r0] ; p1
    mova m1, [r0+r1] ; p0
    mova m2, [%1] ; q0
    mova m3, [%1+r1] ; q1
%endmacro

%macro CHROMA_V_STORE 0
    mova [r0+1*r1], m1
    mova [r0+2*r1], m2
%endmacro

%macro CHROMA_V_LOAD_TC 2
    movd %1, [%2]
    punpcklbw %1, %1
    punpcklwd %1, %1
    psraw %1, 6
%endmacro
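; Note (added): expands four int8 tc0 values so that each covers a pair of
; adjacent chroma samples; as in LOAD_TC, the duplication plus "psraw 6"
; roughly scales tc0 by 4 for 10-bit while preserving the sign of tc0 = -1.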

%macro DEBLOCK_CHROMA 0
;-----------------------------------------------------------------------------
; void deblock_v_chroma( uint16_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_10, 5,7-(mmsize/16),8*(mmsize/16)
    mov r5, r0
    sub r0, r1
    sub r0, r1
    shl r2d, 2
    shl r3d, 2
%if mmsize < 16
    mov r6, 16/mmsize
.loop:
%endif
    CHROMA_V_LOAD r5
    LOAD_AB m4, m5, r2d, r3d
    LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
    pxor m4, m4
    CHROMA_V_LOAD_TC m6, r4
    psubw m6, [pw_3]
    pmaxsw m6, m4
    pand m7, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m7, m5, m6
    CHROMA_V_STORE
%if mmsize < 16
    add r0, mmsize
    add r5, mmsize
    add r4, mmsize/4
    dec r6
    jg .loop
    REP_RET
%else
    RET
%endif

;-----------------------------------------------------------------------------
; void deblock_v_chroma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_intra_10, 4,6-(mmsize/16),8*(mmsize/16)
    mov r4, r0
    sub r0, r1
    sub r0, r1
    shl r2d, 2
    shl r3d, 2
%if mmsize < 16
    mov r5, 16/mmsize
.loop:
%endif
    CHROMA_V_LOAD r4
    LOAD_AB m4, m5, r2d, r3d
    LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
    CHROMA_DEBLOCK_P0_Q0_INTRA m1, m2, m0, m3, m7, m5, m6
    CHROMA_V_STORE
%if mmsize < 16
    add r0, mmsize
    add r4, mmsize
    dec r5
    jg .loop
    REP_RET
%else
    RET
%endif
%endmacro

%if ARCH_X86_64 == 0
INIT_MMX mmxext
DEBLOCK_CHROMA
%endif
INIT_XMM sse2
DEBLOCK_CHROMA
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_CHROMA
%endif