annotate ffmpeg/libavcodec/x86/h264_deblock.asm @ 13:844d341cf643 tip

Back up before ISMIR
author Yading Song <yading.song@eecs.qmul.ac.uk>
date Thu, 31 Oct 2013 13:17:06 +0000
parents 6840f77b83aa
children
yading@10 1 ;*****************************************************************************
yading@10 2 ;* MMX/SSE2/AVX-optimized H.264 deblocking code
yading@10 3 ;*****************************************************************************
yading@10 4 ;* Copyright (C) 2005-2011 x264 project
yading@10 5 ;*
yading@10 6 ;* Authors: Loren Merritt <lorenm@u.washington.edu>
yading@10 7 ;* Jason Garrett-Glaser <darkshikari@gmail.com>
yading@10 8 ;* Oskar Arvidsson <oskar@irock.se>
yading@10 9 ;*
yading@10 10 ;* This file is part of FFmpeg.
yading@10 11 ;*
yading@10 12 ;* FFmpeg is free software; you can redistribute it and/or
yading@10 13 ;* modify it under the terms of the GNU Lesser General Public
yading@10 14 ;* License as published by the Free Software Foundation; either
yading@10 15 ;* version 2.1 of the License, or (at your option) any later version.
yading@10 16 ;*
yading@10 17 ;* FFmpeg is distributed in the hope that it will be useful,
yading@10 18 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
yading@10 19 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
yading@10 20 ;* Lesser General Public License for more details.
yading@10 21 ;*
yading@10 22 ;* You should have received a copy of the GNU Lesser General Public
yading@10 23 ;* License along with FFmpeg; if not, write to the Free Software
yading@10 24 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
yading@10 25 ;******************************************************************************
yading@10 26
yading@10 27 %include "libavutil/x86/x86util.asm"
yading@10 28
yading@10 29 SECTION_RODATA
yading@10 30
yading@10 31 pb_A1: times 16 db 0xA1
yading@10 32 pb_3_1: times 4 db 3, 1
yading@10 33
yading@10 34 SECTION .text
yading@10 35
yading@10 36 cextern pb_0
yading@10 37 cextern pb_1
yading@10 38 cextern pb_3
yading@10 39
yading@10 40 ; expands to [base],...,[base+7*stride]
yading@10 41 %define PASS8ROWS(base, base3, stride, stride3) \
yading@10 42 [base], [base+stride], [base+stride*2], [base3], \
yading@10 43 [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]
yading@10 44
yading@10 45 %define PASS8ROWS(base, base3, stride, stride3, offset) \
yading@10 46 PASS8ROWS(base+offset, base3+offset, stride, stride3)
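; e.g. with base3 = base+3*stride and stride3 = 3*stride this yields the eight
; row addresses [base+0*stride] .. [base+7*stride]; the 5-argument form simply
; shifts every address by a common byte offset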
yading@10 47
yading@10 48 ; in: 8 rows of 4 bytes in %4..%11
yading@10 49 ; out: 4 rows of 8 bytes in m0..m3
yading@10 50 %macro TRANSPOSE4x8_LOAD 11
yading@10 51 movh m0, %4
yading@10 52 movh m2, %5
yading@10 53 movh m1, %6
yading@10 54 movh m3, %7
yading@10 55 punpckl%1 m0, m2
yading@10 56 punpckl%1 m1, m3
yading@10 57 mova m2, m0
yading@10 58 punpckl%2 m0, m1
yading@10 59 punpckh%2 m2, m1
yading@10 60
yading@10 61 movh m4, %8
yading@10 62 movh m6, %9
yading@10 63 movh m5, %10
yading@10 64 movh m7, %11
yading@10 65 punpckl%1 m4, m6
yading@10 66 punpckl%1 m5, m7
yading@10 67 mova m6, m4
yading@10 68 punpckl%2 m4, m5
yading@10 69 punpckh%2 m6, m5
yading@10 70
yading@10 71 punpckh%3 m1, m0, m4
yading@10 72 punpckh%3 m3, m2, m6
yading@10 73 punpckl%3 m0, m4
yading@10 74 punpckl%3 m2, m6
yading@10 75 %endmacro
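; for the byte variant (bw, wd, dq) this is a plain 8x4 -> 4x8 transpose:
; after the three interleave passes, output row n (in m0..m3) holds byte n of
; each of the eight input rows %4..%11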
yading@10 76
yading@10 77 ; in: 4 rows of 8 bytes in m0..m3
yading@10 78 ; out: 8 rows of 4 bytes in %1..%8
yading@10 79 %macro TRANSPOSE8x4B_STORE 8
yading@10 80 punpckhdq m4, m0, m0
yading@10 81 punpckhdq m5, m1, m1
yading@10 82 punpckhdq m6, m2, m2
yading@10 83
yading@10 84 punpcklbw m0, m1
yading@10 85 punpcklbw m2, m3
yading@10 86 punpcklwd m1, m0, m2
yading@10 87 punpckhwd m0, m2
yading@10 88 movh %1, m1
yading@10 89 punpckhdq m1, m1
yading@10 90 movh %2, m1
yading@10 91 movh %3, m0
yading@10 92 punpckhdq m0, m0
yading@10 93 movh %4, m0
yading@10 94
yading@10 95 punpckhdq m3, m3
yading@10 96 punpcklbw m4, m5
yading@10 97 punpcklbw m6, m3
yading@10 98 punpcklwd m5, m4, m6
yading@10 99 punpckhwd m4, m6
yading@10 100 movh %5, m5
yading@10 101 punpckhdq m5, m5
yading@10 102 movh %6, m5
yading@10 103 movh %7, m4
yading@10 104 punpckhdq m4, m4
yading@10 105 movh %8, m4
yading@10 106 %endmacro
yading@10 107
yading@10 108 %macro TRANSPOSE4x8B_LOAD 8
yading@10 109 TRANSPOSE4x8_LOAD bw, wd, dq, %1, %2, %3, %4, %5, %6, %7, %8
yading@10 110 %endmacro
yading@10 111
yading@10 112 %macro SBUTTERFLY3 4
yading@10 113 punpckh%1 %4, %2, %3
yading@10 114 punpckl%1 %2, %3
yading@10 115 %endmacro
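; like x86util's SBUTTERFLY (used below with register numbers), but taking
; full operands, so %3 may be a memory location; the punpckh half lands in %4
; and the punpckl half in %2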
yading@10 116
yading@10 117 ; in: 8 rows of 8 (only the middle 6 pels are used) in %1..%8
yading@10 118 ; out: 6 rows of 8 in [%9+0*16] .. [%9+5*16]
yading@10 119 %macro TRANSPOSE6x8_MEM 9
yading@10 120 RESET_MM_PERMUTATION
yading@10 121 movq m0, %1
yading@10 122 movq m1, %2
yading@10 123 movq m2, %3
yading@10 124 movq m3, %4
yading@10 125 movq m4, %5
yading@10 126 movq m5, %6
yading@10 127 movq m6, %7
yading@10 128 SBUTTERFLY bw, 0, 1, 7
yading@10 129 SBUTTERFLY bw, 2, 3, 7
yading@10 130 SBUTTERFLY bw, 4, 5, 7
yading@10 131 movq [%9+0x10], m3
yading@10 132 SBUTTERFLY3 bw, m6, %8, m7
yading@10 133 SBUTTERFLY wd, 0, 2, 3
yading@10 134 SBUTTERFLY wd, 4, 6, 3
yading@10 135 punpckhdq m0, m4
yading@10 136 movq [%9+0x00], m0
yading@10 137 SBUTTERFLY3 wd, m1, [%9+0x10], m3
yading@10 138 SBUTTERFLY wd, 5, 7, 0
yading@10 139 SBUTTERFLY dq, 1, 5, 0
yading@10 140 SBUTTERFLY dq, 2, 6, 0
yading@10 141 punpckldq m3, m7
yading@10 142 movq [%9+0x10], m2
yading@10 143 movq [%9+0x20], m6
yading@10 144 movq [%9+0x30], m1
yading@10 145 movq [%9+0x40], m5
yading@10 146 movq [%9+0x50], m3
yading@10 147 RESET_MM_PERMUTATION
yading@10 148 %endmacro
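; for the luma callers below the six output rows are the transposed middle
; columns of the 8-pixel input rows, i.e. p2,p1,p0,q0,q1,q2 at 0x10-byte steps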
yading@10 149
yading@10 150 ; in: 8 rows of 8 in %1..%8
yading@10 151 ; out: 8 rows of 8 in %9..%16
yading@10 152 %macro TRANSPOSE8x8_MEM 16
yading@10 153 RESET_MM_PERMUTATION
yading@10 154 movq m0, %1
yading@10 155 movq m1, %2
yading@10 156 movq m2, %3
yading@10 157 movq m3, %4
yading@10 158 movq m4, %5
yading@10 159 movq m5, %6
yading@10 160 movq m6, %7
yading@10 161 SBUTTERFLY bw, 0, 1, 7
yading@10 162 SBUTTERFLY bw, 2, 3, 7
yading@10 163 SBUTTERFLY bw, 4, 5, 7
yading@10 164 SBUTTERFLY3 bw, m6, %8, m7
yading@10 165 movq %9, m5
yading@10 166 SBUTTERFLY wd, 0, 2, 5
yading@10 167 SBUTTERFLY wd, 4, 6, 5
yading@10 168 SBUTTERFLY wd, 1, 3, 5
yading@10 169 movq %11, m6
yading@10 170 movq m6, %9
yading@10 171 SBUTTERFLY wd, 6, 7, 5
yading@10 172 SBUTTERFLY dq, 0, 4, 5
yading@10 173 SBUTTERFLY dq, 1, 6, 5
yading@10 174 movq %9, m0
yading@10 175 movq %10, m4
yading@10 176 movq %13, m1
yading@10 177 movq %14, m6
yading@10 178 SBUTTERFLY3 dq, m2, %11, m0
yading@10 179 SBUTTERFLY dq, 3, 7, 4
yading@10 180 movq %11, m2
yading@10 181 movq %12, m0
yading@10 182 movq %15, m3
yading@10 183 movq %16, m7
yading@10 184 RESET_MM_PERMUTATION
yading@10 185 %endmacro
yading@10 186
yading@10 187 ; out: %4 = |%1-%2|>%3
yading@10 188 ; clobbers: %5
yading@10 189 %macro DIFF_GT 5
yading@10 190 %if avx_enabled == 0
yading@10 191 mova %5, %2
yading@10 192 mova %4, %1
yading@10 193 psubusb %5, %1
yading@10 194 psubusb %4, %2
yading@10 195 %else
yading@10 196 psubusb %5, %2, %1
yading@10 197 psubusb %4, %1, %2
yading@10 198 %endif
yading@10 199 por %4, %5
yading@10 200 psubusb %4, %3
yading@10 201 %endmacro
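; per byte: max(%1-%2,0) | max(%2-%1,0) == |%1-%2| (one term is always 0), and
; the final saturating subtract leaves %4 = max(|%1-%2|-%3, 0), which is
; nonzero exactly where |%1-%2| > %3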
yading@10 202
yading@10 203 ; out: %4 = 0xFF where |%1-%2| <= %3, 0x00 where |%1-%2| > %3
yading@10 204 ; clobbers: %5
yading@10 205 %macro DIFF_GT2 5
yading@10 206 %if ARCH_X86_64
yading@10 207 psubusb %5, %2, %1
yading@10 208 psubusb %4, %1, %2
yading@10 209 %else
yading@10 210 mova %5, %2
yading@10 211 mova %4, %1
yading@10 212 psubusb %5, %1
yading@10 213 psubusb %4, %2
yading@10 214 %endif
yading@10 215 psubusb %5, %3
yading@10 216 psubusb %4, %3
yading@10 217 pcmpeqb %4, %5
yading@10 218 %endmacro
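; per byte: of max(%1-%2,0) and max(%2-%1,0) one is |%1-%2| and the other 0;
; after the saturating subtract of %3 both are 0 exactly when |%1-%2| <= %3,
; so pcmpeqb sets %4 to 0xFF there and to 0 where the difference exceeds %3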
yading@10 219
yading@10 220 ; in: m0=p1 m1=p0 m2=q0 m3=q1 %1=alpha-1 %2=beta-1
yading@10 221 ; out: m5=beta-1, m7=mask, %3=alpha-1
yading@10 222 ; clobbers: m4,m6
yading@10 223 %macro LOAD_MASK 2-3
yading@10 224 movd m4, %1
yading@10 225 movd m5, %2
yading@10 226 SPLATW m4, m4
yading@10 227 SPLATW m5, m5
yading@10 228 packuswb m4, m4 ; 16x alpha-1
yading@10 229 packuswb m5, m5 ; 16x beta-1
yading@10 230 %if %0>2
yading@10 231 mova %3, m4
yading@10 232 %endif
yading@10 233 DIFF_GT m1, m2, m4, m7, m6 ; |p0-q0| > alpha-1
yading@10 234 DIFF_GT m0, m1, m5, m4, m6 ; |p1-p0| > beta-1
yading@10 235 por m7, m4
yading@10 236 DIFF_GT m3, m2, m5, m4, m6 ; |q1-q0| > beta-1
yading@10 237 por m7, m4
yading@10 238 pxor m6, m6
yading@10 239 pcmpeqb m7, m6
yading@10 240 %endmacro
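; i.e. m7 = 0xFF only where |p0-q0| <= alpha-1, |p1-p0| <= beta-1 and
; |q1-q0| <= beta-1 all hold (the H.264 per-pixel condition for filtering
; the edge)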
yading@10 241
yading@10 242 ; in: m0=p1 m1=p0 m2=q0 m3=q1 m7=(tc&mask)
yading@10 243 ; out: m1=p0' m2=q0'
yading@10 244 ; clobbers: m0,3-6
yading@10 245 %macro DEBLOCK_P0_Q0 0
yading@10 246 pcmpeqb m4, m4
yading@10 247 pxor m5, m1, m2 ; p0^q0
yading@10 248 pxor m3, m4
yading@10 249 pand m5, [pb_1] ; (p0^q0)&1
yading@10 250 pavgb m3, m0 ; (p1 - q1 + 256)>>1
yading@10 251 pxor m4, m1
yading@10 252 pavgb m3, [pb_3] ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2
yading@10 253 pavgb m4, m2 ; (q0 - p0 + 256)>>1
yading@10 254 pavgb m3, m5
yading@10 255 mova m6, [pb_A1]
yading@10 256 paddusb m3, m4 ; d+128+33
yading@10 257 psubusb m6, m3
yading@10 258 psubusb m3, [pb_A1]
yading@10 259 pminub m6, m7
yading@10 260 pminub m3, m7
yading@10 261 psubusb m1, m6
yading@10 262 psubusb m2, m3
yading@10 263 paddusb m1, m3
yading@10 264 paddusb m2, m6
yading@10 265 %endmacro
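; this realizes the normal-filter delta of the spec,
;   delta = clip3(-tc, tc, ((q0-p0)*4 + (p1-q1) + 4) >> 3),
; entirely in saturated bytes around a +0xA1 (= 128+33) bias, the d+128+33
; noted above: m3 and m6 end up as the positive and negative parts of delta,
; each clipped to tc, and are applied to p0/q0 with paddusb/psubusb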
yading@10 266
yading@10 267 ; in: m1=p0 m2=q0
yading@10 268 ; %1=q1 %2=q2 %3=[q2] %4=[q1] %5=tc0 %6=tmp
yading@10 269 ; out: [q1] = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
yading@10 270 ; clobbers: q2, tmp, tc0
yading@10 271 %macro LUMA_Q1 6
yading@10 272 pavgb %6, m1, m2
yading@10 273 pavgb %2, %6 ; avg(p2,avg(p0,q0))
yading@10 274 pxor %6, %3
yading@10 275 pand %6, [pb_1] ; (p2^avg(p0,q0))&1
yading@10 276 psubusb %2, %6 ; (p2+((p0+q0+1)>>1))>>1
yading@10 277 psubusb %6, %1, %5
yading@10 278 paddusb %5, %1
yading@10 279 pmaxub %2, %6
yading@10 280 pminub %2, %5
yading@10 281 mova %4, %2
yading@10 282 %endmacro
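; per byte: the two pavgb round up, and subtracting (avg(p0,q0) ^ %3) & 1
; converts the outer average to a floor, so %2 becomes (q2+((p0+q0+1)>>1))>>1;
; pmaxub/pminub then clamp it to [q1-tc0, q1+tc0] before the store to %4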
yading@10 283
yading@10 284 %if ARCH_X86_64
yading@10 285 ;-----------------------------------------------------------------------------
yading@10 286 ; void deblock_v_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
yading@10 287 ;-----------------------------------------------------------------------------
yading@10 288 %macro DEBLOCK_LUMA 0
yading@10 289 cglobal deblock_v_luma_8, 5,5,10
yading@10 290 movd m8, [r4] ; tc0
yading@10 291 lea r4, [r1*3]
yading@10 292 dec r2d ; alpha-1
yading@10 293 neg r4
yading@10 294 dec r3d ; beta-1
yading@10 295 add r4, r0 ; pix-3*stride
yading@10 296
yading@10 297 mova m0, [r4+r1] ; p1
yading@10 298 mova m1, [r4+2*r1] ; p0
yading@10 299 mova m2, [r0] ; q0
yading@10 300 mova m3, [r0+r1] ; q1
yading@10 301 LOAD_MASK r2d, r3d
yading@10 302
yading@10 303 punpcklbw m8, m8
yading@10 304 punpcklbw m8, m8 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
yading@10 305 pcmpeqb m9, m9
yading@10 306 pcmpeqb m9, m8
yading@10 307 pandn m9, m7
yading@10 308 pand m8, m9
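; m9 = 0xFF where tc0 != -1 and the LOAD_MASK conditions hold; m8 = tc for
; those pixels and 0 elsewhere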
yading@10 309
yading@10 310 movdqa m3, [r4] ; p2
yading@10 311 DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1
yading@10 312 pand m6, m9
yading@10 313 psubb m7, m8, m6
yading@10 314 pand m6, m8
yading@10 315 LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4
yading@10 316
yading@10 317 movdqa m4, [r0+2*r1] ; q2
yading@10 318 DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1
yading@10 319 pand m6, m9
yading@10 320 pand m8, m6
yading@10 321 psubb m7, m6
yading@10 322 mova m3, [r0+r1]
yading@10 323 LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m8, m6
yading@10 324
yading@10 325 DEBLOCK_P0_Q0
yading@10 326 mova [r4+2*r1], m1
yading@10 327 mova [r0], m2
yading@10 328 RET
yading@10 329
yading@10 330 ;-----------------------------------------------------------------------------
yading@10 331 ; void deblock_h_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
yading@10 332 ;-----------------------------------------------------------------------------
yading@10 333 INIT_MMX cpuname
yading@10 334 cglobal deblock_h_luma_8, 5,9
yading@10 335 movsxd r7, r1d
yading@10 336 lea r8, [r7+r7*2]
yading@10 337 lea r6, [r0-4]
yading@10 338 lea r5, [r0-4+r8]
yading@10 339 %if WIN64
yading@10 340 sub rsp, 0x98
yading@10 341 %define pix_tmp rsp+0x30
yading@10 342 %else
yading@10 343 sub rsp, 0x68
yading@10 344 %define pix_tmp rsp
yading@10 345 %endif
yading@10 346
yading@10 347 ; transpose 6x16 -> tmp space
yading@10 348 TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r7, r8), pix_tmp
yading@10 349 lea r6, [r6+r7*8]
yading@10 350 lea r5, [r5+r7*8]
yading@10 351 TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r7, r8), pix_tmp+8
yading@10 352
yading@10 353 ; vertical filter
yading@10 354 ; alpha, beta, tc0 are still in r2d, r3d, r4
yading@10 355 ; don't back up r6, r5, r7, r8 because deblock_v_luma_sse2 doesn't use them
yading@10 356 lea r0, [pix_tmp+0x30]
yading@10 357 mov r1d, 0x10
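; the pix_tmp rows (16 bytes apart) hold p2,p1,p0,q0,q1,q2, so pix_tmp+0x30 is
; the q0 row the vertical filter expects as its pix argument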
yading@10 358 %if WIN64
yading@10 359 mov [rsp+0x20], r4
yading@10 360 %endif
yading@10 361 call deblock_v_luma_8
yading@10 362
yading@10 363 ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
yading@10 364 add r6, 2
yading@10 365 add r5, 2
yading@10 366 movq m0, [pix_tmp+0x18]
yading@10 367 movq m1, [pix_tmp+0x28]
yading@10 368 movq m2, [pix_tmp+0x38]
yading@10 369 movq m3, [pix_tmp+0x48]
yading@10 370 TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r7, r8)
yading@10 371
yading@10 372 shl r7, 3
yading@10 373 sub r6, r7
yading@10 374 sub r5, r7
yading@10 375 shr r7, 3
yading@10 376 movq m0, [pix_tmp+0x10]
yading@10 377 movq m1, [pix_tmp+0x20]
yading@10 378 movq m2, [pix_tmp+0x30]
yading@10 379 movq m3, [pix_tmp+0x40]
yading@10 380 TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r7, r8)
yading@10 381
yading@10 382 %if WIN64
yading@10 383 add rsp, 0x98
yading@10 384 %else
yading@10 385 add rsp, 0x68
yading@10 386 %endif
yading@10 387 RET
yading@10 388 %endmacro
yading@10 389
yading@10 390 INIT_XMM sse2
yading@10 391 DEBLOCK_LUMA
yading@10 392 %if HAVE_AVX_EXTERNAL
yading@10 393 INIT_XMM avx
yading@10 394 DEBLOCK_LUMA
yading@10 395 %endif
yading@10 396
yading@10 397 %else
yading@10 398
yading@10 399 %macro DEBLOCK_LUMA 2
yading@10 400 ;-----------------------------------------------------------------------------
yading@10 401 ; void deblock_v8_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
yading@10 402 ;-----------------------------------------------------------------------------
yading@10 403 cglobal deblock_%1_luma_8, 5,5,8,2*%2
yading@10 404 lea r4, [r1*3]
yading@10 405 dec r2 ; alpha-1
yading@10 406 neg r4
yading@10 407 dec r3 ; beta-1
yading@10 408 add r4, r0 ; pix-3*stride
yading@10 409
yading@10 410 mova m0, [r4+r1] ; p1
yading@10 411 mova m1, [r4+2*r1] ; p0
yading@10 412 mova m2, [r0] ; q0
yading@10 413 mova m3, [r0+r1] ; q1
yading@10 414 LOAD_MASK r2, r3
yading@10 415
yading@10 416 mov r3, r4mp
yading@10 417 pcmpeqb m3, m3
yading@10 418 movd m4, [r3] ; tc0
yading@10 419 punpcklbw m4, m4
yading@10 420 punpcklbw m4, m4 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
yading@10 421 mova [esp+%2], m4 ; tc
yading@10 422 pcmpgtb m4, m3
yading@10 423 mova m3, [r4] ; p2
yading@10 424 pand m4, m7
yading@10 425 mova [esp], m4 ; mask
yading@10 426
yading@10 427 DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1
yading@10 428 pand m6, m4
yading@10 429 pand m4, [esp+%2] ; tc
yading@10 430 psubb m7, m4, m6
yading@10 431 pand m6, m4
yading@10 432 LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4
yading@10 433
yading@10 434 mova m4, [r0+2*r1] ; q2
yading@10 435 DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1
yading@10 436 pand m6, [esp] ; mask
yading@10 437 mova m5, [esp+%2] ; tc
yading@10 438 psubb m7, m6
yading@10 439 pand m5, m6
yading@10 440 mova m3, [r0+r1]
yading@10 441 LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m5, m6
yading@10 442
yading@10 443 DEBLOCK_P0_Q0
yading@10 444 mova [r4+2*r1], m1
yading@10 445 mova [r0], m2
yading@10 446 RET
yading@10 447
yading@10 448 ;-----------------------------------------------------------------------------
yading@10 449 ; void deblock_h_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
yading@10 450 ;-----------------------------------------------------------------------------
yading@10 451 INIT_MMX cpuname
yading@10 452 cglobal deblock_h_luma_8, 0,5,8,0x60+HAVE_ALIGNED_STACK*12
yading@10 453 mov r0, r0mp
yading@10 454 mov r3, r1m
yading@10 455 lea r4, [r3*3]
yading@10 456 sub r0, 4
yading@10 457 lea r1, [r0+r4]
yading@10 458 %define pix_tmp esp+12*HAVE_ALIGNED_STACK
yading@10 459
yading@10 460 ; transpose 6x16 -> tmp space
yading@10 461 TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp
yading@10 462 lea r0, [r0+r3*8]
yading@10 463 lea r1, [r1+r3*8]
yading@10 464 TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp+8
yading@10 465
yading@10 466 ; vertical filter
yading@10 467 lea r0, [pix_tmp+0x30]
yading@10 468 PUSH dword r4m
yading@10 469 PUSH dword r3m
yading@10 470 PUSH dword r2m
yading@10 471 PUSH dword 16
yading@10 472 PUSH dword r0
yading@10 473 call deblock_%1_luma_8
yading@10 474 %ifidn %1, v8
yading@10 475 add dword [esp ], 8 ; pix_tmp+0x38
yading@10 476 add dword [esp+16], 2 ; tc0+2
yading@10 477 call deblock_%1_luma_8
yading@10 478 %endif
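; (the 8-pixel mmxext kernel only covers half of the 16-column edge, so it is
; run a second time for pix_tmp+0x38 with tc0+2)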
yading@10 479 ADD esp, 20
yading@10 480
yading@10 481 ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
yading@10 482 mov r0, r0mp
yading@10 483 sub r0, 2
yading@10 484
yading@10 485 movq m0, [pix_tmp+0x10]
yading@10 486 movq m1, [pix_tmp+0x20]
yading@10 487 lea r1, [r0+r4]
yading@10 488 movq m2, [pix_tmp+0x30]
yading@10 489 movq m3, [pix_tmp+0x40]
yading@10 490 TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)
yading@10 491
yading@10 492 lea r0, [r0+r3*8]
yading@10 493 lea r1, [r1+r3*8]
yading@10 494 movq m0, [pix_tmp+0x18]
yading@10 495 movq m1, [pix_tmp+0x28]
yading@10 496 movq m2, [pix_tmp+0x38]
yading@10 497 movq m3, [pix_tmp+0x48]
yading@10 498 TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)
yading@10 499
yading@10 500 RET
yading@10 501 %endmacro ; DEBLOCK_LUMA
yading@10 502
yading@10 503 INIT_MMX mmxext
yading@10 504 DEBLOCK_LUMA v8, 8
yading@10 505 INIT_XMM sse2
yading@10 506 DEBLOCK_LUMA v, 16
yading@10 507 %if HAVE_AVX_EXTERNAL
yading@10 508 INIT_XMM avx
yading@10 509 DEBLOCK_LUMA v, 16
yading@10 510 %endif
yading@10 511
yading@10 512 %endif ; ARCH
yading@10 513
yading@10 514
yading@10 515
yading@10 516 %macro LUMA_INTRA_P012 4 ; p0..p3 in memory
yading@10 517 %if ARCH_X86_64
yading@10 518 pavgb t0, p2, p1
yading@10 519 pavgb t1, p0, q0
yading@10 520 %else
yading@10 521 mova t0, p2
yading@10 522 mova t1, p0
yading@10 523 pavgb t0, p1
yading@10 524 pavgb t1, q0
yading@10 525 %endif
yading@10 526 pavgb t0, t1 ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2
yading@10 527 mova t5, t1
yading@10 528 %if ARCH_X86_64
yading@10 529 paddb t2, p2, p1
yading@10 530 paddb t3, p0, q0
yading@10 531 %else
yading@10 532 mova t2, p2
yading@10 533 mova t3, p0
yading@10 534 paddb t2, p1
yading@10 535 paddb t3, q0
yading@10 536 %endif
yading@10 537 paddb t2, t3
yading@10 538 mova t3, t2
yading@10 539 mova t4, t2
yading@10 540 psrlw t2, 1
yading@10 541 pavgb t2, mpb_0
yading@10 542 pxor t2, t0
yading@10 543 pand t2, mpb_1
yading@10 544 psubb t0, t2 ; p1' = (p2+p1+p0+q0+2)/4;
yading@10 545
yading@10 546 %if ARCH_X86_64
yading@10 547 pavgb t1, p2, q1
yading@10 548 psubb t2, p2, q1
yading@10 549 %else
yading@10 550 mova t1, p2
yading@10 551 mova t2, p2
yading@10 552 pavgb t1, q1
yading@10 553 psubb t2, q1
yading@10 554 %endif
yading@10 555 paddb t3, t3
yading@10 556 psubb t3, t2 ; p2+2*p1+2*p0+2*q0+q1
yading@10 557 pand t2, mpb_1
yading@10 558 psubb t1, t2
yading@10 559 pavgb t1, p1
yading@10 560 pavgb t1, t5 ; (((p2+q1)/2 + p1+1)/2 + (p0+q0+1)/2 + 1)/2
yading@10 561 psrlw t3, 2
yading@10 562 pavgb t3, mpb_0
yading@10 563 pxor t3, t1
yading@10 564 pand t3, mpb_1
yading@10 565 psubb t1, t3 ; p0'a = (p2+2*p1+2*p0+2*q0+q1+4)/8
yading@10 566
yading@10 567 pxor t3, p0, q1
yading@10 568 pavgb t2, p0, q1
yading@10 569 pand t3, mpb_1
yading@10 570 psubb t2, t3
yading@10 571 pavgb t2, p1 ; p0'b = (2*p1+p0+q1+2)/4
yading@10 572
yading@10 573 pxor t1, t2
yading@10 574 pxor t2, p0
yading@10 575 pand t1, mask1p
yading@10 576 pand t2, mask0
yading@10 577 pxor t1, t2
yading@10 578 pxor t1, p0
yading@10 579 mova %1, t1 ; store p0
yading@10 580
yading@10 581 mova t1, %4 ; p3
yading@10 582 paddb t2, t1, p2
yading@10 583 pavgb t1, p2
yading@10 584 pavgb t1, t0 ; (p3+p2+1)/2 + (p2+p1+p0+q0+2)/4
yading@10 585 paddb t2, t2
yading@10 586 paddb t2, t4 ; 2*p3+3*p2+p1+p0+q0
yading@10 587 psrlw t2, 2
yading@10 588 pavgb t2, mpb_0
yading@10 589 pxor t2, t1
yading@10 590 pand t2, mpb_1
yading@10 591 psubb t1, t2 ; p2' = (2*p3+3*p2+p1+p0+q0+4)/8
yading@10 592
yading@10 593 pxor t0, p1
yading@10 594 pxor t1, p2
yading@10 595 pand t0, mask1p
yading@10 596 pand t1, mask1p
yading@10 597 pxor t0, p1
yading@10 598 pxor t1, p2
yading@10 599 mova %2, t0 ; store p1
yading@10 600 mova %3, t1 ; store p2
yading@10 601 %endmacro
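; this is the H.264 strong intra filter for the p side:
;   p0'a = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
;   p1'  = (p2 + p1 + p0 + q0 + 2) >> 2
;   p2'  = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
; with the fallback p0'b = (2*p1 + p0 + q1 + 2) >> 2 where the strong
; condition fails; mask0 selects filtered pixels, mask1p the ones that take
; the strong path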
yading@10 602
yading@10 603 %macro LUMA_INTRA_SWAP_PQ 0
yading@10 604 %define q1 m0
yading@10 605 %define q0 m1
yading@10 606 %define p0 m2
yading@10 607 %define p1 m3
yading@10 608 %define p2 q2
yading@10 609 %define mask1p mask1q
yading@10 610 %endmacro
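; swaps the p/q register and mask names so the same LUMA_INTRA_P012 body can
; be reused unchanged for the q side of the edge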
yading@10 611
yading@10 612 %macro DEBLOCK_LUMA_INTRA 1
yading@10 613 %define p1 m0
yading@10 614 %define p0 m1
yading@10 615 %define q0 m2
yading@10 616 %define q1 m3
yading@10 617 %define t0 m4
yading@10 618 %define t1 m5
yading@10 619 %define t2 m6
yading@10 620 %define t3 m7
yading@10 621 %if ARCH_X86_64
yading@10 622 %define p2 m8
yading@10 623 %define q2 m9
yading@10 624 %define t4 m10
yading@10 625 %define t5 m11
yading@10 626 %define mask0 m12
yading@10 627 %define mask1p m13
yading@10 628 %if WIN64
yading@10 629 %define mask1q [rsp]
yading@10 630 %else
yading@10 631 %define mask1q [rsp-24]
yading@10 632 %endif
yading@10 633 %define mpb_0 m14
yading@10 634 %define mpb_1 m15
yading@10 635 %else
yading@10 636 %define spill(x) [esp+16*x]
yading@10 637 %define p2 [r4+r1]
yading@10 638 %define q2 [r0+2*r1]
yading@10 639 %define t4 spill(0)
yading@10 640 %define t5 spill(1)
yading@10 641 %define mask0 spill(2)
yading@10 642 %define mask1p spill(3)
yading@10 643 %define mask1q spill(4)
yading@10 644 %define mpb_0 [pb_0]
yading@10 645 %define mpb_1 [pb_1]
yading@10 646 %endif
yading@10 647
yading@10 648 ;-----------------------------------------------------------------------------
yading@10 649 ; void deblock_v_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
yading@10 650 ;-----------------------------------------------------------------------------
yading@10 651 %if WIN64
yading@10 652 cglobal deblock_%1_luma_intra_8, 4,6,16,0x10
yading@10 653 %else
yading@10 654 cglobal deblock_%1_luma_intra_8, 4,6,16,ARCH_X86_64*0x50-0x50
yading@10 655 %endif
yading@10 656 lea r4, [r1*4]
yading@10 657 lea r5, [r1*3] ; 3*stride
yading@10 658 dec r2d ; alpha-1
yading@10 659 jl .end
yading@10 660 neg r4
yading@10 661 dec r3d ; beta-1
yading@10 662 jl .end
yading@10 663 add r4, r0 ; pix-4*stride
yading@10 664 mova p1, [r4+2*r1]
yading@10 665 mova p0, [r4+r5]
yading@10 666 mova q0, [r0]
yading@10 667 mova q1, [r0+r1]
yading@10 668 %if ARCH_X86_64
yading@10 669 pxor mpb_0, mpb_0
yading@10 670 mova mpb_1, [pb_1]
yading@10 671 LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
yading@10 672 SWAP 7, 12 ; m12=mask0
yading@10 673 pavgb t5, mpb_0
yading@10 674 pavgb t5, mpb_1 ; alpha/4+1
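; the spec's strong-filter test |p0-q0| < (alpha>>2)+2 is evaluated below as
; |p0-q0| <= alpha/4+1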
yading@10 675 movdqa p2, [r4+r1]
yading@10 676 movdqa q2, [r0+2*r1]
yading@10 677 DIFF_GT2 p0, q0, t5, t0, t3 ; t0 = |p0-q0| > alpha/4+1
yading@10 678 DIFF_GT2 p0, p2, m5, t2, t5 ; mask1 = |p2-p0| > beta-1
yading@10 679 DIFF_GT2 q0, q2, m5, t4, t5 ; t4 = |q2-q0| > beta-1
yading@10 680 pand t0, mask0
yading@10 681 pand t4, t0
yading@10 682 pand t2, t0
yading@10 683 mova mask1q, t4
yading@10 684 mova mask1p, t2
yading@10 685 %else
yading@10 686 LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
yading@10 687 mova m4, t5
yading@10 688 mova mask0, m7
yading@10 689 pavgb m4, [pb_0]
yading@10 690 pavgb m4, [pb_1] ; alpha/4+1
yading@10 691 DIFF_GT2 p0, q0, m4, m6, m7 ; m6 = |p0-q0| > alpha/4+1
yading@10 692 pand m6, mask0
yading@10 693 DIFF_GT2 p0, p2, m5, m4, m7 ; m4 = |p2-p0| > beta-1
yading@10 694 pand m4, m6
yading@10 695 mova mask1p, m4
yading@10 696 DIFF_GT2 q0, q2, m5, m4, m7 ; m4 = |q2-q0| > beta-1
yading@10 697 pand m4, m6
yading@10 698 mova mask1q, m4
yading@10 699 %endif
yading@10 700 LUMA_INTRA_P012 [r4+r5], [r4+2*r1], [r4+r1], [r4]
yading@10 701 LUMA_INTRA_SWAP_PQ
yading@10 702 LUMA_INTRA_P012 [r0], [r0+r1], [r0+2*r1], [r0+r5]
yading@10 703 .end:
yading@10 704 RET
yading@10 705
yading@10 706 INIT_MMX cpuname
yading@10 707 %if ARCH_X86_64
yading@10 708 ;-----------------------------------------------------------------------------
yading@10 709 ; void deblock_h_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
yading@10 710 ;-----------------------------------------------------------------------------
yading@10 711 cglobal deblock_h_luma_intra_8, 4,9
yading@10 712 movsxd r7, r1d
yading@10 713 lea r8, [r7*3]
yading@10 714 lea r6, [r0-4]
yading@10 715 lea r5, [r0-4+r8]
yading@10 716 sub rsp, 0x88
yading@10 717 %define pix_tmp rsp
yading@10 718
yading@10 719 ; transpose 8x16 -> tmp space
yading@10 720 TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r7, r8), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
yading@10 721 lea r6, [r6+r7*8]
yading@10 722 lea r5, [r5+r7*8]
yading@10 723 TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r7, r8), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)
yading@10 724
yading@10 725 lea r0, [pix_tmp+0x40]
yading@10 726 mov r1, 0x10
yading@10 727 call deblock_v_luma_intra_8
yading@10 728
yading@10 729 ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
yading@10 730 lea r5, [r6+r8]
yading@10 731 TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
yading@10 732 shl r7, 3
yading@10 733 sub r6, r7
yading@10 734 sub r5, r7
yading@10 735 shr r7, 3
yading@10 736 TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
yading@10 737 add rsp, 0x88
yading@10 738 RET
yading@10 739 %else
yading@10 740 cglobal deblock_h_luma_intra_8, 2,4,8,0x80
yading@10 741 lea r3, [r1*3]
yading@10 742 sub r0, 4
yading@10 743 lea r2, [r0+r3]
yading@10 744 %define pix_tmp rsp
yading@10 745
yading@10 746 ; transpose 8x16 -> tmp space
yading@10 747 TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
yading@10 748 lea r0, [r0+r1*8]
yading@10 749 lea r2, [r2+r1*8]
yading@10 750 TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)
yading@10 751
yading@10 752 lea r0, [pix_tmp+0x40]
yading@10 753 PUSH dword r3m
yading@10 754 PUSH dword r2m
yading@10 755 PUSH dword 16
yading@10 756 PUSH r0
yading@10 757 call deblock_%1_luma_intra_8
yading@10 758 %ifidn %1, v8
yading@10 759 add dword [rsp], 8 ; pix_tmp+8
yading@10 760 call deblock_%1_luma_intra_8
yading@10 761 %endif
yading@10 762 ADD esp, 16
yading@10 763
yading@10 764 mov r1, r1m
yading@10 765 mov r0, r0mp
yading@10 766 lea r3, [r1*3]
yading@10 767 sub r0, 4
yading@10 768 lea r2, [r0+r3]
yading@10 769 ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
yading@10 770 TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
yading@10 771 lea r0, [r0+r1*8]
yading@10 772 lea r2, [r2+r1*8]
yading@10 773 TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
yading@10 774 RET
yading@10 775 %endif ; ARCH_X86_64
yading@10 776 %endmacro ; DEBLOCK_LUMA_INTRA
yading@10 777
yading@10 778 INIT_XMM sse2
yading@10 779 DEBLOCK_LUMA_INTRA v
yading@10 780 %if HAVE_AVX_EXTERNAL
yading@10 781 INIT_XMM avx
yading@10 782 DEBLOCK_LUMA_INTRA v
yading@10 783 %endif
yading@10 784 %if ARCH_X86_64 == 0
yading@10 785 INIT_MMX mmxext
yading@10 786 DEBLOCK_LUMA_INTRA v8
yading@10 787 %endif
yading@10 788
yading@10 789 INIT_MMX mmxext
yading@10 790
yading@10 791 %macro CHROMA_V_START 0
yading@10 792 dec r2d ; alpha-1
yading@10 793 dec r3d ; beta-1
yading@10 794 mov t5, r0
yading@10 795 sub t5, r1
yading@10 796 sub t5, r1
yading@10 797 %endmacro
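; t5 = pix - 2*stride, i.e. the p1 row; the chroma filter only reads and
; writes p1, p0, q0, q1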
yading@10 798
yading@10 799 %macro CHROMA_H_START 0
yading@10 800 dec r2d
yading@10 801 dec r3d
yading@10 802 sub r0, 2
yading@10 803 lea t6, [r1*3]
yading@10 804 mov t5, r0
yading@10 805 add r0, t6
yading@10 806 %endmacro
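; t5 = pix-2 and r0 = pix-2+3*stride, so PASS8ROWS(t5, r0, r1, t6) addresses
; eight 4-byte rows straddling the vertical edge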
yading@10 807
yading@10 808 %define t5 r5
yading@10 809 %define t6 r6
yading@10 810
yading@10 811 ;-----------------------------------------------------------------------------
yading@10 812 ; void ff_deblock_v_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
yading@10 813 ;-----------------------------------------------------------------------------
yading@10 814 cglobal deblock_v_chroma_8, 5,6
yading@10 815 CHROMA_V_START
yading@10 816 movq m0, [t5]
yading@10 817 movq m1, [t5+r1]
yading@10 818 movq m2, [r0]
yading@10 819 movq m3, [r0+r1]
yading@10 820 call ff_chroma_inter_body_mmxext
yading@10 821 movq [t5+r1], m1
yading@10 822 movq [r0], m2
yading@10 823 RET
yading@10 824
yading@10 825 ;-----------------------------------------------------------------------------
yading@10 826 ; void ff_deblock_h_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
yading@10 827 ;-----------------------------------------------------------------------------
yading@10 828 cglobal deblock_h_chroma_8, 5,7
yading@10 829 %if UNIX64
yading@10 830 %define buf0 [rsp-24]
yading@10 831 %define buf1 [rsp-16]
yading@10 832 %elif WIN64
yading@10 833 sub rsp, 16
yading@10 834 %define buf0 [rsp]
yading@10 835 %define buf1 [rsp+8]
yading@10 836 %else
yading@10 837 %define buf0 r0m
yading@10 838 %define buf1 r2m
yading@10 839 %endif
yading@10 840 CHROMA_H_START
yading@10 841 TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
yading@10 842 movq buf0, m0
yading@10 843 movq buf1, m3
yading@10 844 LOAD_MASK r2d, r3d
yading@10 845 movd m6, [r4] ; tc0
yading@10 846 punpcklbw m6, m6
yading@10 847 pand m7, m6
yading@10 848 DEBLOCK_P0_Q0
yading@10 849 movq m0, buf0
yading@10 850 movq m3, buf1
yading@10 851 TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
yading@10 852 %if WIN64
yading@10 853 add rsp, 16
yading@10 854 %endif
yading@10 855 RET
yading@10 856
yading@10 857 ALIGN 16
yading@10 858 ff_chroma_inter_body_mmxext:
yading@10 859 LOAD_MASK r2d, r3d
yading@10 860 movd m6, [r4] ; tc0
yading@10 861 punpcklbw m6, m6
yading@10 862 pand m7, m6
yading@10 863 DEBLOCK_P0_Q0
yading@10 864 ret
yading@10 865
yading@10 866
yading@10 867
yading@10 868 ; in: %1=p0 %2=p1 %3=q1
yading@10 869 ; out: p0 = (p0 + q1 + 2*p1 + 2) >> 2
yading@10 870 %macro CHROMA_INTRA_P0 3
yading@10 871 movq m4, %1
yading@10 872 pxor m4, %3
yading@10 873 pand m4, [pb_1] ; m4 = (p0^q1)&1
yading@10 874 pavgb %1, %3
yading@10 875 psubusb %1, m4
yading@10 876 pavgb %1, %2 ; dst = avg(p1, avg(p0,q1) - ((p0^q1)&1))
yading@10 877 %endmacro
yading@10 878
yading@10 879 %define t5 r4
yading@10 880 %define t6 r5
yading@10 881
yading@10 882 ;-----------------------------------------------------------------------------
yading@10 883 ; void ff_deblock_v_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
yading@10 884 ;-----------------------------------------------------------------------------
yading@10 885 cglobal deblock_v_chroma_intra_8, 4,5
yading@10 886 CHROMA_V_START
yading@10 887 movq m0, [t5]
yading@10 888 movq m1, [t5+r1]
yading@10 889 movq m2, [r0]
yading@10 890 movq m3, [r0+r1]
yading@10 891 call ff_chroma_intra_body_mmxext
yading@10 892 movq [t5+r1], m1
yading@10 893 movq [r0], m2
yading@10 894 RET
yading@10 895
yading@10 896 ;-----------------------------------------------------------------------------
yading@10 897 ; void ff_deblock_h_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
yading@10 898 ;-----------------------------------------------------------------------------
yading@10 899 cglobal deblock_h_chroma_intra_8, 4,6
yading@10 900 CHROMA_H_START
yading@10 901 TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
yading@10 902 call ff_chroma_intra_body_mmxext
yading@10 903 TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
yading@10 904 RET
yading@10 905
yading@10 906 ALIGN 16
yading@10 907 ff_chroma_intra_body_mmxext:
yading@10 908 LOAD_MASK r2d, r3d
yading@10 909 movq m5, m1
yading@10 910 movq m6, m2
yading@10 911 CHROMA_INTRA_P0 m1, m0, m3
yading@10 912 CHROMA_INTRA_P0 m2, m3, m0
yading@10 913 psubb m1, m5
yading@10 914 psubb m2, m6
yading@10 915 pand m1, m7
yading@10 916 pand m2, m7
yading@10 917 paddb m1, m5
yading@10 918 paddb m2, m6
yading@10 919 ret
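; the psubb/pand/paddb pairs above apply the new values only where the filter
; mask in m7 is set: out = old + ((new - old) & mask)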
yading@10 920
yading@10 921 ;-----------------------------------------------------------------------------
yading@10 922 ; void h264_loop_filter_strength(int16_t bs[2][4][4], uint8_t nnz[40],
yading@10 923 ; int8_t ref[2][40], int16_t mv[2][40][2],
yading@10 924 ; int bidir, int edges, int step,
yading@10 925 ; int mask_mv0, int mask_mv1, int field);
yading@10 926 ;
yading@10 927 ; bidir is 0 or 1
yading@10 928 ; edges is 1 or 4
yading@10 929 ; step is 1 or 2
yading@10 930 ; mask_mv0 is 0 or 3
yading@10 931 ; mask_mv1 is 0 or 1
yading@10 932 ; field is 0 or 1
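;
; for each 4x4 block edge the resulting strength is 2 if either side has
; nonzero coefficients, else 1 if the reference frames differ or any mv
; component differs by >= 4 (>= 2 for vertical mvs in field mode), else 0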
yading@10 933 ;-----------------------------------------------------------------------------
yading@10 934 %macro loop_filter_strength_iteration 7 ; edges, step, mask_mv,
yading@10 935 ; dir, d_idx, mask_dir, bidir
yading@10 936 %define edgesd %1
yading@10 937 %define stepd %2
yading@10 938 %define mask_mvd %3
yading@10 939 %define dir %4
yading@10 940 %define d_idx %5
yading@10 941 %define mask_dir %6
yading@10 942 %define bidir %7
yading@10 943 xor b_idxd, b_idxd ; for (b_idx = 0; b_idx < edges; b_idx += step)
yading@10 944 %%.b_idx_loop:
yading@10 945 %if mask_dir == 0
yading@10 946 pxor m0, m0
yading@10 947 %endif
yading@10 948 test b_idxd, dword mask_mvd
yading@10 949 jnz %%.skip_loop_iter ; if (!(b_idx & mask_mv))
yading@10 950 %if bidir == 1
yading@10 951 movd m2, [refq+b_idxq+d_idx+12] ; { ref0[bn] }
yading@10 952 punpckldq m2, [refq+b_idxq+d_idx+52] ; { ref0[bn], ref1[bn] }
yading@10 953 pshufw m0, [refq+b_idxq+12], 0x44 ; { ref0[b], ref0[b] }
yading@10 954 pshufw m1, [refq+b_idxq+52], 0x44 ; { ref1[b], ref1[b] }
yading@10 955 pshufw m3, m2, 0x4E ; { ref1[bn], ref0[bn] }
yading@10 956 psubb m0, m2 ; { ref0[b] != ref0[bn],
yading@10 957 ; ref0[b] != ref1[bn] }
yading@10 958 psubb m1, m3 ; { ref1[b] != ref1[bn],
yading@10 959 ; ref1[b] != ref0[bn] }
yading@10 960
yading@10 961 por m0, m1
yading@10 962 mova m1, [mvq+b_idxq*4+(d_idx+12)*4]
yading@10 963 mova m2, [mvq+b_idxq*4+(d_idx+12)*4+mmsize]
yading@10 964 mova m3, m1
yading@10 965 mova m4, m2
yading@10 966 psubw m1, [mvq+b_idxq*4+12*4]
yading@10 967 psubw m2, [mvq+b_idxq*4+12*4+mmsize]
yading@10 968 psubw m3, [mvq+b_idxq*4+52*4]
yading@10 969 psubw m4, [mvq+b_idxq*4+52*4+mmsize]
yading@10 970 packsswb m1, m2
yading@10 971 packsswb m3, m4
yading@10 972 paddb m1, m6
yading@10 973 paddb m3, m6
yading@10 974 psubusb m1, m5 ; abs(mv[b] - mv[bn]) >= limit
yading@10 975 psubusb m3, m5
yading@10 976 packsswb m1, m3
yading@10 977
yading@10 978 por m0, m1
yading@10 979 mova m1, [mvq+b_idxq*4+(d_idx+52)*4]
yading@10 980 mova m2, [mvq+b_idxq*4+(d_idx+52)*4+mmsize]
yading@10 981 mova m3, m1
yading@10 982 mova m4, m2
yading@10 983 psubw m1, [mvq+b_idxq*4+12*4]
yading@10 984 psubw m2, [mvq+b_idxq*4+12*4+mmsize]
yading@10 985 psubw m3, [mvq+b_idxq*4+52*4]
yading@10 986 psubw m4, [mvq+b_idxq*4+52*4+mmsize]
yading@10 987 packsswb m1, m2
yading@10 988 packsswb m3, m4
yading@10 989 paddb m1, m6
yading@10 990 paddb m3, m6
yading@10 991 psubusb m1, m5 ; abs(mv[b] - mv[bn]) >= limit
yading@10 992 psubusb m3, m5
yading@10 993 packsswb m1, m3
yading@10 994
yading@10 995 pshufw m1, m1, 0x4E
yading@10 996 por m0, m1
yading@10 997 pshufw m1, m0, 0x4E
yading@10 998 pminub m0, m1
yading@10 999 %else ; bidir == 0
yading@10 1000 movd m0, [refq+b_idxq+12]
yading@10 1001 psubb m0, [refq+b_idxq+d_idx+12] ; ref[b] != ref[bn]
yading@10 1002
yading@10 1003 mova m1, [mvq+b_idxq*4+12*4]
yading@10 1004 mova m2, [mvq+b_idxq*4+12*4+mmsize]
yading@10 1005 psubw m1, [mvq+b_idxq*4+(d_idx+12)*4]
yading@10 1006 psubw m2, [mvq+b_idxq*4+(d_idx+12)*4+mmsize]
yading@10 1007 packsswb m1, m2
yading@10 1008 paddb m1, m6
yading@10 1009 psubusb m1, m5 ; abs(mv[b] - mv[bn]) >= limit
yading@10 1010 packsswb m1, m1
yading@10 1011 por m0, m1
yading@10 1012 %endif ; bidir == 1/0
yading@10 1013
yading@10 1014 %%.skip_loop_iter:
yading@10 1015 movd m1, [nnzq+b_idxq+12]
yading@10 1016 por m1, [nnzq+b_idxq+d_idx+12] ; nnz[b] || nnz[bn]
yading@10 1017
yading@10 1018 pminub m1, m7
yading@10 1019 pminub m0, m7
yading@10 1020 psllw m1, 1
yading@10 1021 pxor m2, m2
yading@10 1022 pmaxub m1, m0
yading@10 1023 punpcklbw m1, m2
yading@10 1024 movq [bsq+b_idxq+32*dir], m1
yading@10 1025
yading@10 1026 add b_idxd, dword stepd
yading@10 1027 cmp b_idxd, dword edgesd
yading@10 1028 jl %%.b_idx_loop
yading@10 1029 %endmacro
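; d_idx = -8 selects the neighbouring block above and d_idx = -1 the block to
; the left (the nnz/ref/mv caches use a stride of 8 per row of 4x4 blocks,
; with the current macroblock starting at offset 12)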
yading@10 1030
yading@10 1031 INIT_MMX mmxext
yading@10 1032 cglobal h264_loop_filter_strength, 9, 9, 0, bs, nnz, ref, mv, bidir, edges, \
yading@10 1033 step, mask_mv0, mask_mv1, field
yading@10 1034 %define b_idxq bidirq
yading@10 1035 %define b_idxd bidird
yading@10 1036 cmp dword fieldm, 0
yading@10 1037 mova m7, [pb_1]
yading@10 1038 mova m5, [pb_3]
yading@10 1039 je .nofield
yading@10 1040 mova m5, [pb_3_1]
yading@10 1041 .nofield:
yading@10 1042 mova m6, m5
yading@10 1043 paddb m5, m5
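; m6 = per-component bias (3,3 or 3,1 in field mode), m5 = 2*m6; adding m6 to
; a packed mv difference and saturating-subtracting m5 leaves a nonzero byte
; exactly where |dmv| >= 4 (>= 2 for vertical mvs in field mode)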
yading@10 1044
yading@10 1045 shl dword stepd, 3
yading@10 1046 shl dword edgesd, 3
yading@10 1047 %if ARCH_X86_32
yading@10 1048 %define mask_mv0d mask_mv0m
yading@10 1049 %define mask_mv1d mask_mv1m
yading@10 1050 %endif
yading@10 1051 shl dword mask_mv1d, 3
yading@10 1052 shl dword mask_mv0d, 3
yading@10 1053
yading@10 1054 cmp dword bidird, 0
yading@10 1055 jne .bidir
yading@10 1056 loop_filter_strength_iteration edgesd, stepd, mask_mv1d, 1, -8, 0, 0
yading@10 1057 loop_filter_strength_iteration 32, 8, mask_mv0d, 0, -1, -1, 0
yading@10 1058
yading@10 1059 mova m0, [bsq+mmsize*0]
yading@10 1060 mova m1, [bsq+mmsize*1]
yading@10 1061 mova m2, [bsq+mmsize*2]
yading@10 1062 mova m3, [bsq+mmsize*3]
yading@10 1063 TRANSPOSE4x4W 0, 1, 2, 3, 4
yading@10 1064 mova [bsq+mmsize*0], m0
yading@10 1065 mova [bsq+mmsize*1], m1
yading@10 1066 mova [bsq+mmsize*2], m2
yading@10 1067 mova [bsq+mmsize*3], m3
yading@10 1068 RET
yading@10 1069
yading@10 1070 .bidir:
yading@10 1071 loop_filter_strength_iteration edgesd, stepd, mask_mv1d, 1, -8, 0, 1
yading@10 1072 loop_filter_strength_iteration 32, 8, mask_mv0d, 0, -1, -1, 1
yading@10 1073
yading@10 1074 mova m0, [bsq+mmsize*0]
yading@10 1075 mova m1, [bsq+mmsize*1]
yading@10 1076 mova m2, [bsq+mmsize*2]
yading@10 1077 mova m3, [bsq+mmsize*3]
yading@10 1078 TRANSPOSE4x4W 0, 1, 2, 3, 4
yading@10 1079 mova [bsq+mmsize*0], m0
yading@10 1080 mova [bsq+mmsize*1], m1
yading@10 1081 mova [bsq+mmsize*2], m2
yading@10 1082 mova [bsq+mmsize*3], m3
yading@10 1083 RET