annotate src/zlib-1.2.7/contrib/amd64/amd64-match.S @ 143:e95e00bdc3eb

Further win32 build updates
author Chris Cannam <cannam@all-day-breakfast.com>
date Mon, 09 Jan 2017 13:51:38 +0000
parents 8a15ff55d9af
children
rev   line source
cannam@89 1 /*
cannam@89 2 * match.S -- optimized version of longest_match()
cannam@89 3 * based on the similar work by Gilles Vollant, and Brian Raiter, written 1998
cannam@89 4 *
cannam@89 5 * This is free software; you can redistribute it and/or modify it
cannam@89 6 * under the terms of the BSD License. Use by owners of Che Guevara
cannam@89 7 * paraphernalia is prohibited, where possible, and highly discouraged
cannam@89 8 * elsewhere.
cannam@89 9 */
cannam@89 10
cannam@89 11 #ifndef NO_UNDERLINE
cannam@89 12 # define match_init _match_init
cannam@89 13 # define longest_match _longest_match
cannam@89 14 #endif
cannam@89 15
cannam@89 16 #define scanend ebx
cannam@89 17 #define scanendw bx
cannam@89 18 #define chainlenwmask edx /* high word: current chain len low word: s->wmask */
cannam@89 19 #define curmatch rsi
cannam@89 20 #define curmatchd esi
cannam@89 21 #define windowbestlen r8
cannam@89 22 #define scanalign r9
cannam@89 23 #define scanalignd r9d
cannam@89 24 #define window r10
cannam@89 25 #define bestlen r11
cannam@89 26 #define bestlend r11d
cannam@89 27 #define scanstart r12d
cannam@89 28 #define scanstartw r12w
cannam@89 29 #define scan r13
cannam@89 30 #define nicematch r14d
cannam@89 31 #define limit r15
cannam@89 32 #define limitd r15d
cannam@89 33 #define prev rcx
cannam@89 34
cannam@89 35 /*
cannam@89 36 * The 258 is a "magic number", not a parameter -- changing it
cannam@89 37 * lets all hell break loose
cannam@89 38 */
cannam@89 39 #define MAX_MATCH (258)
cannam@89 40 #define MIN_MATCH (3)
cannam@89 41 #define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1)
cannam@89 42 #define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7)
cannam@89 43
cannam@89 44 /* stack frame offsets */
cannam@89 45 #define LocalVarsSize (112)
cannam@89 46 #define _chainlenwmask ( 8-LocalVarsSize)(%rsp)
cannam@89 47 #define _windowbestlen (16-LocalVarsSize)(%rsp)
cannam@89 48 #define save_r14 (24-LocalVarsSize)(%rsp)
cannam@89 49 #define save_rsi (32-LocalVarsSize)(%rsp)
cannam@89 50 #define save_rbx (40-LocalVarsSize)(%rsp)
cannam@89 51 #define save_r12 (56-LocalVarsSize)(%rsp)
cannam@89 52 #define save_r13 (64-LocalVarsSize)(%rsp)
cannam@89 53 #define save_r15 (80-LocalVarsSize)(%rsp)
cannam@89 54
cannam@89 55
cannam@89 56 .globl match_init, longest_match
cannam@89 57
cannam@89 58 /*
cannam@89 59 * On AMD64 the first argument of a function (in our case -- the pointer to
cannam@89 60 * deflate_state structure) is passed in %rdi, hence our offsets below are
cannam@89 61 * all off of that.
cannam@89 62 */
cannam@89 63
cannam@89 64 /* you can check the structure offset by running
cannam@89 65
cannam@89 66 #include <stdlib.h>
cannam@89 67 #include <stdio.h>
cannam@89 68 #include "deflate.h"
cannam@89 69
cannam@89 70 void print_depl()
cannam@89 71 {
cannam@89 72 deflate_state ds;
cannam@89 73 deflate_state *s=&ds;
cannam@89 74 printf("size pointer=%u\n",(int)sizeof(void*));
cannam@89 75
cannam@89 76 printf("#define dsWSize (%3u)(%%rdi)\n",(int)(((char*)&(s->w_size))-((char*)s)));
cannam@89 77 printf("#define dsWMask (%3u)(%%rdi)\n",(int)(((char*)&(s->w_mask))-((char*)s)));
cannam@89 78 printf("#define dsWindow (%3u)(%%rdi)\n",(int)(((char*)&(s->window))-((char*)s)));
cannam@89 79 printf("#define dsPrev (%3u)(%%rdi)\n",(int)(((char*)&(s->prev))-((char*)s)));
cannam@89 80 printf("#define dsMatchLen (%3u)(%%rdi)\n",(int)(((char*)&(s->match_length))-((char*)s)));
cannam@89 81 printf("#define dsPrevMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_match))-((char*)s)));
cannam@89 82 printf("#define dsStrStart (%3u)(%%rdi)\n",(int)(((char*)&(s->strstart))-((char*)s)));
cannam@89 83 printf("#define dsMatchStart (%3u)(%%rdi)\n",(int)(((char*)&(s->match_start))-((char*)s)));
cannam@89 84 printf("#define dsLookahead (%3u)(%%rdi)\n",(int)(((char*)&(s->lookahead))-((char*)s)));
cannam@89 85 printf("#define dsPrevLen (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_length))-((char*)s)));
cannam@89 86 printf("#define dsMaxChainLen (%3u)(%%rdi)\n",(int)(((char*)&(s->max_chain_length))-((char*)s)));
cannam@89 87 printf("#define dsGoodMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->good_match))-((char*)s)));
cannam@89 88 printf("#define dsNiceMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->nice_match))-((char*)s)));
cannam@89 89 }
cannam@89 90
cannam@89 91 */
cannam@89 92
cannam@89 93
cannam@89 94 /*
cannam@89 95 to compile for XCode 3.2 on MacOSX x86_64
cannam@89 96 - run "gcc -g -c -DXCODE_MAC_X64_STRUCTURE amd64-match.S"
cannam@89 97 */
cannam@89 98
cannam@89 99
cannam@89 100 #ifndef CURRENT_LINX_XCODE_MAC_X64_STRUCTURE
cannam@89 101 #define dsWSize ( 68)(%rdi)
cannam@89 102 #define dsWMask ( 76)(%rdi)
cannam@89 103 #define dsWindow ( 80)(%rdi)
cannam@89 104 #define dsPrev ( 96)(%rdi)
cannam@89 105 #define dsMatchLen (144)(%rdi)
cannam@89 106 #define dsPrevMatch (148)(%rdi)
cannam@89 107 #define dsStrStart (156)(%rdi)
cannam@89 108 #define dsMatchStart (160)(%rdi)
cannam@89 109 #define dsLookahead (164)(%rdi)
cannam@89 110 #define dsPrevLen (168)(%rdi)
cannam@89 111 #define dsMaxChainLen (172)(%rdi)
cannam@89 112 #define dsGoodMatch (188)(%rdi)
cannam@89 113 #define dsNiceMatch (192)(%rdi)
cannam@89 114
cannam@89 115 #else
cannam@89 116
cannam@89 117 #ifndef STRUCT_OFFSET
cannam@89 118 # define STRUCT_OFFSET (0)
cannam@89 119 #endif
cannam@89 120
cannam@89 121
cannam@89 122 #define dsWSize ( 56 + STRUCT_OFFSET)(%rdi)
cannam@89 123 #define dsWMask ( 64 + STRUCT_OFFSET)(%rdi)
cannam@89 124 #define dsWindow ( 72 + STRUCT_OFFSET)(%rdi)
cannam@89 125 #define dsPrev ( 88 + STRUCT_OFFSET)(%rdi)
cannam@89 126 #define dsMatchLen (136 + STRUCT_OFFSET)(%rdi)
cannam@89 127 #define dsPrevMatch (140 + STRUCT_OFFSET)(%rdi)
cannam@89 128 #define dsStrStart (148 + STRUCT_OFFSET)(%rdi)
cannam@89 129 #define dsMatchStart (152 + STRUCT_OFFSET)(%rdi)
cannam@89 130 #define dsLookahead (156 + STRUCT_OFFSET)(%rdi)
cannam@89 131 #define dsPrevLen (160 + STRUCT_OFFSET)(%rdi)
cannam@89 132 #define dsMaxChainLen (164 + STRUCT_OFFSET)(%rdi)
cannam@89 133 #define dsGoodMatch (180 + STRUCT_OFFSET)(%rdi)
cannam@89 134 #define dsNiceMatch (184 + STRUCT_OFFSET)(%rdi)
cannam@89 135
cannam@89 136 #endif
cannam@89 137
cannam@89 138
cannam@89 139
cannam@89 140
cannam@89 141 .text
cannam@89 142
cannam@89 143 /* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */
cannam@89 144
/*
 * uInt longest_match(deflate_state *deflatestate, IPos curmatch)
 *
 * System V AMD64 entry: %rdi = deflate_state pointer (accessed via the
 * dsXXX offset macros), %esi = cur_match.  Returns the match length in
 * %eax, clamped to s->lookahead on exit (see LeaveNow).  Also writes
 * s->match_start (dsMatchStart) when a longer match is found.
 *
 * Saves/restores %rbx, %r12-%r15 (SysV callee-saved).  NOTE(review):
 * %rsi is caller-saved in the SysV ABI yet is saved too -- presumably
 * for the Win64 convention where rsi is callee-saved; confirm before
 * removing.  The save slots live below %rsp (red zone + LocalVarsSize),
 * so this must not be used from signal/kernel context.
 */
cannam@89 145 longest_match:
cannam@89 146 /*
cannam@89 147 * Retrieve the function arguments. %curmatch will hold cur_match
cannam@89 148 * throughout the entire function (passed via rsi on amd64).
cannam@89 149 * rdi will hold the pointer to the deflate_state (first arg on amd64)
cannam@89 150 */
cannam@89 151 mov %rsi, save_rsi
cannam@89 152 mov %rbx, save_rbx
cannam@89 153 mov %r12, save_r12
cannam@89 154 mov %r13, save_r13
cannam@89 155 mov %r14, save_r14
cannam@89 156 mov %r15, save_r15
cannam@89 157
cannam@89 158 /* uInt wmask = s->w_mask; */
cannam@89 159 /* unsigned chain_length = s->max_chain_length; */
cannam@89 160 /* if (s->prev_length >= s->good_match) { */
cannam@89 161 /* chain_length >>= 2; */
cannam@89 162 /* } */
cannam@89 163
cannam@89 164 movl dsPrevLen, %eax
cannam@89 165 movl dsGoodMatch, %ebx
cannam@89 166 cmpl %ebx, %eax
cannam@89 167 movl dsWMask, %eax
cannam@89 168 movl dsMaxChainLen, %chainlenwmask
cannam@89 169 jl LastMatchGood
cannam@89 170 shrl $2, %chainlenwmask
cannam@89 171 LastMatchGood:
cannam@89 172
cannam@89 173 /* chainlen is decremented once beforehand so that the function can */
cannam@89 174 /* use the sign flag instead of the zero flag for the exit test. */
cannam@89 175 /* It is then shifted into the high word, to make room for the wmask */
cannam@89 176 /* value, which it will always accompany. */
cannam@89 177
cannam@89 178 decl %chainlenwmask
cannam@89 179 shll $16, %chainlenwmask
cannam@89 180 orl %eax, %chainlenwmask
cannam@89 181
cannam@89 182 /* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */
cannam@89 183
cannam@89 184 movl dsNiceMatch, %eax
cannam@89 185 movl dsLookahead, %ebx
cannam@89 186 cmpl %eax, %ebx
cannam@89 187 jl LookaheadLess
cannam@89 188 movl %eax, %ebx
cannam@89 189 LookaheadLess: movl %ebx, %nicematch
cannam@89 190
cannam@89 191 /* register Bytef *scan = s->window + s->strstart; */
cannam@89 192
cannam@89 193 mov dsWindow, %window
cannam@89 194 movl dsStrStart, %limitd
cannam@89 195 lea (%limit, %window), %scan
cannam@89 196
cannam@89 197 /* Determine how many bytes the scan ptr is off from being */
cannam@89 198 /* dword-aligned. */
cannam@89 199
cannam@89 200 mov %scan, %scanalign
cannam@89 201 negl %scanalignd
cannam@89 202 andl $3, %scanalignd
cannam@89 203
cannam@89 204 /* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */
cannam@89 205 /* s->strstart - (IPos)MAX_DIST(s) : NIL; */
cannam@89 206
cannam@89 207 movl dsWSize, %eax
cannam@89 208 subl $MIN_LOOKAHEAD, %eax
cannam@89 209 xorl %ecx, %ecx
cannam@89 210 subl %eax, %limitd
cannam@89 211 cmovng %ecx, %limitd
cannam@89 212
cannam@89 213 /* int best_len = s->prev_length; */
cannam@89 214
cannam@89 215 movl dsPrevLen, %bestlend
cannam@89 216
cannam@89 217 /* Store the sum of s->window + best_len in %windowbestlen locally, and in memory. */
cannam@89 218
cannam@89 219 lea (%window, %bestlen), %windowbestlen
cannam@89 220 mov %windowbestlen, _windowbestlen
cannam@89 221
cannam@89 222 /* register ush scan_start = *(ushf*)scan; */
cannam@89 223 /* register ush scan_end = *(ushf*)(scan+best_len-1); */
cannam@89 224 /* Posf *prev = s->prev; */
cannam@89 225
cannam@89 226 movzwl (%scan), %scanstart
cannam@89 227 movzwl -1(%scan, %bestlen), %scanend
cannam@89 228 mov dsPrev, %prev
cannam@89 229
cannam@89 230 /* Jump into the main loop. */
cannam@89 231
cannam@89 232 movl %chainlenwmask, _chainlenwmask
cannam@89 233 jmp LoopEntry
cannam@89 234
cannam@89 235 .balign 16
cannam@89 236
cannam@89 237 /* do {
cannam@89 238 * match = s->window + cur_match;
cannam@89 239 * if (*(ushf*)(match+best_len-1) != scan_end ||
cannam@89 240 * *(ushf*)match != scan_start) continue;
cannam@89 241 * [...]
cannam@89 242 * } while ((cur_match = prev[cur_match & wmask]) > limit
cannam@89 243 * && --chain_length != 0);
cannam@89 244 *
cannam@89 245 * Here is the inner loop of the function. The function will spend the
cannam@89 246 * majority of its time in this loop, and majority of that time will
cannam@89 247 * be spent in the first ten instructions.
cannam@89 248 */
cannam@89 249 LookupLoop:
cannam@89 250 andl %chainlenwmask, %curmatchd
cannam@89 251 movzwl (%prev, %curmatch, 2), %curmatchd
cannam@89 252 cmpl %limitd, %curmatchd
cannam@89 253 jbe LeaveNow
cannam@89 254 subl $0x00010000, %chainlenwmask
cannam@89 255 js LeaveNow
cannam@89 256 LoopEntry: cmpw -1(%windowbestlen, %curmatch), %scanendw
cannam@89 257 jne LookupLoop
cannam@89 258 cmpw %scanstartw, (%window, %curmatch)
cannam@89 259 jne LookupLoop
cannam@89 260
cannam@89 261 /* Store the current value of chainlen. */
cannam@89 262 movl %chainlenwmask, _chainlenwmask
cannam@89 263
cannam@89 264 /* %scan is the string under scrutiny, and %prev to the string we */
cannam@89 265 /* are hoping to match it up with. In actuality, %esi and %edi are */
cannam@89 266 /* both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and %edx is */
cannam@89 267 /* initialized to -(MAX_MATCH_8 - scanalign). */
cannam@89 268
cannam@89 269 mov $(-MAX_MATCH_8), %rdx
cannam@89 270 lea (%curmatch, %window), %windowbestlen
cannam@89 271 lea MAX_MATCH_8(%windowbestlen, %scanalign), %windowbestlen
cannam@89 272 lea MAX_MATCH_8(%scan, %scanalign), %prev
cannam@89 273
cannam@89 274 /* the prefetching below makes very little difference... */
cannam@89 275 prefetcht1 (%windowbestlen, %rdx)
cannam@89 276 prefetcht1 (%prev, %rdx)
cannam@89 277
cannam@89 278 /*
cannam@89 279 * Test the strings for equality, 8 bytes at a time. At the end,
cannam@89 280 * adjust %rdx so that it is offset to the exact byte that mismatched.
cannam@89 281 *
cannam@89 282 * It should be confessed that this loop usually does not represent
cannam@89 283 * much of the total running time. Replacing it with a more
cannam@89 284 * straightforward "rep cmpsb" would not drastically degrade
cannam@89 285 * performance -- unrolling it, for example, makes no difference.
cannam@89 286 */
cannam@89 287
cannam@89 288 #undef USE_SSE /* works, but is 6-7% slower, than non-SSE... */
cannam@89 289
cannam@89 290 LoopCmps:
cannam@89 291 #ifdef USE_SSE
cannam@89 292 /* Preload the SSE registers */
cannam@89 293 movdqu (%windowbestlen, %rdx), %xmm1
cannam@89 294 movdqu (%prev, %rdx), %xmm2
cannam@89 295 pcmpeqb %xmm2, %xmm1
cannam@89 296 movdqu 16(%windowbestlen, %rdx), %xmm3
cannam@89 297 movdqu 16(%prev, %rdx), %xmm4
cannam@89 298 pcmpeqb %xmm4, %xmm3
cannam@89 299 movdqu 32(%windowbestlen, %rdx), %xmm5
cannam@89 300 movdqu 32(%prev, %rdx), %xmm6
cannam@89 301 pcmpeqb %xmm6, %xmm5
cannam@89 302 movdqu 48(%windowbestlen, %rdx), %xmm7
cannam@89 303 movdqu 48(%prev, %rdx), %xmm8
cannam@89 304 pcmpeqb %xmm8, %xmm7
cannam@89 305
cannam@89 306 /* Check the comparisons' results */
cannam@89 307 pmovmskb %xmm1, %rax
cannam@89 308 notw %ax
cannam@89 309 bsfw %ax, %ax
cannam@89 310 jnz LeaveLoopCmps
cannam@89 311
cannam@89 312 /* this is the only iteration of the loop with a possibility of having
cannam@89 313 incremented rdx by 0x108 (each loop iteration add 16*4 = 0x40
cannam@89 314 and (0x40*4)+8=0x108 */
cannam@89 315 add $8, %rdx
cannam@89 316 jz LenMaximum
cannam@89 317 add $8, %rdx
cannam@89 318
cannam@89 319
cannam@89 320 pmovmskb %xmm3, %rax
cannam@89 321 notw %ax
cannam@89 322 bsfw %ax, %ax
cannam@89 323 jnz LeaveLoopCmps
cannam@89 324
cannam@89 325
cannam@89 326 add $16, %rdx
cannam@89 327
cannam@89 328
cannam@89 329 pmovmskb %xmm5, %rax
cannam@89 330 notw %ax
cannam@89 331 bsfw %ax, %ax
cannam@89 332 jnz LeaveLoopCmps
cannam@89 333
cannam@89 334 add $16, %rdx
cannam@89 335
cannam@89 336
cannam@89 337 pmovmskb %xmm7, %rax
cannam@89 338 notw %ax
cannam@89 339 bsfw %ax, %ax
cannam@89 340 jnz LeaveLoopCmps
cannam@89 341
cannam@89 342 add $16, %rdx
cannam@89 343
cannam@89 344 jmp LoopCmps
cannam@89 345 LeaveLoopCmps: add %rax, %rdx
cannam@89 346 #else
cannam@89 347 mov (%windowbestlen, %rdx), %rax
cannam@89 348 xor (%prev, %rdx), %rax
cannam@89 349 jnz LeaveLoopCmps
cannam@89 350
cannam@89 351 mov 8(%windowbestlen, %rdx), %rax
cannam@89 352 xor 8(%prev, %rdx), %rax
cannam@89 353 jnz LeaveLoopCmps8
cannam@89 354
cannam@89 355 mov 16(%windowbestlen, %rdx), %rax
cannam@89 356 xor 16(%prev, %rdx), %rax
cannam@89 357 jnz LeaveLoopCmps16
cannam@89 358
cannam@89 359 add $24, %rdx
cannam@89 360 jnz LoopCmps
cannam@89 361 jmp LenMaximum
cannam@89 362 # if 0
cannam@89 363 /*
cannam@89 364 * This three-liner is tantalizingly simple, but bsf is a slow instruction,
cannam@89 365 * and the complicated alternative down below is quite a bit faster. Sad...
cannam@89 366 */
cannam@89 367
cannam@89 368 LeaveLoopCmps: bsf %rax, %rax /* find the first non-zero bit */
cannam@89 369 shrl $3, %eax /* divide by 8 to get the byte */
cannam@89 370 add %rax, %rdx
cannam@89 371 # else
cannam@89 372 LeaveLoopCmps16:
cannam@89 373 add $8, %rdx
cannam@89 374 LeaveLoopCmps8:
cannam@89 375 add $8, %rdx
cannam@89 376 LeaveLoopCmps: testl $0xFFFFFFFF, %eax /* Check the first 4 bytes */
cannam@89 377 jnz Check16
cannam@89 378 add $4, %rdx
cannam@89 379 shr $32, %rax
cannam@89 380 Check16: testw $0xFFFF, %ax
cannam@89 381 jnz LenLower
cannam@89 382 add $2, %rdx
cannam@89 383 shrl $16, %eax
/* At LenLower the XOR difference's low 16 bits are nonzero.  If the low
   byte matched (%al == 0), subb borrows (CF=1) and the adc advances
   %rdx one extra byte to the true mismatch position; if %al != 0 the
   mismatch is already at %rdx and CF stays clear. */
cannam@89 384 LenLower: subb $1, %al
cannam@89 385 adc $0, %rdx
cannam@89 386 # endif
cannam@89 387 #endif
cannam@89 388
cannam@89 389 /* Calculate the length of the match. If it is longer than MAX_MATCH, */
cannam@89 390 /* then automatically accept it as the best possible match and leave. */
cannam@89 391
cannam@89 392 lea (%prev, %rdx), %rax
cannam@89 393 sub %scan, %rax
cannam@89 394 cmpl $MAX_MATCH, %eax
cannam@89 395 jge LenMaximum
cannam@89 396
cannam@89 397 /* If the length of the match is not longer than the best match we */
cannam@89 398 /* have so far, then forget it and return to the lookup loop. */
cannam@89 399
cannam@89 400 cmpl %bestlend, %eax
cannam@89 401 jg LongerMatch
cannam@89 402 mov _windowbestlen, %windowbestlen
cannam@89 403 mov dsPrev, %prev
cannam@89 404 movl _chainlenwmask, %edx
cannam@89 405 jmp LookupLoop
cannam@89 406
cannam@89 407 /* s->match_start = cur_match; */
cannam@89 408 /* best_len = len; */
cannam@89 409 /* if (len >= nice_match) break; */
cannam@89 410 /* scan_end = *(ushf*)(scan+best_len-1); */
cannam@89 411
cannam@89 412 LongerMatch:
cannam@89 413 movl %eax, %bestlend
cannam@89 414 movl %curmatchd, dsMatchStart
cannam@89 415 cmpl %nicematch, %eax
cannam@89 416 jge LeaveNow
cannam@89 417
cannam@89 418 lea (%window, %bestlen), %windowbestlen
cannam@89 419 mov %windowbestlen, _windowbestlen
cannam@89 420
cannam@89 421 movzwl -1(%scan, %rax), %scanend
cannam@89 422 mov dsPrev, %prev
cannam@89 423 movl _chainlenwmask, %chainlenwmask
cannam@89 424 jmp LookupLoop
cannam@89 425
cannam@89 426 /* Accept the current string, with the maximum possible length. */
cannam@89 427
cannam@89 428 LenMaximum:
cannam@89 429 movl $MAX_MATCH, %bestlend
cannam@89 430 movl %curmatchd, dsMatchStart
cannam@89 431
cannam@89 432 /* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */
cannam@89 433 /* return s->lookahead; */
cannam@89 434
/* cmovngl = "move if not greater" (signed): clamps the return value to
   s->lookahead when best_len exceeds it, mirroring the C above. */
cannam@89 435 LeaveNow:
cannam@89 436 movl dsLookahead, %eax
cannam@89 437 cmpl %eax, %bestlend
cannam@89 438 cmovngl %bestlend, %eax
cannam@89 439 LookaheadRet:
cannam@89 440
cannam@89 441 /* Restore the registers and return from whence we came. */
cannam@89 442
cannam@89 443 mov save_rsi, %rsi
cannam@89 444 mov save_rbx, %rbx
cannam@89 445 mov save_r12, %r12
cannam@89 446 mov save_r13, %r13
cannam@89 447 mov save_r14, %r14
cannam@89 448 mov save_r15, %r15
cannam@89 449
cannam@89 450 ret
cannam@89 451
/* void match_init(void) -- deliberate no-op: this implementation keeps
   no state needing initialization.  NOTE(review): presumably exported
   to satisfy zlib's ASMV match_init hook -- confirm against deflate.c. */
cannam@89 452 match_init: ret