/*
 * match.S -- optimized version of longest_match()
 * based on the similar work by Gilles Vollant, and Brian Raiter, written 1998
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the BSD License. Use by owners of Che Guevara
 * paraphernalia is prohibited, where possible, and highly discouraged
 * elsewhere.
 */

/* Prepend the underscore expected by some toolchains (e.g. Mach-O) unless
 * the build asks for unprefixed symbols. */
#ifndef NO_UNDERLINE
# define match_init	_match_init
# define longest_match	_longest_match
#endif

/* Register aliases: this file is preprocessed (capital .S), so these
 * #defines give the raw registers mnemonic names below. */
#define scanend		ebx
#define scanendw	bx
#define chainlenwmask	edx /* high word: current chain len  low word: s->wmask */
#define curmatch	rsi
#define curmatchd	esi
#define windowbestlen	r8
#define scanalign	r9
#define scanalignd	r9d
#define window		r10
#define bestlen		r11
#define bestlend	r11d
#define scanstart	r12d
#define scanstartw	r12w
#define scan		r13
#define nicematch	r14d
#define limit		r15
#define limitd		r15d
#define prev		rcx

/*
 * The 258 is a "magic number", not a parameter -- changing it
 * breaks the hell loose
 */
#define MAX_MATCH	(258)
#define MIN_MATCH	(3)
#define MIN_LOOKAHEAD	(MAX_MATCH + MIN_MATCH + 1)
#define MAX_MATCH_8	((MAX_MATCH + 7) & ~7)

/* stack frame offsets -- all negative offsets from %rsp, i.e. the slots
 * live below the stack pointer (the amd64 red zone; safe here because
 * longest_match is a leaf function and makes no calls). */
#define LocalVarsSize	(112)
#define _chainlenwmask	( 8-LocalVarsSize)(%rsp)
#define _windowbestlen	(16-LocalVarsSize)(%rsp)
#define save_r14	(24-LocalVarsSize)(%rsp)
#define save_rsi	(32-LocalVarsSize)(%rsp)
#define save_rbx	(40-LocalVarsSize)(%rsp)
#define save_r12	(56-LocalVarsSize)(%rsp)
#define save_r13	(64-LocalVarsSize)(%rsp)
#define save_r15	(80-LocalVarsSize)(%rsp)


.globl	match_init, longest_match

/*
 * On AMD64 the first argument of a function (in our case -- the pointer to
 * deflate_state structure) is passed in %rdi, hence our offsets below are
 * all off of that.
 */

/* you can check the structure offset by running

#include <stdlib.h>
#include <stdio.h>
#include "deflate.h"

void print_depl()
{
  deflate_state ds;
  deflate_state *s=&ds;
  printf("size pointer=%u\n",(int)sizeof(void*));

  printf("#define dsWSize         (%3u)(%%rdi)\n",(int)(((char*)&(s->w_size))-((char*)s)));
  printf("#define dsWMask         (%3u)(%%rdi)\n",(int)(((char*)&(s->w_mask))-((char*)s)));
  printf("#define dsWindow        (%3u)(%%rdi)\n",(int)(((char*)&(s->window))-((char*)s)));
  printf("#define dsPrev          (%3u)(%%rdi)\n",(int)(((char*)&(s->prev))-((char*)s)));
  printf("#define dsMatchLen      (%3u)(%%rdi)\n",(int)(((char*)&(s->match_length))-((char*)s)));
  printf("#define dsPrevMatch     (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_match))-((char*)s)));
  printf("#define dsStrStart      (%3u)(%%rdi)\n",(int)(((char*)&(s->strstart))-((char*)s)));
  printf("#define dsMatchStart    (%3u)(%%rdi)\n",(int)(((char*)&(s->match_start))-((char*)s)));
  printf("#define dsLookahead     (%3u)(%%rdi)\n",(int)(((char*)&(s->lookahead))-((char*)s)));
  printf("#define dsPrevLen       (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_length))-((char*)s)));
  printf("#define dsMaxChainLen   (%3u)(%%rdi)\n",(int)(((char*)&(s->max_chain_length))-((char*)s)));
  printf("#define dsGoodMatch     (%3u)(%%rdi)\n",(int)(((char*)&(s->good_match))-((char*)s)));
  printf("#define dsNiceMatch     (%3u)(%%rdi)\n",(int)(((char*)&(s->nice_match))-((char*)s)));
}

*/


/*
  to compile for XCode 3.2 on MacOSX x86_64
  - run "gcc -g -c -DCURRENT_LINX_XCODE_MAC_X64_STRUCTURE amd64-match.S"
  (NOTE(review): the original comment said -DXCODE_MAC_X64_STRUCTURE, which
  does not match the guard tested below -- confirm the intended macro name.)
 */


#ifndef CURRENT_LINX_XCODE_MAC_X64_STRUCTURE
/* Default (Linux/ELF) layout of deflate_state -- regenerate with
 * print_depl() above if deflate.h changes. */
#define dsWSize		( 68)(%rdi)
#define dsWMask		( 76)(%rdi)
#define dsWindow	( 80)(%rdi)
#define dsPrev		( 96)(%rdi)
#define dsMatchLen	(144)(%rdi)
#define dsPrevMatch	(148)(%rdi)
#define dsStrStart	(156)(%rdi)
#define dsMatchStart	(160)(%rdi)
#define dsLookahead	(164)(%rdi)
#define dsPrevLen	(168)(%rdi)
#define dsMaxChainLen	(172)(%rdi)
#define dsGoodMatch	(188)(%rdi)
#define dsNiceMatch	(192)(%rdi)

#else

/* XCode / MacOSX x86_64 layout, adjustable by STRUCT_OFFSET. */
#ifndef STRUCT_OFFSET
# define STRUCT_OFFSET	(0)
#endif


#define dsWSize		( 56 + STRUCT_OFFSET)(%rdi)
#define dsWMask		( 64 + STRUCT_OFFSET)(%rdi)
#define dsWindow	( 72 + STRUCT_OFFSET)(%rdi)
#define dsPrev		( 88 + STRUCT_OFFSET)(%rdi)
#define dsMatchLen	(136 + STRUCT_OFFSET)(%rdi)
#define dsPrevMatch	(140 + STRUCT_OFFSET)(%rdi)
#define dsStrStart	(148 + STRUCT_OFFSET)(%rdi)
#define dsMatchStart	(152 + STRUCT_OFFSET)(%rdi)
#define dsLookahead	(156 + STRUCT_OFFSET)(%rdi)
#define dsPrevLen	(160 + STRUCT_OFFSET)(%rdi)
#define dsMaxChainLen	(164 + STRUCT_OFFSET)(%rdi)
#define dsGoodMatch	(180 + STRUCT_OFFSET)(%rdi)
#define dsNiceMatch	(184 + STRUCT_OFFSET)(%rdi)

#endif
|
.text

/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */

longest_match:
/*
 * Retrieve the function arguments. %curmatch will hold cur_match
 * throughout the entire function (passed via rsi on amd64).
 * rdi will hold the pointer to the deflate_state (first arg on amd64)
 *
 * Save the callee-saved registers we clobber into the red-zone slots
 * defined above (leaf function -- no stack adjustment needed).
 */
	mov	%rsi, save_rsi
	mov	%rbx, save_rbx
	mov	%r12, save_r12
	mov	%r13, save_r13
	mov	%r14, save_r14
	mov	%r15, save_r15

/* uInt wmask = s->w_mask;						*/
/* unsigned chain_length = s->max_chain_length;				*/
/* if (s->prev_length >= s->good_match) {				*/
/*     chain_length >>= 2;						*/
/* }									*/

	movl	dsPrevLen, %eax
	movl	dsGoodMatch, %ebx
	cmpl	%ebx, %eax
	movl	dsWMask, %eax
	movl	dsMaxChainLen, %chainlenwmask
	jl	LastMatchGood
	shrl	$2, %chainlenwmask
LastMatchGood:

/* chainlen is decremented once beforehand so that the function can	*/
/* use the sign flag instead of the zero flag for the exit test.	*/
/* It is then shifted into the high word, to make room for the wmask	*/
/* value, which it will always accompany.				*/

	decl	%chainlenwmask
	shll	$16, %chainlenwmask
	orl	%eax, %chainlenwmask

/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;	*/

	movl	dsNiceMatch, %eax
	movl	dsLookahead, %ebx
	cmpl	%eax, %ebx
	jl	LookaheadLess
	movl	%eax, %ebx
LookaheadLess:	movl	%ebx, %nicematch

/* register Bytef *scan = s->window + s->strstart;			*/

	mov	dsWindow, %window
	movl	dsStrStart, %limitd
	lea	(%limit, %window), %scan

/* Determine how many bytes the scan ptr is off from being		*/
/* dword-aligned.							*/

	mov	%scan, %scanalign
	negl	%scanalignd
	andl	$3, %scanalignd

/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ?			*/
/*     s->strstart - (IPos)MAX_DIST(s) : NIL;				*/
/* (cmovng clamps a negative difference to zero)			*/

	movl	dsWSize, %eax
	subl	$MIN_LOOKAHEAD, %eax
	xorl	%ecx, %ecx
	subl	%eax, %limitd
	cmovng	%ecx, %limitd

/* int best_len = s->prev_length;					*/

	movl	dsPrevLen, %bestlend

/* Store the sum of s->window + best_len in %windowbestlen locally, and in memory. */

	lea	(%window, %bestlen), %windowbestlen
	mov	%windowbestlen, _windowbestlen

/* register ush scan_start = *(ushf*)scan;				*/
/* register ush scan_end   = *(ushf*)(scan+best_len-1);			*/
/* Posf *prev = s->prev;						*/

	movzwl	(%scan), %scanstart
	movzwl	-1(%scan, %bestlen), %scanend
	mov	dsPrev, %prev

/* Jump into the main loop.						*/

	movl	%chainlenwmask, _chainlenwmask
	jmp	LoopEntry

.balign 16

/* do {
 *     match = s->window + cur_match;
 *     if (*(ushf*)(match+best_len-1) != scan_end ||
 *         *(ushf*)match != scan_start) continue;
 *     [...]
 * } while ((cur_match = prev[cur_match & wmask]) > limit
 *          && --chain_length != 0);
 *
 * Here is the inner loop of the function. The function will spend the
 * majority of its time in this loop, and majority of that time will
 * be spent in the first ten instructions.
 */
LookupLoop:
	andl	%chainlenwmask, %curmatchd
	movzwl	(%prev, %curmatch, 2), %curmatchd
	cmpl	%limitd, %curmatchd
	jbe	LeaveNow
	subl	$0x00010000, %chainlenwmask	/* --chain_length (high word) */
	js	LeaveNow
LoopEntry:	cmpw	-1(%windowbestlen, %curmatch), %scanendw
	jne	LookupLoop
	cmpw	%scanstartw, (%window, %curmatch)
	jne	LookupLoop

/* Store the current value of chainlen.					*/
	movl	%chainlenwmask, _chainlenwmask

/* %scan is the string under scrutiny, and %prev to the string we	*/
/* are hoping to match it up with. In actuality, %esi and %edi are	*/
/* both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and %edx is	*/
/* initialized to -(MAX_MATCH_8 - scanalign).				*/

	mov	$(-MAX_MATCH_8), %rdx
	lea	(%curmatch, %window), %windowbestlen
	lea	MAX_MATCH_8(%windowbestlen, %scanalign), %windowbestlen
	lea	MAX_MATCH_8(%scan, %scanalign), %prev

/* the prefetching below makes very little difference... */
	prefetcht1	(%windowbestlen, %rdx)
	prefetcht1	(%prev, %rdx)

/*
 * Test the strings for equality, 8 bytes at a time. At the end,
 * adjust %rdx so that it is offset to the exact byte that mismatched.
 *
 * It should be confessed that this loop usually does not represent
 * much of the total running time. Replacing it with a more
 * straightforward "rep cmpsb" would not drastically degrade
 * performance -- unrolling it, for example, makes no difference.
 */

#undef USE_SSE	/* works, but is 6-7% slower, than non-SSE... */

LoopCmps:
#ifdef USE_SSE
	/* Preload the SSE registers */
	movdqu	  (%windowbestlen, %rdx), %xmm1
	movdqu	  (%prev, %rdx), %xmm2
	pcmpeqb	%xmm2, %xmm1
	movdqu	16(%windowbestlen, %rdx), %xmm3
	movdqu	16(%prev, %rdx), %xmm4
	pcmpeqb	%xmm4, %xmm3
	movdqu	32(%windowbestlen, %rdx), %xmm5
	movdqu	32(%prev, %rdx), %xmm6
	pcmpeqb	%xmm6, %xmm5
	movdqu	48(%windowbestlen, %rdx), %xmm7
	movdqu	48(%prev, %rdx), %xmm8
	pcmpeqb	%xmm8, %xmm7

	/* Check the comparisons' results */
	pmovmskb %xmm1, %rax
	notw	%ax
	bsfw	%ax, %ax
	jnz	LeaveLoopCmps

	/* this is the only iteration of the loop with a possibility of having
	   incremented rdx by 0x108 (each loop iteration add 16*4 = 0x40
	   and (0x40*4)+8=0x108 */
	add	$8, %rdx
	jz	LenMaximum
	add	$8, %rdx


	pmovmskb %xmm3, %rax
	notw	%ax
	bsfw	%ax, %ax
	jnz	LeaveLoopCmps


	add	$16, %rdx


	pmovmskb %xmm5, %rax
	notw	%ax
	bsfw	%ax, %ax
	jnz	LeaveLoopCmps

	add	$16, %rdx


	pmovmskb %xmm7, %rax
	notw	%ax
	bsfw	%ax, %ax
	jnz	LeaveLoopCmps

	add	$16, %rdx

	jmp	LoopCmps
LeaveLoopCmps:	add	%rax, %rdx
#else
	mov	  (%windowbestlen, %rdx), %rax
	xor	  (%prev, %rdx), %rax
	jnz	LeaveLoopCmps

	mov	 8(%windowbestlen, %rdx), %rax
	xor	 8(%prev, %rdx), %rax
	jnz	LeaveLoopCmps8

	mov	16(%windowbestlen, %rdx), %rax
	xor	16(%prev, %rdx), %rax
	jnz	LeaveLoopCmps16

	add	$24, %rdx
	jnz	LoopCmps
	jmp	LenMaximum
# if 0
/*
 * This three-liner is tantalizingly simple, but bsf is a slow instruction,
 * and the complicated alternative down below is quite a bit faster. Sad...
 */

LeaveLoopCmps:	bsf	%rax, %rax /* find the first non-zero bit */
	shrl	$3, %eax /* divide by 8 to get the byte */
	add	%rax, %rdx
# else
LeaveLoopCmps16:
	add	$8, %rdx
LeaveLoopCmps8:
	add	$8, %rdx
LeaveLoopCmps:	testl	$0xFFFFFFFF, %eax /* Check the first 4 bytes */
	jnz	Check16
	add	$4, %rdx
	shr	$32, %rax
Check16:	testw	$0xFFFF, %ax
	jnz	LenLower
	add	$2, %rdx
	shrl	$16, %eax
LenLower:	subb	$1, %al		/* borrow iff the low byte matched... */
	adc	$0, %rdx	/* ...so carry-in bumps %rdx past it */
# endif
#endif

/* Calculate the length of the match. If it is longer than MAX_MATCH,	*/
/* then automatically accept it as the best possible match and leave.	*/

	lea	(%prev, %rdx), %rax
	sub	%scan, %rax
	cmpl	$MAX_MATCH, %eax
	jge	LenMaximum

/* If the length of the match is not longer than the best match we	*/
/* have so far, then forget it and return to the lookup loop.		*/

	cmpl	%bestlend, %eax
	jg	LongerMatch
	mov	_windowbestlen, %windowbestlen
	mov	dsPrev, %prev
	movl	_chainlenwmask, %edx
	jmp	LookupLoop

/*         s->match_start = cur_match;					*/
/*         best_len = len;						*/
/*         if (len >= nice_match) break;				*/
/*         scan_end = *(ushf*)(scan+best_len-1);			*/

LongerMatch:
	movl	%eax, %bestlend
	movl	%curmatchd, dsMatchStart
	cmpl	%nicematch, %eax
	jge	LeaveNow

	lea	(%window, %bestlen), %windowbestlen
	mov	%windowbestlen, _windowbestlen

	movzwl	-1(%scan, %rax), %scanend
	mov	dsPrev, %prev
	movl	_chainlenwmask, %chainlenwmask
	jmp	LookupLoop

/* Accept the current string, with the maximum possible length.		*/

LenMaximum:
	movl	$MAX_MATCH, %bestlend
	movl	%curmatchd, dsMatchStart

/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len;		*/
/* return s->lookahead;							*/

LeaveNow:
	movl	dsLookahead, %eax
	cmpl	%eax, %bestlend
	cmovngl	%bestlend, %eax		/* return min(best_len, lookahead) */
LookaheadRet:

/* Restore the registers and return from whence we came.		*/

	mov	save_rsi, %rsi
	mov	save_rbx, %rbx
	mov	save_r12, %r12
	mov	save_r13, %r13
	mov	save_r14, %r14
	mov	save_r15, %r15

	ret
|
/* match_init() is a no-op in this implementation; it exists only to
 * satisfy the interface expected by deflate.c (ASMV builds). */
match_init:	ret
|