f3919878 1/* deflate.c -- compress data using the deflation algorithm
2 * Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h
4 */
5
6/*
7 * ALGORITHM
8 *
9 * The "deflation" process depends on being able to identify portions
10 * of the input text which are identical to earlier input (within a
11 * sliding window trailing behind the input currently being processed).
12 *
13 * The most straightforward technique turns out to be the fastest for
14 * most input files: try all possible matches and select the longest.
15 * The key feature of this algorithm is that insertions into the string
16 * dictionary are very simple and thus fast, and deletions are avoided
17 * completely. Insertions are performed at each input character, whereas
18 * string matches are performed only when the previous match ends. So it
19 * is preferable to spend more time in matches to allow very fast string
20 * insertions and avoid deletions. The matching algorithm for small
 21 * strings is inspired by that of Rabin & Karp. A brute force approach
22 * is used to find longer strings when a small match has been found.
23 * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
24 * (by Leonid Broukhis).
25 * A previous version of this file used a more sophisticated algorithm
26 * (by Fiala and Greene) which is guaranteed to run in linear amortized
27 * time, but has a larger average cost, uses more memory and is patented.
28 * However the F&G algorithm may be faster for some highly redundant
29 * files if the parameter max_chain_length (described below) is too large.
30 *
31 * ACKNOWLEDGEMENTS
32 *
33 * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
34 * I found it in 'freeze' written by Leonid Broukhis.
35 * Thanks to many people for bug reports and testing.
36 *
37 * REFERENCES
38 *
 39 * Deutsch, L.P., "DEFLATE Compressed Data Format Specification".
 40 * Available at http://tools.ietf.org/html/rfc1951
41 *
42 * A description of the Rabin and Karp algorithm is given in the book
43 * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
44 *
45 * Fiala,E.R., and Greene,D.H.
 46 * Data Compression with Finite Windows, Comm. ACM, 32,4 (1989) 490-505
47 *
48 */
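/* A simplified sketch of the loop described above (the greedy variant, as
 * implemented by deflate_fast() below; deflate_slow() adds the lazy match
 * evaluation):
 *
 *     while (there is input left) {
 *         INSERT_STRING(s, s->strstart, hash_head);
 *         len = (hash_head != NIL) ? longest_match(s, hash_head) : 0;
 *         if (len >= MIN_MATCH)
 *             emit a (length, distance) pair and insert the matched bytes;
 *         else
 *             emit the literal s->window[s->strstart];
 *         advance s->strstart accordingly;
 *     }
 */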
49
50/* @(#) $Id$ */
51
52#include "deflate.h"
53
54const char deflate_copyright[] =
8e074056 55 " deflate 1.2.8.f-Proxmark3 Copyright 1995-2013 Jean-loup Gailly and Mark Adler ";
f3919878 56/*
57 If you use the zlib library in a product, an acknowledgment is welcome
58 in the documentation of your product. If for some reason you cannot
59 include such an acknowledgment, I would appreciate that you keep this
60 copyright string in the executable of your product.
61 */
62
8e074056 63//-----------------------------------------------------------------------------
64// This version of zlib is modified for use within the Proxmark3 project.
65// Files from the original distribution which are not required for this
66// purpose are not included. All modifications can easily be found
67// by searching for #ifdef ZLIB_PM3_TUNED and #ifndef ZLIB_PM3_TUNED.
68//-----------------------------------------------------------------------------
69
70
71
f3919878 72/* ===========================================================================
73 * Function prototypes.
74 */
75typedef enum {
76 need_more, /* block not completed, need more input or more output */
77 block_done, /* block flush performed */
78 finish_started, /* finish started, need only more output at next deflate */
79 finish_done /* finish done, accept no more input or output */
80} block_state;
81
82typedef block_state (*compress_func) OF((deflate_state *s, int flush));
83/* Compression function. Returns the block state after the call. */
84
85local void fill_window OF((deflate_state *s));
86local block_state deflate_stored OF((deflate_state *s, int flush));
87local block_state deflate_fast OF((deflate_state *s, int flush));
88#ifndef FASTEST
89local block_state deflate_slow OF((deflate_state *s, int flush));
90#endif
91local block_state deflate_rle OF((deflate_state *s, int flush));
92local block_state deflate_huff OF((deflate_state *s, int flush));
93local void lm_init OF((deflate_state *s));
94local void putShortMSB OF((deflate_state *s, uInt b));
95local void flush_pending OF((z_streamp strm));
96local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
97#ifdef ASMV
98 void match_init OF((void)); /* asm code initialization */
99 uInt longest_match OF((deflate_state *s, IPos cur_match));
100#else
101local uInt longest_match OF((deflate_state *s, IPos cur_match));
102#endif
103
104#ifdef DEBUG
105local void check_match OF((deflate_state *s, IPos start, IPos match,
106 int length));
107#endif
108
109/* ===========================================================================
110 * Local data
111 */
112
113#define NIL 0
114/* Tail of hash chains */
115
116#ifndef TOO_FAR
117# define TOO_FAR 4096
118#endif
119/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
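/* The idea behind TOO_FAR: a minimum-length match that far back needs a long
 * distance code, which typically costs more bits than simply coding the three
 * bytes as literals, so such matches are not worth taking.
 */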
120
121/* Values for max_lazy_match, good_match and max_chain_length, depending on
122 * the desired pack level (0..9). The values given below have been tuned to
123 * exclude worst case performance for pathological files. Better values may be
124 * found for specific files.
125 */
126typedef struct config_s {
127 ush good_length; /* reduce lazy search above this match length */
128 ush max_lazy; /* do not perform lazy search above this match length */
129 ush nice_length; /* quit search above this match length */
130 ush max_chain;
131 compress_func func;
132} config;
133
134#ifdef FASTEST
135local const config configuration_table[2] = {
136/* good lazy nice chain */
137/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
138/* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */
139#else
140local const config configuration_table[10] = {
141/* good lazy nice chain */
142/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
143/* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */
144/* 2 */ {4, 5, 16, 8, deflate_fast},
145/* 3 */ {4, 6, 32, 32, deflate_fast},
146
147/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
148/* 5 */ {8, 16, 32, 32, deflate_slow},
149/* 6 */ {8, 16, 128, 128, deflate_slow},
150/* 7 */ {8, 32, 128, 256, deflate_slow},
151/* 8 */ {32, 128, 258, 1024, deflate_slow},
152/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */
153#endif
154
155/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
156 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
157 * meaning.
158 */
159
160#define EQUAL 0
161/* result of memcmp for equal strings */
162
163#ifndef NO_DUMMY_DECL
164struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
165#endif
166
167/* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */
168#define RANK(f) (((f) << 1) - ((f) > 4 ? 9 : 0))
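/* With the flush values from zlib.h (Z_NO_FLUSH == 0, Z_PARTIAL_FLUSH == 1,
 * ..., Z_FINISH == 4, Z_BLOCK == 5) this gives, for example:
 *   RANK(Z_NO_FLUSH) = 0, RANK(Z_BLOCK) = (5<<1)-9 = 1, RANK(Z_PARTIAL_FLUSH) = 2
 * so Z_BLOCK really does rank between Z_NO_FLUSH and Z_PARTIAL_FLUSH.
 */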
169
170/* ===========================================================================
171 * Update a hash value with the given input byte
 172 * IN assertion: all calls to UPDATE_HASH are made with consecutive
173 * input characters, so that a running hash key can be computed from the
174 * previous key instead of complete recalculation each time.
175 */
176#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
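/* Example with the default memLevel of 8: hash_bits = 8+7 = 15 and
 * hash_shift = (15+MIN_MATCH-1)/MIN_MATCH = 5, so a byte is shifted completely
 * out of the 15-bit hash after MIN_MATCH further updates (3*5 >= hash_bits);
 * the hash therefore depends only on the last MIN_MATCH input bytes.
 */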
177
178
179/* ===========================================================================
180 * Insert string str in the dictionary and set match_head to the previous head
181 * of the hash chain (the most recent string with same hash key). Return
182 * the previous length of the hash chain.
183 * If this file is compiled with -DFASTEST, the compression level is forced
184 * to 1, and no hash chains are maintained.
 185 * IN assertion: all calls to INSERT_STRING are made with consecutive
186 * input characters and the first MIN_MATCH bytes of str are valid
187 * (except for the last MIN_MATCH-1 bytes of the input file).
188 */
189#ifdef FASTEST
190#define INSERT_STRING(s, str, match_head) \
191 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
192 match_head = s->head[s->ins_h], \
193 s->head[s->ins_h] = (Pos)(str))
194#else
195#define INSERT_STRING(s, str, match_head) \
196 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
197 match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
198 s->head[s->ins_h] = (Pos)(str))
199#endif
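/* In the non-FASTEST case this builds one linked list per hash value:
 * head[ins_h] holds the most recent position whose first MIN_MATCH bytes hash
 * to ins_h, and prev[pos & w_mask] holds the previous such position, so
 * longest_match() can walk head[h], prev[head[h] & w_mask], ... from the
 * newest candidate to the oldest one still inside the window.
 */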
200
201/* ===========================================================================
202 * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
203 * prev[] will be initialized on the fly.
204 */
205#define CLEAR_HASH(s) \
206 s->head[s->hash_size-1] = NIL; \
207 zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
208
209/* ========================================================================= */
210int ZEXPORT deflateInit_(strm, level, version, stream_size)
211 z_streamp strm;
212 int level;
213 const char *version;
214 int stream_size;
215{
216 return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
217 Z_DEFAULT_STRATEGY, version, stream_size);
218 /* To do: ignore strm->next_in if we use it as window */
219}
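
/* A minimal usage sketch (illustrative only): one-shot compression of an
 * in-memory buffer with default settings. src, src_len, dst, dst_cap and
 * out_len are placeholder names, not part of this file.
 */
#if 0
    z_stream strm;
    strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL;
    if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) == Z_OK) {
        strm.next_in  = (Bytef *)src;   strm.avail_in  = (uInt)src_len;
        strm.next_out = dst;            strm.avail_out = (uInt)dst_cap;
        if (deflate(&strm, Z_FINISH) == Z_STREAM_END)
            out_len = strm.total_out;   /* compressed size */
        deflateEnd(&strm);
    }
#endif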
220
221/* ========================================================================= */
222int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
223 version, stream_size)
224 z_streamp strm;
225 int level;
226 int method;
227 int windowBits;
228 int memLevel;
229 int strategy;
230 const char *version;
231 int stream_size;
232{
233 deflate_state *s;
234 int wrap = 1;
235 static const char my_version[] = ZLIB_VERSION;
236
237 ushf *overlay;
238 /* We overlay pending_buf and d_buf+l_buf. This works since the average
239 * output size for (length,distance) codes is <= 24 bits.
240 */
241
242 if (version == Z_NULL || version[0] != my_version[0] ||
243 stream_size != sizeof(z_stream)) {
244 return Z_VERSION_ERROR;
245 }
246 if (strm == Z_NULL) return Z_STREAM_ERROR;
247
248 strm->msg = Z_NULL;
249 if (strm->zalloc == (alloc_func)0) {
250#ifdef Z_SOLO
251 return Z_STREAM_ERROR;
252#else
253 strm->zalloc = zcalloc;
254 strm->opaque = (voidpf)0;
255#endif
256 }
257 if (strm->zfree == (free_func)0)
258#ifdef Z_SOLO
259 return Z_STREAM_ERROR;
260#else
261 strm->zfree = zcfree;
262#endif
263
264#ifdef FASTEST
265 if (level != 0) level = 1;
266#else
267 if (level == Z_DEFAULT_COMPRESSION) level = 6;
268#endif
269
270 if (windowBits < 0) { /* suppress zlib wrapper */
271 wrap = 0;
272 windowBits = -windowBits;
273 }
274#ifdef GZIP
275 else if (windowBits > 15) {
276 wrap = 2; /* write gzip wrapper instead */
277 windowBits -= 16;
278 }
279#endif
280 if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
281 windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
282 strategy < 0 || strategy > Z_FIXED) {
283 return Z_STREAM_ERROR;
284 }
285 if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */
286 s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
287 if (s == Z_NULL) return Z_MEM_ERROR;
288 strm->state = (struct internal_state FAR *)s;
289 s->strm = strm;
290
291 s->wrap = wrap;
292 s->gzhead = Z_NULL;
293 s->w_bits = windowBits;
294 s->w_size = 1 << s->w_bits;
295 s->w_mask = s->w_size - 1;
296
297 s->hash_bits = memLevel + 7;
298 s->hash_size = 1 << s->hash_bits;
299 s->hash_mask = s->hash_size - 1;
300 s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
301
302 s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
303 s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
304 s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
305
306 s->high_water = 0; /* nothing written to s->window yet */
307
308 s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
309
310 overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
311 s->pending_buf = (uchf *) overlay;
312 s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
313
314 if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
315 s->pending_buf == Z_NULL) {
316 s->status = FINISH_STATE;
317 strm->msg = ERR_MSG(Z_MEM_ERROR);
318 deflateEnd (strm);
319 return Z_MEM_ERROR;
320 }
321 s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
322 s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
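
    /* Layout implied by the assignments above (a sketch; with the default
     * memLevel of 8, lit_bufsize is 16K and the whole allocation is 64K):
     *   bytes [0, 4*lit_bufsize)              pending_buf (compressed output)
     *   bytes [lit_bufsize, 3*lit_bufsize)    d_buf, one ush distance per symbol
     *   bytes [3*lit_bufsize, 4*lit_bufsize)  l_buf, one length/literal byte per symbol
     */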
323
324 s->level = level;
325 s->strategy = strategy;
326 s->method = (Byte)method;
327
328 return deflateReset(strm);
329}
330
331/* ========================================================================= */
332int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
333 z_streamp strm;
334 const Bytef *dictionary;
335 uInt dictLength;
336{
337 deflate_state *s;
338 uInt str, n;
339 int wrap;
340 unsigned avail;
341 z_const unsigned char *next;
342
343 if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL)
344 return Z_STREAM_ERROR;
345 s = strm->state;
346 wrap = s->wrap;
347 if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead)
348 return Z_STREAM_ERROR;
349
350 /* when using zlib wrappers, compute Adler-32 for provided dictionary */
351 if (wrap == 1)
352 strm->adler = adler32(strm->adler, dictionary, dictLength);
353 s->wrap = 0; /* avoid computing Adler-32 in read_buf */
354
355 /* if dictionary would fill window, just replace the history */
356 if (dictLength >= s->w_size) {
357 if (wrap == 0) { /* already empty otherwise */
358 CLEAR_HASH(s);
359 s->strstart = 0;
360 s->block_start = 0L;
361 s->insert = 0;
362 }
363 dictionary += dictLength - s->w_size; /* use the tail */
364 dictLength = s->w_size;
365 }
366
367 /* insert dictionary into window and hash */
368 avail = strm->avail_in;
369 next = strm->next_in;
370 strm->avail_in = dictLength;
371 strm->next_in = (z_const Bytef *)dictionary;
372 fill_window(s);
373 while (s->lookahead >= MIN_MATCH) {
374 str = s->strstart;
375 n = s->lookahead - (MIN_MATCH-1);
376 do {
377 UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
378#ifndef FASTEST
379 s->prev[str & s->w_mask] = s->head[s->ins_h];
380#endif
381 s->head[s->ins_h] = (Pos)str;
382 str++;
383 } while (--n);
384 s->strstart = str;
385 s->lookahead = MIN_MATCH-1;
386 fill_window(s);
387 }
388 s->strstart += s->lookahead;
389 s->block_start = (long)s->strstart;
390 s->insert = s->lookahead;
391 s->lookahead = 0;
392 s->match_length = s->prev_length = MIN_MATCH-1;
393 s->match_available = 0;
394 strm->next_in = next;
395 strm->avail_in = avail;
396 s->wrap = wrap;
397 return Z_OK;
398}
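
/* Typical use (a sketch, per the zlib documentation): call this right after
 * deflateInit()/deflateInit2() or deflateReset() and before the first
 * deflate() call; the decompressor must be given the same dictionary via
 * inflateSetDictionary().
 */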
399
400/* ========================================================================= */
401int ZEXPORT deflateResetKeep (strm)
402 z_streamp strm;
403{
404 deflate_state *s;
405
406 if (strm == Z_NULL || strm->state == Z_NULL ||
407 strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) {
408 return Z_STREAM_ERROR;
409 }
410
411 strm->total_in = strm->total_out = 0;
412 strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
413 strm->data_type = Z_UNKNOWN;
414
415 s = (deflate_state *)strm->state;
416 s->pending = 0;
417 s->pending_out = s->pending_buf;
418
419 if (s->wrap < 0) {
420 s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
421 }
422 s->status = s->wrap ? INIT_STATE : BUSY_STATE;
423 strm->adler =
424#ifdef GZIP
425 s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
426#endif
427 adler32(0L, Z_NULL, 0);
428 s->last_flush = Z_NO_FLUSH;
429
430 _tr_init(s);
431
432 return Z_OK;
433}
434
435/* ========================================================================= */
436int ZEXPORT deflateReset (strm)
437 z_streamp strm;
438{
439 int ret;
440
441 ret = deflateResetKeep(strm);
442 if (ret == Z_OK)
443 lm_init(strm->state);
444 return ret;
445}
446
447/* ========================================================================= */
448int ZEXPORT deflateSetHeader (strm, head)
449 z_streamp strm;
450 gz_headerp head;
451{
452 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
453 if (strm->state->wrap != 2) return Z_STREAM_ERROR;
454 strm->state->gzhead = head;
455 return Z_OK;
456}
457
458/* ========================================================================= */
459int ZEXPORT deflatePending (strm, pending, bits)
460 unsigned *pending;
461 int *bits;
462 z_streamp strm;
463{
464 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
465 if (pending != Z_NULL)
466 *pending = strm->state->pending;
467 if (bits != Z_NULL)
468 *bits = strm->state->bi_valid;
469 return Z_OK;
470}
471
472/* ========================================================================= */
473int ZEXPORT deflatePrime (strm, bits, value)
474 z_streamp strm;
475 int bits;
476 int value;
477{
478 deflate_state *s;
479 int put;
480
481 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
482 s = strm->state;
483 if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))
484 return Z_BUF_ERROR;
485 do {
486 put = Buf_size - s->bi_valid;
487 if (put > bits)
488 put = bits;
489 s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid);
490 s->bi_valid += put;
491 _tr_flush_bits(s);
492 value >>= put;
493 bits -= put;
494 } while (bits);
495 return Z_OK;
496}
497
498/* ========================================================================= */
499int ZEXPORT deflateParams(strm, level, strategy)
500 z_streamp strm;
501 int level;
502 int strategy;
503{
504 deflate_state *s;
505 compress_func func;
506 int err = Z_OK;
507
508 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
509 s = strm->state;
510
511#ifdef FASTEST
512 if (level != 0) level = 1;
513#else
514 if (level == Z_DEFAULT_COMPRESSION) level = 6;
515#endif
516 if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
517 return Z_STREAM_ERROR;
518 }
519 func = configuration_table[s->level].func;
520
521 if ((strategy != s->strategy || func != configuration_table[level].func) &&
522 strm->total_in != 0) {
523 /* Flush the last buffer: */
524 err = deflate(strm, Z_BLOCK);
525 if (err == Z_BUF_ERROR && s->pending == 0)
526 err = Z_OK;
527 }
528 if (s->level != level) {
529 s->level = level;
530 s->max_lazy_match = configuration_table[level].max_lazy;
531 s->good_match = configuration_table[level].good_length;
532 s->nice_match = configuration_table[level].nice_length;
533 s->max_chain_length = configuration_table[level].max_chain;
534 }
535 s->strategy = strategy;
536 return err;
537}
538
539/* ========================================================================= */
540int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
541 z_streamp strm;
542 int good_length;
543 int max_lazy;
544 int nice_length;
545 int max_chain;
546{
547 deflate_state *s;
548
549 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
550 s = strm->state;
551 s->good_match = good_length;
552 s->max_lazy_match = max_lazy;
553 s->nice_match = nice_length;
554 s->max_chain_length = max_chain;
555 return Z_OK;
556}
557
558/* =========================================================================
559 * For the default windowBits of 15 and memLevel of 8, this function returns
560 * a close to exact, as well as small, upper bound on the compressed size.
561 * They are coded as constants here for a reason--if the #define's are
562 * changed, then this function needs to be changed as well. The return
563 * value for 15 and 8 only works for those exact settings.
564 *
565 * For any setting other than those defaults for windowBits and memLevel,
566 * the value returned is a conservative worst case for the maximum expansion
567 * resulting from using fixed blocks instead of stored blocks, which deflate
568 * can emit on compressed data for some combinations of the parameters.
569 *
570 * This function could be more sophisticated to provide closer upper bounds for
571 * every combination of windowBits and memLevel. But even the conservative
572 * upper bound of about 14% expansion does not seem onerous for output buffer
573 * allocation.
574 */
575uLong ZEXPORT deflateBound(strm, sourceLen)
576 z_streamp strm;
577 uLong sourceLen;
578{
579 deflate_state *s;
580 uLong complen, wraplen;
581 Bytef *str;
582
583 /* conservative upper bound for compressed data */
584 complen = sourceLen +
585 ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;
586
587 /* if can't get parameters, return conservative bound plus zlib wrapper */
588 if (strm == Z_NULL || strm->state == Z_NULL)
589 return complen + 6;
590
591 /* compute wrapper length */
592 s = strm->state;
593 switch (s->wrap) {
594 case 0: /* raw deflate */
595 wraplen = 0;
596 break;
597 case 1: /* zlib wrapper */
598 wraplen = 6 + (s->strstart ? 4 : 0);
599 break;
600 case 2: /* gzip wrapper */
601 wraplen = 18;
602 if (s->gzhead != Z_NULL) { /* user-supplied gzip header */
603 if (s->gzhead->extra != Z_NULL)
604 wraplen += 2 + s->gzhead->extra_len;
605 str = s->gzhead->name;
606 if (str != Z_NULL)
607 do {
608 wraplen++;
609 } while (*str++);
610 str = s->gzhead->comment;
611 if (str != Z_NULL)
612 do {
613 wraplen++;
614 } while (*str++);
615 if (s->gzhead->hcrc)
616 wraplen += 2;
617 }
618 break;
619 default: /* for compiler happiness */
620 wraplen = 6;
621 }
622
623 /* if not default parameters, return conservative bound */
624 if (s->w_bits != 15 || s->hash_bits != 8 + 7)
625 return complen + wraplen;
626
627 /* default settings: return tight bound for that case */
628 return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
629 (sourceLen >> 25) + 13 - 6 + wraplen;
630}
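
/* Typical use (a sketch): size an output buffer before a single-pass
 * deflate(..., Z_FINISH), e.g. need = deflateBound(&strm, (uLong)src_len).
 * With the default windowBits 15 / memLevel 8 and a plain zlib wrapper,
 * sourceLen = 1,000,000 gives 1,000,000 + 244 + 61 + 0 + 13 - 6 + 6 = 1,000,318.
 */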
631
632/* =========================================================================
633 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
634 * IN assertion: the stream state is correct and there is enough room in
635 * pending_buf.
636 */
637local void putShortMSB (s, b)
638 deflate_state *s;
639 uInt b;
640{
641 put_byte(s, (Byte)(b >> 8));
642 put_byte(s, (Byte)(b & 0xff));
643}
644
645/* =========================================================================
646 * Flush as much pending output as possible. All deflate() output goes
647 * through this function so some applications may wish to modify it
648 * to avoid allocating a large strm->next_out buffer and copying into it.
649 * (See also read_buf()).
650 */
651local void flush_pending(strm)
652 z_streamp strm;
653{
654 unsigned len;
655 deflate_state *s = strm->state;
656
657 _tr_flush_bits(s);
658 len = s->pending;
659 if (len > strm->avail_out) len = strm->avail_out;
660 if (len == 0) return;
661
662 zmemcpy(strm->next_out, s->pending_out, len);
663 strm->next_out += len;
664 s->pending_out += len;
665 strm->total_out += len;
666 strm->avail_out -= len;
667 s->pending -= len;
668 if (s->pending == 0) {
669 s->pending_out = s->pending_buf;
670 }
671}
672
673/* ========================================================================= */
674int ZEXPORT deflate (strm, flush)
675 z_streamp strm;
676 int flush;
677{
678 int old_flush; /* value of flush param for previous deflate call */
679 deflate_state *s;
680
681 if (strm == Z_NULL || strm->state == Z_NULL ||
682 flush > Z_BLOCK || flush < 0) {
683 return Z_STREAM_ERROR;
684 }
685 s = strm->state;
686
687 if (strm->next_out == Z_NULL ||
688 (strm->next_in == Z_NULL && strm->avail_in != 0) ||
689 (s->status == FINISH_STATE && flush != Z_FINISH)) {
690 ERR_RETURN(strm, Z_STREAM_ERROR);
691 }
692 if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
693
694 s->strm = strm; /* just in case */
695 old_flush = s->last_flush;
696 s->last_flush = flush;
697
698 /* Write the header */
699 if (s->status == INIT_STATE) {
700#ifdef GZIP
701 if (s->wrap == 2) {
702 strm->adler = crc32(0L, Z_NULL, 0);
703 put_byte(s, 31);
704 put_byte(s, 139);
705 put_byte(s, 8);
706 if (s->gzhead == Z_NULL) {
707 put_byte(s, 0);
708 put_byte(s, 0);
709 put_byte(s, 0);
710 put_byte(s, 0);
711 put_byte(s, 0);
712 put_byte(s, s->level == 9 ? 2 :
713 (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
714 4 : 0));
715 put_byte(s, OS_CODE);
716 s->status = BUSY_STATE;
717 }
718 else {
719 put_byte(s, (s->gzhead->text ? 1 : 0) +
720 (s->gzhead->hcrc ? 2 : 0) +
721 (s->gzhead->extra == Z_NULL ? 0 : 4) +
722 (s->gzhead->name == Z_NULL ? 0 : 8) +
723 (s->gzhead->comment == Z_NULL ? 0 : 16)
724 );
725 put_byte(s, (Byte)(s->gzhead->time & 0xff));
726 put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
727 put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
728 put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
729 put_byte(s, s->level == 9 ? 2 :
730 (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
731 4 : 0));
732 put_byte(s, s->gzhead->os & 0xff);
733 if (s->gzhead->extra != Z_NULL) {
734 put_byte(s, s->gzhead->extra_len & 0xff);
735 put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
736 }
737 if (s->gzhead->hcrc)
738 strm->adler = crc32(strm->adler, s->pending_buf,
739 s->pending);
740 s->gzindex = 0;
741 s->status = EXTRA_STATE;
742 }
743 }
744 else
745#endif
746 {
747 uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
748 uInt level_flags;
749
750 if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
751 level_flags = 0;
752 else if (s->level < 6)
753 level_flags = 1;
754 else if (s->level == 6)
755 level_flags = 2;
756 else
757 level_flags = 3;
758 header |= (level_flags << 6);
759 if (s->strstart != 0) header |= PRESET_DICT;
760 header += 31 - (header % 31);
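            /* The two bytes written below form the CMF/FLG pair of RFC 1950;
             * the adjustment above makes the 16-bit value a multiple of 31
             * (the FCHECK field). For example, level 6 with windowBits 15 and
             * no preset dictionary yields 0x78 0x9C, the familiar prefix of
             * most zlib streams.
             */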
761
762 s->status = BUSY_STATE;
763 putShortMSB(s, header);
764
765 /* Save the adler32 of the preset dictionary: */
766 if (s->strstart != 0) {
767 putShortMSB(s, (uInt)(strm->adler >> 16));
768 putShortMSB(s, (uInt)(strm->adler & 0xffff));
769 }
770 strm->adler = adler32(0L, Z_NULL, 0);
771 }
772 }
773#ifdef GZIP
774 if (s->status == EXTRA_STATE) {
775 if (s->gzhead->extra != Z_NULL) {
776 uInt beg = s->pending; /* start of bytes to update crc */
777
778 while (s->gzindex < (s->gzhead->extra_len & 0xffff)) {
779 if (s->pending == s->pending_buf_size) {
780 if (s->gzhead->hcrc && s->pending > beg)
781 strm->adler = crc32(strm->adler, s->pending_buf + beg,
782 s->pending - beg);
783 flush_pending(strm);
784 beg = s->pending;
785 if (s->pending == s->pending_buf_size)
786 break;
787 }
788 put_byte(s, s->gzhead->extra[s->gzindex]);
789 s->gzindex++;
790 }
791 if (s->gzhead->hcrc && s->pending > beg)
792 strm->adler = crc32(strm->adler, s->pending_buf + beg,
793 s->pending - beg);
794 if (s->gzindex == s->gzhead->extra_len) {
795 s->gzindex = 0;
796 s->status = NAME_STATE;
797 }
798 }
799 else
800 s->status = NAME_STATE;
801 }
802 if (s->status == NAME_STATE) {
803 if (s->gzhead->name != Z_NULL) {
804 uInt beg = s->pending; /* start of bytes to update crc */
805 int val;
806
807 do {
808 if (s->pending == s->pending_buf_size) {
809 if (s->gzhead->hcrc && s->pending > beg)
810 strm->adler = crc32(strm->adler, s->pending_buf + beg,
811 s->pending - beg);
812 flush_pending(strm);
813 beg = s->pending;
814 if (s->pending == s->pending_buf_size) {
815 val = 1;
816 break;
817 }
818 }
819 val = s->gzhead->name[s->gzindex++];
820 put_byte(s, val);
821 } while (val != 0);
822 if (s->gzhead->hcrc && s->pending > beg)
823 strm->adler = crc32(strm->adler, s->pending_buf + beg,
824 s->pending - beg);
825 if (val == 0) {
826 s->gzindex = 0;
827 s->status = COMMENT_STATE;
828 }
829 }
830 else
831 s->status = COMMENT_STATE;
832 }
833 if (s->status == COMMENT_STATE) {
834 if (s->gzhead->comment != Z_NULL) {
835 uInt beg = s->pending; /* start of bytes to update crc */
836 int val;
837
838 do {
839 if (s->pending == s->pending_buf_size) {
840 if (s->gzhead->hcrc && s->pending > beg)
841 strm->adler = crc32(strm->adler, s->pending_buf + beg,
842 s->pending - beg);
843 flush_pending(strm);
844 beg = s->pending;
845 if (s->pending == s->pending_buf_size) {
846 val = 1;
847 break;
848 }
849 }
850 val = s->gzhead->comment[s->gzindex++];
851 put_byte(s, val);
852 } while (val != 0);
853 if (s->gzhead->hcrc && s->pending > beg)
854 strm->adler = crc32(strm->adler, s->pending_buf + beg,
855 s->pending - beg);
856 if (val == 0)
857 s->status = HCRC_STATE;
858 }
859 else
860 s->status = HCRC_STATE;
861 }
862 if (s->status == HCRC_STATE) {
863 if (s->gzhead->hcrc) {
864 if (s->pending + 2 > s->pending_buf_size)
865 flush_pending(strm);
866 if (s->pending + 2 <= s->pending_buf_size) {
867 put_byte(s, (Byte)(strm->adler & 0xff));
868 put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
869 strm->adler = crc32(0L, Z_NULL, 0);
870 s->status = BUSY_STATE;
871 }
872 }
873 else
874 s->status = BUSY_STATE;
875 }
876#endif
877
878 /* Flush as much pending output as possible */
879 if (s->pending != 0) {
880 flush_pending(strm);
881 if (strm->avail_out == 0) {
882 /* Since avail_out is 0, deflate will be called again with
883 * more output space, but possibly with both pending and
884 * avail_in equal to zero. There won't be anything to do,
885 * but this is not an error situation so make sure we
886 * return OK instead of BUF_ERROR at next call of deflate:
887 */
888 s->last_flush = -1;
889 return Z_OK;
890 }
891
892 /* Make sure there is something to do and avoid duplicate consecutive
893 * flushes. For repeated and useless calls with Z_FINISH, we keep
894 * returning Z_STREAM_END instead of Z_BUF_ERROR.
895 */
896 } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) &&
897 flush != Z_FINISH) {
898 ERR_RETURN(strm, Z_BUF_ERROR);
899 }
900
901 /* User must not provide more input after the first FINISH: */
902 if (s->status == FINISH_STATE && strm->avail_in != 0) {
903 ERR_RETURN(strm, Z_BUF_ERROR);
904 }
905
906 /* Start a new block or continue the current one.
907 */
908 if (strm->avail_in != 0 || s->lookahead != 0 ||
909 (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
910 block_state bstate;
911
912 bstate = s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
913 (s->strategy == Z_RLE ? deflate_rle(s, flush) :
914 (*(configuration_table[s->level].func))(s, flush));
915
916 if (bstate == finish_started || bstate == finish_done) {
917 s->status = FINISH_STATE;
918 }
919 if (bstate == need_more || bstate == finish_started) {
920 if (strm->avail_out == 0) {
921 s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
922 }
923 return Z_OK;
924 /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
925 * of deflate should use the same flush parameter to make sure
926 * that the flush is complete. So we don't have to output an
927 * empty block here, this will be done at next call. This also
928 * ensures that for a very small output buffer, we emit at most
929 * one empty block.
930 */
931 }
932 if (bstate == block_done) {
933 if (flush == Z_PARTIAL_FLUSH) {
934 _tr_align(s);
935 } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
936 _tr_stored_block(s, (char*)0, 0L, 0);
937 /* For a full flush, this empty block will be recognized
938 * as a special marker by inflate_sync().
939 */
940 if (flush == Z_FULL_FLUSH) {
941 CLEAR_HASH(s); /* forget history */
942 if (s->lookahead == 0) {
943 s->strstart = 0;
944 s->block_start = 0L;
945 s->insert = 0;
946 }
947 }
948 }
949 flush_pending(strm);
950 if (strm->avail_out == 0) {
951 s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
952 return Z_OK;
953 }
954 }
955 }
956 Assert(strm->avail_out > 0, "bug2");
957
958 if (flush != Z_FINISH) return Z_OK;
959 if (s->wrap <= 0) return Z_STREAM_END;
960
961 /* Write the trailer */
962#ifdef GZIP
963 if (s->wrap == 2) {
964 put_byte(s, (Byte)(strm->adler & 0xff));
965 put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
966 put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
967 put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
968 put_byte(s, (Byte)(strm->total_in & 0xff));
969 put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
970 put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
971 put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
972 }
973 else
974#endif
975 {
976 putShortMSB(s, (uInt)(strm->adler >> 16));
977 putShortMSB(s, (uInt)(strm->adler & 0xffff));
978 }
979 flush_pending(strm);
980 /* If avail_out is zero, the application will call deflate again
981 * to flush the rest.
982 */
983 if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
984 return s->pending != 0 ? Z_OK : Z_STREAM_END;
985}
986
987/* ========================================================================= */
988int ZEXPORT deflateEnd (strm)
989 z_streamp strm;
990{
991 int status;
992
993 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
994
995 status = strm->state->status;
996 if (status != INIT_STATE &&
997 status != EXTRA_STATE &&
998 status != NAME_STATE &&
999 status != COMMENT_STATE &&
1000 status != HCRC_STATE &&
1001 status != BUSY_STATE &&
1002 status != FINISH_STATE) {
1003 return Z_STREAM_ERROR;
1004 }
1005
1006 /* Deallocate in reverse order of allocations: */
1007 TRY_FREE(strm, strm->state->pending_buf);
1008 TRY_FREE(strm, strm->state->head);
1009 TRY_FREE(strm, strm->state->prev);
1010 TRY_FREE(strm, strm->state->window);
1011
1012 ZFREE(strm, strm->state);
1013 strm->state = Z_NULL;
1014
1015 return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
1016}
1017
1018/* =========================================================================
1019 * Copy the source state to the destination state.
1020 * To simplify the source, this is not supported for 16-bit MSDOS (which
1021 * doesn't have enough memory anyway to duplicate compression states).
1022 */
1023int ZEXPORT deflateCopy (dest, source)
1024 z_streamp dest;
1025 z_streamp source;
1026{
1027#ifdef MAXSEG_64K
1028 return Z_STREAM_ERROR;
1029#else
1030 deflate_state *ds;
1031 deflate_state *ss;
1032 ushf *overlay;
1033
1034
1035 if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) {
1036 return Z_STREAM_ERROR;
1037 }
1038
1039 ss = source->state;
1040
1041 zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));
1042
1043 ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
1044 if (ds == Z_NULL) return Z_MEM_ERROR;
1045 dest->state = (struct internal_state FAR *) ds;
1046 zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state));
1047 ds->strm = dest;
1048
1049 ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
1050 ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
1051 ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
1052 overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
1053 ds->pending_buf = (uchf *) overlay;
1054
1055 if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
1056 ds->pending_buf == Z_NULL) {
1057 deflateEnd (dest);
1058 return Z_MEM_ERROR;
1059 }
 1060 /* the following zmemcpy calls do not work for 16-bit MSDOS */
1061 zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
1062 zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
1063 zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
1064 zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
1065
1066 ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
1067 ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
1068 ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
1069
1070 ds->l_desc.dyn_tree = ds->dyn_ltree;
1071 ds->d_desc.dyn_tree = ds->dyn_dtree;
1072 ds->bl_desc.dyn_tree = ds->bl_tree;
1073
1074 return Z_OK;
1075#endif /* MAXSEG_64K */
1076}
1077
1078/* ===========================================================================
1079 * Read a new buffer from the current input stream, update the adler32
1080 * and total number of bytes read. All deflate() input goes through
1081 * this function so some applications may wish to modify it to avoid
1082 * allocating a large strm->next_in buffer and copying from it.
1083 * (See also flush_pending()).
1084 */
1085local int read_buf(strm, buf, size)
1086 z_streamp strm;
1087 Bytef *buf;
1088 unsigned size;
1089{
1090 unsigned len = strm->avail_in;
1091
1092 if (len > size) len = size;
1093 if (len == 0) return 0;
1094
1095 strm->avail_in -= len;
1096
1097 zmemcpy(buf, strm->next_in, len);
1098 if (strm->state->wrap == 1) {
1099 strm->adler = adler32(strm->adler, buf, len);
1100 }
1101#ifdef GZIP
1102 else if (strm->state->wrap == 2) {
1103 strm->adler = crc32(strm->adler, buf, len);
1104 }
1105#endif
1106 strm->next_in += len;
1107 strm->total_in += len;
1108
1109 return (int)len;
1110}
1111
1112/* ===========================================================================
1113 * Initialize the "longest match" routines for a new zlib stream
1114 */
1115local void lm_init (s)
1116 deflate_state *s;
1117{
1118 s->window_size = (ulg)2L*s->w_size;
1119
1120 CLEAR_HASH(s);
1121
1122 /* Set the default configuration parameters:
1123 */
1124 s->max_lazy_match = configuration_table[s->level].max_lazy;
1125 s->good_match = configuration_table[s->level].good_length;
1126 s->nice_match = configuration_table[s->level].nice_length;
1127 s->max_chain_length = configuration_table[s->level].max_chain;
1128
1129 s->strstart = 0;
1130 s->block_start = 0L;
1131 s->lookahead = 0;
1132 s->insert = 0;
1133 s->match_length = s->prev_length = MIN_MATCH-1;
1134 s->match_available = 0;
1135 s->ins_h = 0;
1136#ifndef FASTEST
1137#ifdef ASMV
1138 match_init(); /* initialize the asm code */
1139#endif
1140#endif
1141}
1142
1143#ifndef FASTEST
1144/* ===========================================================================
1145 * Set match_start to the longest match starting at the given string and
 1146 * return its length. Matches shorter than or equal to prev_length are discarded,
1147 * in which case the result is equal to prev_length and match_start is
1148 * garbage.
1149 * IN assertions: cur_match is the head of the hash chain for the current
1150 * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
1151 * OUT assertion: the match length is not greater than s->lookahead.
1152 */
1153#ifndef ASMV
1154/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
1155 * match.S. The code will be functionally equivalent.
1156 */
1157local uInt longest_match(s, cur_match)
1158 deflate_state *s;
1159 IPos cur_match; /* current match */
1160{
1161 unsigned chain_length = s->max_chain_length;/* max hash chain length */
1162 register Bytef *scan = s->window + s->strstart; /* current string */
1163 register Bytef *match; /* matched string */
1164 register int len; /* length of current match */
4b3f6d79 1165#ifdef ZLIB_PM3_TUNED
1166 int best_len = MIN_MATCH-1; // lift the restriction on prev-length
1167#else
f3919878 1168 int best_len = s->prev_length; /* best match length so far */
4b3f6d79 1169#endif
f3919878 1170 int nice_match = s->nice_match; /* stop if match long enough */
1171 IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
1172 s->strstart - (IPos)MAX_DIST(s) : NIL;
1173 /* Stop when cur_match becomes <= limit. To simplify the code,
1174 * we prevent matches with the string of window index 0.
1175 */
1176 Posf *prev = s->prev;
1177 uInt wmask = s->w_mask;
1178
1179#ifdef UNALIGNED_OK
1180 /* Compare two bytes at a time. Note: this is not always beneficial.
1181 * Try with and without -DUNALIGNED_OK to check.
1182 */
1183 register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
1184 register ush scan_start = *(ushf*)scan;
1185 register ush scan_end = *(ushf*)(scan+best_len-1);
1186#else
1187 register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1188 register Byte scan_end1 = scan[best_len-1];
1189 register Byte scan_end = scan[best_len];
1190#endif
1191
1192 /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1193 * It is easy to get rid of this optimization if necessary.
1194 */
1195 Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1196
1197 /* Do not waste too much time if we already have a good match: */
1198 if (s->prev_length >= s->good_match) {
1199 chain_length >>= 2;
1200 }
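    /* For example, at level 6 max_chain_length is 128 and good_match is 8, so
     * once a match of at least 8 bytes is already in hand only 32 chain
     * entries are examined for a better one.
     */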
1201 /* Do not look for matches beyond the end of the input. This is necessary
1202 * to make deflate deterministic.
1203 */
1204 if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
1205
1206 Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1207
1208 do {
1209 Assert(cur_match < s->strstart, "no future");
1210 match = s->window + cur_match;
1211
1212 /* Skip to next match if the match length cannot increase
1213 * or if the match length is less than 2. Note that the checks below
1214 * for insufficient lookahead only occur occasionally for performance
1215 * reasons. Therefore uninitialized memory will be accessed, and
1216 * conditional jumps will be made that depend on those values.
1217 * However the length of the match is limited to the lookahead, so
1218 * the output of deflate is not affected by the uninitialized values.
1219 */
1220#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
1221 /* This code assumes sizeof(unsigned short) == 2. Do not use
1222 * UNALIGNED_OK if your compiler uses a different size.
1223 */
1224 if (*(ushf*)(match+best_len-1) != scan_end ||
1225 *(ushf*)match != scan_start) continue;
1226
1227 /* It is not necessary to compare scan[2] and match[2] since they are
1228 * always equal when the other bytes match, given that the hash keys
1229 * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
1230 * strstart+3, +5, ... up to strstart+257. We check for insufficient
1231 * lookahead only every 4th comparison; the 128th check will be made
1232 * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
1233 * necessary to put more guard bytes at the end of the window, or
1234 * to check more often for insufficient lookahead.
1235 */
1236 Assert(scan[2] == match[2], "scan[2]?");
1237 scan++, match++;
1238 do {
1239 } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1240 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1241 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1242 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1243 scan < strend);
1244 /* The funny "do {}" generates better code on most compilers */
1245
1246 /* Here, scan <= window+strstart+257 */
1247 Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1248 if (*scan == *match) scan++;
1249
1250 len = (MAX_MATCH - 1) - (int)(strend-scan);
1251 scan = strend - (MAX_MATCH-1);
1252
1253#else /* UNALIGNED_OK */
1254
1255 if (match[best_len] != scan_end ||
1256 match[best_len-1] != scan_end1 ||
1257 *match != *scan ||
1258 *++match != scan[1]) continue;
1259
1260 /* The check at best_len-1 can be removed because it will be made
1261 * again later. (This heuristic is not always a win.)
1262 * It is not necessary to compare scan[2] and match[2] since they
1263 * are always equal when the other bytes match, given that
1264 * the hash keys are equal and that HASH_BITS >= 8.
1265 */
1266 scan += 2, match++;
1267 Assert(*scan == *match, "match[2]?");
1268
1269 /* We check for insufficient lookahead only every 8th comparison;
1270 * the 256th check will be made at strstart+258.
1271 */
1272 do {
1273 } while (*++scan == *++match && *++scan == *++match &&
1274 *++scan == *++match && *++scan == *++match &&
1275 *++scan == *++match && *++scan == *++match &&
1276 *++scan == *++match && *++scan == *++match &&
1277 scan < strend);
1278
1279 Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1280
1281 len = MAX_MATCH - (int)(strend - scan);
1282 scan = strend - MAX_MATCH;
1283
1284#endif /* UNALIGNED_OK */
1285
1286 if (len > best_len) {
1287 s->match_start = cur_match;
1288 best_len = len;
1289 if (len >= nice_match) break;
1290#ifdef UNALIGNED_OK
1291 scan_end = *(ushf*)(scan+best_len-1);
1292#else
1293 scan_end1 = scan[best_len-1];
1294 scan_end = scan[best_len];
1295#endif
1296 }
1297 } while ((cur_match = prev[cur_match & wmask]) > limit
1298 && --chain_length != 0);
1299
1300 if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
1301 return s->lookahead;
1302}
1303#endif /* ASMV */
1304
1305#else /* FASTEST */
1306
1307/* ---------------------------------------------------------------------------
1308 * Optimized version for FASTEST only
1309 */
1310local uInt longest_match(s, cur_match)
1311 deflate_state *s;
1312 IPos cur_match; /* current match */
1313{
1314 register Bytef *scan = s->window + s->strstart; /* current string */
1315 register Bytef *match; /* matched string */
1316 register int len; /* length of current match */
1317 register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1318
1319 /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1320 * It is easy to get rid of this optimization if necessary.
1321 */
1322 Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1323
1324 Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1325
1326 Assert(cur_match < s->strstart, "no future");
1327
1328 match = s->window + cur_match;
1329
1330 /* Return failure if the match length is less than 2:
1331 */
1332 if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
1333
1334 /* The check at best_len-1 can be removed because it will be made
1335 * again later. (This heuristic is not always a win.)
1336 * It is not necessary to compare scan[2] and match[2] since they
1337 * are always equal when the other bytes match, given that
1338 * the hash keys are equal and that HASH_BITS >= 8.
1339 */
1340 scan += 2, match += 2;
1341 Assert(*scan == *match, "match[2]?");
1342
1343 /* We check for insufficient lookahead only every 8th comparison;
1344 * the 256th check will be made at strstart+258.
1345 */
1346 do {
1347 } while (*++scan == *++match && *++scan == *++match &&
1348 *++scan == *++match && *++scan == *++match &&
1349 *++scan == *++match && *++scan == *++match &&
1350 *++scan == *++match && *++scan == *++match &&
1351 scan < strend);
1352
1353 Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1354
1355 len = MAX_MATCH - (int)(strend - scan);
1356
1357 if (len < MIN_MATCH) return MIN_MATCH - 1;
1358
1359 s->match_start = cur_match;
1360 return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
1361}
1362
1363#endif /* FASTEST */
1364
1365#ifdef DEBUG
1366/* ===========================================================================
1367 * Check that the match at match_start is indeed a match.
1368 */
1369local void check_match(s, start, match, length)
1370 deflate_state *s;
1371 IPos start, match;
1372 int length;
1373{
1374 /* check that the match is indeed a match */
1375 if (zmemcmp(s->window + match,
1376 s->window + start, length) != EQUAL) {
1377 fprintf(stderr, " start %u, match %u, length %d\n",
1378 start, match, length);
1379 do {
1380 fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
1381 } while (--length != 0);
1382 z_error("invalid match");
1383 }
1384 if (z_verbose > 1) {
1385 fprintf(stderr,"\\[%d,%d]", start-match, length);
1386 do { putc(s->window[start++], stderr); } while (--length != 0);
1387 }
1388}
1389#else
1390# define check_match(s, start, match, length)
1391#endif /* DEBUG */
1392
1393/* ===========================================================================
1394 * Fill the window when the lookahead becomes insufficient.
1395 * Updates strstart and lookahead.
1396 *
1397 * IN assertion: lookahead < MIN_LOOKAHEAD
1398 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
1399 * At least one byte has been read, or avail_in == 0; reads are
1400 * performed for at least two bytes (required for the zip translate_eol
1401 * option -- not supported here).
1402 */
1403local void fill_window(s)
1404 deflate_state *s;
1405{
1406 register unsigned n, m;
1407 register Posf *p;
1408 unsigned more; /* Amount of free space at the end of the window. */
1409 uInt wsize = s->w_size;
1410
1411 Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
1412
1413 do {
1414 more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
1415
1416 /* Deal with !@#$% 64K limit: */
1417 if (sizeof(int) <= 2) {
1418 if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
1419 more = wsize;
1420
1421 } else if (more == (unsigned)(-1)) {
1422 /* Very unlikely, but possible on 16 bit machine if
1423 * strstart == 0 && lookahead == 1 (input done a byte at time)
1424 */
1425 more--;
1426 }
1427 }
1428
1429 /* If the window is almost full and there is insufficient lookahead,
1430 * move the upper half to the lower one to make room in the upper half.
1431 */
1432 if (s->strstart >= wsize+MAX_DIST(s)) {
1433
1434 zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
1435 s->match_start -= wsize;
1436 s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
1437 s->block_start -= (long) wsize;
1438
1439 /* Slide the hash table (could be avoided with 32 bit values
1440 at the expense of memory usage). We slide even when level == 0
1441 to keep the hash table consistent if we switch back to level > 0
1442 later. (Using level 0 permanently is not an optimal usage of
1443 zlib, so we don't care about this pathological case.)
1444 */
1445 n = s->hash_size;
1446 p = &s->head[n];
1447 do {
1448 m = *--p;
1449 *p = (Pos)(m >= wsize ? m-wsize : NIL);
1450 } while (--n);
1451
1452 n = wsize;
1453#ifndef FASTEST
1454 p = &s->prev[n];
1455 do {
1456 m = *--p;
1457 *p = (Pos)(m >= wsize ? m-wsize : NIL);
1458 /* If n is not on any hash chain, prev[n] is garbage but
1459 * its value will never be used.
1460 */
1461 } while (--n);
1462#endif
1463 more += wsize;
1464 }
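        /* Example with the default w_size of 32768: after the slide, a stored
         * position 40000 becomes 7232, and any position below 32768 (data that
         * has just been discarded) becomes NIL, so no chain entry can point
         * outside the remaining window.
         */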
1465 if (s->strm->avail_in == 0) break;
1466
1467 /* If there was no sliding:
1468 * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
1469 * more == window_size - lookahead - strstart
1470 * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
1471 * => more >= window_size - 2*WSIZE + 2
1472 * In the BIG_MEM or MMAP case (not yet supported),
1473 * window_size == input_size + MIN_LOOKAHEAD &&
1474 * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
1475 * Otherwise, window_size == 2*WSIZE so more >= 2.
1476 * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
1477 */
1478 Assert(more >= 2, "more < 2");
1479
1480 n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
1481 s->lookahead += n;
1482
1483 /* Initialize the hash value now that we have some input: */
1484 if (s->lookahead + s->insert >= MIN_MATCH) {
1485 uInt str = s->strstart - s->insert;
1486 s->ins_h = s->window[str];
1487 UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
1488#if MIN_MATCH != 3
1489 Call UPDATE_HASH() MIN_MATCH-3 more times
1490#endif
1491 while (s->insert) {
1492 UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
1493#ifndef FASTEST
1494 s->prev[str & s->w_mask] = s->head[s->ins_h];
1495#endif
1496 s->head[s->ins_h] = (Pos)str;
1497 str++;
1498 s->insert--;
1499 if (s->lookahead + s->insert < MIN_MATCH)
1500 break;
1501 }
1502 }
1503 /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
1504 * but this is not important since only literal bytes will be emitted.
1505 */
1506
1507 } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
1508
1509 /* If the WIN_INIT bytes after the end of the current data have never been
1510 * written, then zero those bytes in order to avoid memory check reports of
1511 * the use of uninitialized (or uninitialised as Julian writes) bytes by
1512 * the longest match routines. Update the high water mark for the next
1513 * time through here. WIN_INIT is set to MAX_MATCH since the longest match
1514 * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
1515 */
1516 if (s->high_water < s->window_size) {
1517 ulg curr = s->strstart + (ulg)(s->lookahead);
1518 ulg init;
1519
1520 if (s->high_water < curr) {
1521 /* Previous high water mark below current data -- zero WIN_INIT
1522 * bytes or up to end of window, whichever is less.
1523 */
1524 init = s->window_size - curr;
1525 if (init > WIN_INIT)
1526 init = WIN_INIT;
1527 zmemzero(s->window + curr, (unsigned)init);
1528 s->high_water = curr + init;
1529 }
1530 else if (s->high_water < (ulg)curr + WIN_INIT) {
1531 /* High water mark at or above current data, but below current data
1532 * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
1533 * to end of window, whichever is less.
1534 */
1535 init = (ulg)curr + WIN_INIT - s->high_water;
1536 if (init > s->window_size - s->high_water)
1537 init = s->window_size - s->high_water;
1538 zmemzero(s->window + s->high_water, (unsigned)init);
1539 s->high_water += init;
1540 }
1541 }
1542
1543 Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
1544 "not enough room for search");
1545}
1546
1547/* ===========================================================================
1548 * Flush the current block, with given end-of-file flag.
1549 * IN assertion: strstart is set to the end of the current match.
1550 */
1551#define FLUSH_BLOCK_ONLY(s, last) { \
1552 _tr_flush_block(s, (s->block_start >= 0L ? \
1553 (charf *)&s->window[(unsigned)s->block_start] : \
1554 (charf *)Z_NULL), \
1555 (ulg)((long)s->strstart - s->block_start), \
1556 (last)); \
1557 s->block_start = s->strstart; \
1558 flush_pending(s->strm); \
1559 Tracev((stderr,"[FLUSH]")); \
1560}
1561
1562/* Same but force premature exit if necessary. */
1563#define FLUSH_BLOCK(s, last) { \
1564 FLUSH_BLOCK_ONLY(s, last); \
1565 if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \
1566}
1567
1568/* ===========================================================================
1569 * Copy without compression as much as possible from the input stream, return
1570 * the current block state.
1571 * This function does not insert new strings in the dictionary since
 1572 * incompressible data is probably not useful. This function is used
1573 * only for the level=0 compression option.
1574 * NOTE: this function should be optimized to avoid extra copying from
1575 * window to pending_buf.
1576 */
1577local block_state deflate_stored(s, flush)
1578 deflate_state *s;
1579 int flush;
1580{
1581 /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
1582 * to pending_buf_size, and each stored block has a 5 byte header:
1583 */
1584 ulg max_block_size = 0xffff;
1585 ulg max_start;
1586
1587 if (max_block_size > s->pending_buf_size - 5) {
1588 max_block_size = s->pending_buf_size - 5;
1589 }
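    /* With the default memLevel of 8, pending_buf_size is 4*16384 = 65536, so
     * max_block_size is clamped to 65531 here, just under the 0xffff format
     * limit for a single stored block.
     */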
1590
1591 /* Copy as much as possible from input to output: */
1592 for (;;) {
1593 /* Fill the window as much as possible: */
1594 if (s->lookahead <= 1) {
1595
1596 Assert(s->strstart < s->w_size+MAX_DIST(s) ||
1597 s->block_start >= (long)s->w_size, "slide too late");
1598
1599 fill_window(s);
1600 if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
1601
1602 if (s->lookahead == 0) break; /* flush the current block */
1603 }
1604 Assert(s->block_start >= 0L, "block gone");
1605
1606 s->strstart += s->lookahead;
1607 s->lookahead = 0;
1608
1609 /* Emit a stored block if pending_buf will be full: */
1610 max_start = s->block_start + max_block_size;
1611 if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
 1612 /* strstart == 0 is possible if wraparound occurs on a 16-bit machine */
1613 s->lookahead = (uInt)(s->strstart - max_start);
1614 s->strstart = (uInt)max_start;
1615 FLUSH_BLOCK(s, 0);
1616 }
1617 /* Flush if we may have to slide, otherwise block_start may become
1618 * negative and the data will be gone:
1619 */
1620 if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
1621 FLUSH_BLOCK(s, 0);
1622 }
1623 }
1624 s->insert = 0;
1625 if (flush == Z_FINISH) {
1626 FLUSH_BLOCK(s, 1);
1627 return finish_done;
1628 }
1629 if ((long)s->strstart > s->block_start)
1630 FLUSH_BLOCK(s, 0);
1631 return block_done;
1632}
1633
1634/* ===========================================================================
1635 * Compress as much as possible from the input stream, return the current
1636 * block state.
1637 * This function does not perform lazy evaluation of matches and inserts
1638 * new strings in the dictionary only for unmatched strings or for short
1639 * matches. It is used only for the fast compression options.
1640 */
1641local block_state deflate_fast(s, flush)
1642 deflate_state *s;
1643 int flush;
1644{
1645 IPos hash_head; /* head of the hash chain */
1646 int bflush; /* set if current block must be flushed */
1647
1648 for (;;) {
1649 /* Make sure that we always have enough lookahead, except
1650 * at the end of the input file. We need MAX_MATCH bytes
1651 * for the next match, plus MIN_MATCH bytes to insert the
1652 * string following the next match.
1653 */
1654 if (s->lookahead < MIN_LOOKAHEAD) {
1655 fill_window(s);
1656 if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1657 return need_more;
1658 }
1659 if (s->lookahead == 0) break; /* flush the current block */
1660 }
1661
1662 /* Insert the string window[strstart .. strstart+2] in the
1663 * dictionary, and set hash_head to the head of the hash chain:
1664 */
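        /* Editorial note: INSERT_STRING() rolls the byte at strstart+MIN_MATCH-1
         * into the hash, returns the previous head of that hash chain in
         * hash_head, and records strstart as the new head, so longest_match()
         * can later walk the chain of earlier positions with the same hash.
         */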
1665 hash_head = NIL;
1666 if (s->lookahead >= MIN_MATCH) {
1667 INSERT_STRING(s, s->strstart, hash_head);
1668 }
1669
1670 /* Find the longest match, discarding those <= prev_length.
 1671 * At this point match_length is always < MIN_MATCH
1672 */
1673 if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
1674 /* To simplify the code, we prevent matches with the string
1675 * of window index 0 (in particular we have to avoid a match
1676 * of the string with itself at the start of the input file).
1677 */
1678 s->match_length = longest_match (s, hash_head);
1679 /* longest_match() sets match_start */
1680 }
1681 if (s->match_length >= MIN_MATCH) {
1682 check_match(s, s->strstart, s->match_start, s->match_length);
1683
1684 _tr_tally_dist(s, s->strstart - s->match_start,
1685 s->match_length - MIN_MATCH, bflush);
1686
1687 s->lookahead -= s->match_length;
1688
1689 /* Insert new strings in the hash table only if the match length
1690 * is not too large. This saves time but degrades compression.
1691 */
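            /* Editorial note: max_insert_length is a #define alias for
             * max_lazy_match, i.e. the "lazy" threshold taken from this
             * level's configuration_table entry.
             */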
1692#ifndef FASTEST
1693 if (s->match_length <= s->max_insert_length &&
1694 s->lookahead >= MIN_MATCH) {
1695 s->match_length--; /* string at strstart already in table */
1696 do {
1697 s->strstart++;
1698 INSERT_STRING(s, s->strstart, hash_head);
1699 /* strstart never exceeds WSIZE-MAX_MATCH, so there are
1700 * always MIN_MATCH bytes ahead.
1701 */
1702 } while (--s->match_length != 0);
1703 s->strstart++;
1704 } else
1705#endif
1706 {
1707 s->strstart += s->match_length;
1708 s->match_length = 0;
1709 s->ins_h = s->window[s->strstart];
1710 UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
1711#if MIN_MATCH != 3
1712 Call UPDATE_HASH() MIN_MATCH-3 more times
1713#endif
1714 /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
1715 * matter since it will be recomputed at next deflate call.
1716 */
1717 }
1718 } else {
1719 /* No match, output a literal byte */
1720 Tracevv((stderr,"%c", s->window[s->strstart]));
1721 _tr_tally_lit (s, s->window[s->strstart], bflush);
1722 s->lookahead--;
1723 s->strstart++;
1724 }
1725 if (bflush) FLUSH_BLOCK(s, 0);
1726 }
1727 s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
1728 if (flush == Z_FINISH) {
1729 FLUSH_BLOCK(s, 1);
1730 return finish_done;
1731 }
1732 if (s->last_lit)
1733 FLUSH_BLOCK(s, 0);
1734 return block_done;
1735}
1736
4b3f6d79 1737
1738#ifdef ZLIB_PM3_TUNED
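/* Editorial summary (added for clarity, not in the original source):
 * try_harder() walks every position still covered by the pending previous
 * match and evaluates the combined gain of cutting the previous match off
 * there (fewer than MIN_MATCH remaining bytes count as plain literals) plus
 * the longest match that can start at that position.  A match of n bytes is
 * credited with a gain of n - MIN_MATCH, i.e. a length/distance code is
 * assumed to cost about as much as MIN_MATCH literal codes.  The best
 * combination wins, ties going to the longer previous match, and the choice
 * is handed back to deflate_slow() via s->prev_length and s->match_length.
 */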
8e074056 1739local uInt try_harder(s, strstart, lookahead, hash_head)
4b3f6d79 1740 deflate_state *s;
1741 uInt strstart;
1742 uInt lookahead;
1743 IPos hash_head;
4b3f6d79 1744{
1745 uInt strstart_save = s->strstart;
1746 s->strstart = strstart;
1747 uInt lookahead_save = s->lookahead;
1748 s->lookahead = lookahead;
1749 uInt ins_h_save = s->ins_h;
1750 uInt combined_gain;
1751 uInt best_combined_gain = 0;
1752 uInt match_length;
1753 uInt prev_length = s->prev_length < MIN_MATCH ? 1 : s->prev_length;
1754 uInt best_prev_length = prev_length;
1755 uInt current_match_start = s->match_start;
1756 uInt current_match_length = s->match_length;
1757
1758 do {
1759 if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
1760 match_length = longest_match (s, hash_head);
1761 /* longest_match() sets match_start */
1762 } else {
1763 match_length = MIN_MATCH - 1;
1764 }
1765#if TOO_FAR <= 32767
1766 if (match_length == MIN_MATCH && s->strstart - s->match_start > TOO_FAR) {
1767 match_length = MIN_MATCH-1;
1768 }
1769#endif
1770 if (s->strstart == strstart) { // store match at current position
1771 current_match_length = match_length;
1772 current_match_start = s->match_start;
1773 }
1774 if (s->strstart - strstart + 1 < MIN_MATCH) { // previous match reduced to one or two literals
1775 combined_gain = 0; // need one literal per byte: no gain (assuming 8 bits per literal)
1776 } else {
 1777 combined_gain = s->strstart - strstart + 1 - MIN_MATCH; // gain of the (possibly truncated) previous match: its length minus MIN_MATCH
1778 }
8e074056 1779 if (match_length < MIN_MATCH) {
1780 combined_gain += 0; // no gain
4b3f6d79 1781 } else {
8e074056 1782 combined_gain += match_length - MIN_MATCH; // a match covering match_length bytes costs roughly MIN_MATCH literals
4b3f6d79 1783 }
4b3f6d79 1784 if (combined_gain >= best_combined_gain) { // in case of a tie we prefer the longer prev_length
1785 best_combined_gain = combined_gain;
1786 best_prev_length = s->strstart - strstart + 1;
1787 }
1788 s->strstart++;
1789 s->lookahead--;
1790 UPDATE_HASH(s, s->ins_h, s->window[(s->strstart) + (MIN_MATCH-1)]);
1791 hash_head = s->head[s->ins_h];
4b3f6d79 1792 } while (s->strstart <= strstart-1 + prev_length // try to truncate the previous match to 1, 3, ... prev_length
1793 && s->strstart <= s->window_size - MIN_LOOKAHEAD); // watch out for the end of the input
1794
1795 s->strstart = strstart_save;
1796 s->lookahead = lookahead_save;
1797 s->ins_h = ins_h_save;
1798 s->match_length = current_match_length;
1799 s->match_start = current_match_start;
4b3f6d79 1800 if (best_prev_length >= MIN_MATCH) {
1801 s->prev_length = best_prev_length;
1802 s->match_length = MIN_MATCH - 1;
1803 } else {
1804 s->prev_length = MIN_MATCH - 1;
1805 }
1806 return best_combined_gain;
1807}
1808#endif
1809
1810
1811
f3919878 1812#ifndef FASTEST
1813/* ===========================================================================
1814 * Same as above, but achieves better compression. We use a lazy
1815 * evaluation for matches: a match is finally adopted only if there is
1816 * no better match at the next window position.
1817 */
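/* Editorial example (illustrative, not part of the original source): if the
 * window already contains "abcd" and "bcdef" and the input continues with
 * "abcdef", the current position matches "abcd" (length 4) while the next
 * position matches "bcdef" (length 5).  Lazy evaluation emits the literal 'a'
 * followed by the longer match instead of committing to the length-4 match.
 */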
1818local block_state deflate_slow(s, flush)
1819 deflate_state *s;
1820 int flush;
1821{
1822 IPos hash_head; /* head of hash chain */
1823 int bflush; /* set if current block must be flushed */
1824
1825 /* Process the input block. */
1826 for (;;) {
1827 /* Make sure that we always have enough lookahead, except
1828 * at the end of the input file. We need MAX_MATCH bytes
1829 * for the next match, plus MIN_MATCH bytes to insert the
1830 * string following the next match.
1831 */
1832 if (s->lookahead < MIN_LOOKAHEAD) {
1833 fill_window(s);
1834 if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1835 return need_more;
1836 }
1837 if (s->lookahead == 0) break; /* flush the current block */
1838 }
1839
1840 /* Insert the string window[strstart .. strstart+2] in the
1841 * dictionary, and set hash_head to the head of the hash chain:
1842 */
1843 hash_head = NIL;
1844 if (s->lookahead >= MIN_MATCH) {
1845 INSERT_STRING(s, s->strstart, hash_head);
1846 }
1847
4b3f6d79 1848 /* Find the longest match, discarding those <= prev_length. */
f3919878 1849 s->prev_length = s->match_length, s->prev_match = s->match_start;
1850 s->match_length = MIN_MATCH-1;
1851
4b3f6d79 1852#ifdef ZLIB_PM3_TUNED
1853 if (s->prev_length < s->max_lazy_match) {
8e074056 1854 try_harder(s, s->strstart, s->lookahead, hash_head);
4b3f6d79 1855 }
1856
1857#else
f3919878 1858 if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
1859 s->strstart - hash_head <= MAX_DIST(s)) {
1860 /* To simplify the code, we prevent matches with the string
1861 * of window index 0 (in particular we have to avoid a match
1862 * of the string with itself at the start of the input file).
1863 */
1864 s->match_length = longest_match (s, hash_head);
1865 /* longest_match() sets match_start */
1866
1867 if (s->match_length <= 5 && (s->strategy == Z_FILTERED
1868#if TOO_FAR <= 32767
1869 || (s->match_length == MIN_MATCH &&
1870 s->strstart - s->match_start > TOO_FAR)
1871#endif
1872 )) {
1873
1874 /* If prev_match is also MIN_MATCH, match_start is garbage
1875 * but we will ignore the current match anyway.
1876 */
1877 s->match_length = MIN_MATCH-1;
1878 }
1879 }
4b3f6d79 1880#endif /* ZLIB_PM3_TUNED */
f3919878 1881 /* If there was a match at the previous step and the current
1882 * match is not better, output the previous match:
1883 */
1884 if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
1885 uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
1886 /* Do not insert strings in hash table beyond this. */
1887
1888 check_match(s, s->strstart-1, s->prev_match, s->prev_length);
1889
1890 _tr_tally_dist(s, s->strstart -1 - s->prev_match,
1891 s->prev_length - MIN_MATCH, bflush);
1892
1893 /* Insert in hash table all strings up to the end of the match.
1894 * strstart-1 and strstart are already inserted. If there is not
1895 * enough lookahead, the last two strings are not inserted in
1896 * the hash table.
1897 */
1898 s->lookahead -= s->prev_length-1;
1899 s->prev_length -= 2;
1900 do {
1901 if (++s->strstart <= max_insert) {
1902 INSERT_STRING(s, s->strstart, hash_head);
1903 }
1904 } while (--s->prev_length != 0);
1905 s->match_available = 0;
1906 s->match_length = MIN_MATCH-1;
1907 s->strstart++;
1908
1909 if (bflush) FLUSH_BLOCK(s, 0);
1910
1911 } else if (s->match_available) {
1912 /* If there was no match at the previous position, output a
1913 * single literal. If there was a match but the current match
1914 * is longer, truncate the previous match to a single literal.
1915 */
1916 Tracevv((stderr,"%c", s->window[s->strstart-1]));
1917 _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1918 if (bflush) {
1919 FLUSH_BLOCK_ONLY(s, 0);
1920 }
1921 s->strstart++;
1922 s->lookahead--;
1923 if (s->strm->avail_out == 0) return need_more;
1924 } else {
1925 /* There is no previous match to compare with, wait for
1926 * the next step to decide.
1927 */
1928 s->match_available = 1;
1929 s->strstart++;
1930 s->lookahead--;
1931 }
1932 }
1933 Assert (flush != Z_NO_FLUSH, "no flush?");
1934 if (s->match_available) {
1935 Tracevv((stderr,"%c", s->window[s->strstart-1]));
1936 _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1937 s->match_available = 0;
1938 }
1939 s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
1940 if (flush == Z_FINISH) {
1941 FLUSH_BLOCK(s, 1);
1942 return finish_done;
1943 }
1944 if (s->last_lit)
1945 FLUSH_BLOCK(s, 0);
1946 return block_done;
1947}
1948#endif /* FASTEST */
1949
1950/* ===========================================================================
1951 * For Z_RLE, simply look for runs of bytes, generate matches only of distance
1952 * one. Do not maintain a hash table. (It will be regenerated if this run of
1953 * deflate switches away from Z_RLE.)
1954 */
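/* Editorial example (illustrative, not part of the original source): for the
 * input "aaaaa", the first 'a' is emitted as a literal; at the next position
 * the preceding byte repeats, so the remaining four bytes are covered by one
 * distance-1, length-4 match, i.e. run-length encoding expressed in deflate's
 * match syntax.
 */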
1955local block_state deflate_rle(s, flush)
1956 deflate_state *s;
1957 int flush;
1958{
1959 int bflush; /* set if current block must be flushed */
1960 uInt prev; /* byte at distance one to match */
1961 Bytef *scan, *strend; /* scan goes up to strend for length of run */
1962
1963 for (;;) {
1964 /* Make sure that we always have enough lookahead, except
1965 * at the end of the input file. We need MAX_MATCH bytes
1966 * for the longest run, plus one for the unrolled loop.
1967 */
1968 if (s->lookahead <= MAX_MATCH) {
1969 fill_window(s);
1970 if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) {
1971 return need_more;
1972 }
1973 if (s->lookahead == 0) break; /* flush the current block */
1974 }
1975
1976 /* See how many times the previous byte repeats */
1977 s->match_length = 0;
1978 if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
1979 scan = s->window + s->strstart - 1;
1980 prev = *scan;
1981 if (prev == *++scan && prev == *++scan && prev == *++scan) {
1982 strend = s->window + s->strstart + MAX_MATCH;
1983 do {
1984 } while (prev == *++scan && prev == *++scan &&
1985 prev == *++scan && prev == *++scan &&
1986 prev == *++scan && prev == *++scan &&
1987 prev == *++scan && prev == *++scan &&
1988 scan < strend);
1989 s->match_length = MAX_MATCH - (int)(strend - scan);
1990 if (s->match_length > s->lookahead)
1991 s->match_length = s->lookahead;
1992 }
1993 Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
1994 }
1995
1996 /* Emit match if have run of MIN_MATCH or longer, else emit literal */
1997 if (s->match_length >= MIN_MATCH) {
1998 check_match(s, s->strstart, s->strstart - 1, s->match_length);
1999
2000 _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);
2001
2002 s->lookahead -= s->match_length;
2003 s->strstart += s->match_length;
2004 s->match_length = 0;
2005 } else {
2006 /* No match, output a literal byte */
2007 Tracevv((stderr,"%c", s->window[s->strstart]));
2008 _tr_tally_lit (s, s->window[s->strstart], bflush);
2009 s->lookahead--;
2010 s->strstart++;
2011 }
2012 if (bflush) FLUSH_BLOCK(s, 0);
2013 }
2014 s->insert = 0;
2015 if (flush == Z_FINISH) {
2016 FLUSH_BLOCK(s, 1);
2017 return finish_done;
2018 }
2019 if (s->last_lit)
2020 FLUSH_BLOCK(s, 0);
2021 return block_done;
2022}
2023
2024/* ===========================================================================
2025 * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
2026 * (It will be regenerated if this run of deflate switches away from Huffman.)
2027 */
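/* Editorial sketch (illustrative, not part of the original source): the Z_RLE
 * path above and this Z_HUFFMAN_ONLY path are selected via the strategy
 * argument of the public API rather than the compression level, e.g.
 *
 *     deflateInit2(&strm, level, Z_DEFLATED, windowBits, memLevel, Z_RLE);
 *     deflateParams(&strm, level, Z_HUFFMAN_ONLY);   // or on an existing stream
 */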
2028local block_state deflate_huff(s, flush)
2029 deflate_state *s;
2030 int flush;
2031{
2032 int bflush; /* set if current block must be flushed */
2033
2034 for (;;) {
2035 /* Make sure that we have a literal to write. */
2036 if (s->lookahead == 0) {
2037 fill_window(s);
2038 if (s->lookahead == 0) {
2039 if (flush == Z_NO_FLUSH)
2040 return need_more;
2041 break; /* flush the current block */
2042 }
2043 }
2044
2045 /* Output a literal byte */
2046 s->match_length = 0;
2047 Tracevv((stderr,"%c", s->window[s->strstart]));
2048 _tr_tally_lit (s, s->window[s->strstart], bflush);
2049 s->lookahead--;
2050 s->strstart++;
2051 if (bflush) FLUSH_BLOCK(s, 0);
2052 }
2053 s->insert = 0;
2054 if (flush == Z_FINISH) {
2055 FLUSH_BLOCK(s, 1);
2056 return finish_done;
2057 }
2058 if (s->last_lit)
2059 FLUSH_BLOCK(s, 0);
2060 return block_done;
2061}