basisu_miniz.h
1 /* miniz.c v1.15 - deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing
2  Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt
3 
4  Forked from the public domain/unlicense version at: https://code.google.com/archive/p/miniz/
5 
6  Copyright (C) 2019-2021 Binomial LLC. All Rights Reserved.
7 
8  Licensed under the Apache License, Version 2.0 (the "License");
9  you may not use this file except in compliance with the License.
10  You may obtain a copy of the License at
11 
12  http://www.apache.org/licenses/LICENSE-2.0
13 
14  Unless required by applicable law or agreed to in writing, software
15  distributed under the License is distributed on an "AS IS" BASIS,
16  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  See the License for the specific language governing permissions and
18  limitations under the License.
19 */
20 
21 #ifndef MINIZ_HEADER_INCLUDED
22 #define MINIZ_HEADER_INCLUDED
23 
24 #include <stdlib.h>
25 
26 // Defines to completely disable specific portions of miniz.c:
27 // If all macros here are defined the only functionality remaining will be CRC-32, adler-32, tinfl, and tdefl.
28 
29 // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on stdio for file I/O.
30 //#define MINIZ_NO_STDIO
31 
32 // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able to get the current time, or
33 // get/set file times, and the C run-time funcs that get/set times won't be called.
34 // The current downside is the times written to your archives will be from 1979.
35 //#define MINIZ_NO_TIME
36 
37 // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
38 //#define MINIZ_NO_ARCHIVE_APIS
39 
40 // Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing-related ZIP archive API's.
41 //#define MINIZ_NO_ARCHIVE_WRITING_APIS
42 
43 // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression API's.
44 //#define MINIZ_NO_ZLIB_APIS
45 
46 // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent conflicts against stock zlib.
47 //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
48 
49 // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
50 // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom user alloc/free/realloc
51 // callbacks to the zlib and archive API's, and a few stand-alone helper API's which don't provide custom user
52 // functions (such as tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't work.
53 //#define MINIZ_NO_MALLOC
54 
55 #if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
56  // TODO: Work around "error: include file 'sys\utime.h'" when compiling with tcc on Linux.
57 #define MINIZ_NO_TIME
58 #endif
59 
60 #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
61 #include <time.h>
62 #endif
63 
64 #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__i386) || defined(__i486__) || defined(__i486) || defined(i386) || defined(__ia64__) || defined(__x86_64__)
65 // MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
66 #define MINIZ_X86_OR_X64_CPU 1
67 #endif
68 
69 #if (__BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
70 // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
71 #define MINIZ_LITTLE_ENDIAN 1
72 #endif
73 
74 #if MINIZ_X86_OR_X64_CPU
75 // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient integer loads and stores from unaligned addresses.
76 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
77 #endif
78 
79 // Using unaligned loads and stores causes errors when using UBSan. Jam it off.
80 #if defined(__has_feature)
81 #if __has_feature(undefined_behavior_sanitizer)
82 #undef MINIZ_USE_UNALIGNED_LOADS_AND_STORES
83 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0
84 #endif
85 #endif
86 
87 #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || defined(_LP64) || defined(__LP64__) || defined(__ia64__) || defined(__x86_64__)
88 // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are reasonably fast (and don't involve compiler generated calls to helper functions).
89 #define MINIZ_HAS_64BIT_REGISTERS 1
90 #endif
91 
92 #ifndef DOXYGEN_SHOULD_SKIP_THIS
93 namespace buminiz
94 {
95 
96 // ------------------- zlib-style API Definitions.
97 
98 // For more compatibility with zlib, miniz.c uses unsigned long for some parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
99 typedef unsigned long mz_ulong;
100 
101 // mz_free() internally uses the MZ_FREE() macro (which by default calls free() unless you've modified the MZ_FREE macro) to release a block allocated from the heap.
102 void mz_free(void *p);
103 
104 #define MZ_ADLER32_INIT (1)
105 // mz_adler32() returns the initial adler-32 value to use when called with ptr==NULL.
106 mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
107 
108 #define MZ_CRC32_INIT (0)
109 // mz_crc32() returns the initial CRC-32 value to use when called with ptr==NULL.
110 mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
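// Illustrative usage sketch (not part of miniz): computing an adler-32 and CRC-32 incrementally over two
// hypothetical buffers. Both functions take the running value and return the updated one; seed them with
// MZ_ADLER32_INIT / MZ_CRC32_INIT. User code outside this header would qualify the names with the
// buminiz namespace.
//
//   const unsigned char part1[] = "hello ", part2[] = "world";
//   mz_ulong adler = MZ_ADLER32_INIT;
//   adler = mz_adler32(adler, part1, 6);
//   adler = mz_adler32(adler, part2, 5);
//   mz_ulong crc = MZ_CRC32_INIT;
//   crc = mz_crc32(crc, part1, 6);
//   crc = mz_crc32(crc, part2, 5);
//   // adler and crc now cover the concatenation "hello world".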
111 
112 // Compression strategies.
113 enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 };
114 
115 // Method
116 #define MZ_DEFLATED 8
117 
118 #ifndef MINIZ_NO_ZLIB_APIS
119 
120 // Heap allocation callbacks.
121 // Note that mz_alloc_func parameter types purposely differ from zlib's: items/size is size_t, not unsigned long.
122 typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
123 typedef void (*mz_free_func)(void *opaque, void *address);
124 typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size);
125 
126 #define MZ_VERSION "9.1.15"
127 #define MZ_VERNUM 0x91F0
128 #define MZ_VER_MAJOR 9
129 #define MZ_VER_MINOR 1
130 #define MZ_VER_REVISION 15
131 #define MZ_VER_SUBREVISION 0
132 
133 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The other values are for advanced use (refer to the zlib docs).
134 enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 };
135 
136 // Return status codes. MZ_PARAM_ERROR is non-standard.
137 enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 };
138 
139 // Compression levels: 0-9 are the standard zlib-style levels, 10 is best possible compression (not zlib compatible, and may be very slow), MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
140 enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 };
141 
142 // Window bits
143 #define MZ_DEFAULT_WINDOW_BITS 15
144 
145 struct mz_internal_state;
146 
147 // Compression/decompression stream struct.
148 typedef struct mz_stream_s
149 {
150  const unsigned char *next_in; // pointer to next byte to read
151  unsigned int avail_in; // number of bytes available at next_in
152  mz_ulong total_in; // total number of bytes consumed so far
153 
154  unsigned char *next_out; // pointer to next byte to write
155  unsigned int avail_out; // number of bytes that can be written to next_out
156  mz_ulong total_out; // total number of bytes produced so far
157 
158  char *msg; // error msg (unused)
159  struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
160 
161  mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc)
162  mz_free_func zfree; // optional heap free function (defaults to free)
163  void *opaque; // heap alloc function user pointer
164 
165  int data_type; // data_type (unused)
166  mz_ulong adler; // adler32 of the source or uncompressed data
167  mz_ulong reserved; // not used
168 } mz_stream;
169 
170 typedef mz_stream *mz_streamp;
171 
172 // Returns the version string of miniz.c.
173 const char *mz_version(void);
174 
175 // mz_deflateInit() initializes a compressor with default options:
176 // Parameters:
177 // pStream must point to an initialized mz_stream struct.
178 // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
179 // level 1 enables a special compression function that's optimized purely for performance, not ratio.
180 // (This special func. is currently only enabled when MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
181 // Return values:
182 // MZ_OK on success.
183 // MZ_STREAM_ERROR if the stream is bogus.
184 // MZ_PARAM_ERROR if the input parameters are bogus.
185 // MZ_MEM_ERROR on out of memory.
186 int mz_deflateInit(mz_streamp pStream, int level);
187 
188 // mz_deflateInit2() is like mz_deflateInit(), except with more control:
189 // Additional parameters:
190 // method must be MZ_DEFLATED
191 // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no header or footer)
192 // mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
193 int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy);
194 
195 // Quickly resets a compressor without having to reallocate anything. Same as calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
196 int mz_deflateReset(mz_streamp pStream);
197 
198 // mz_deflate() compresses the input to output, consuming as much of the input and producing as much output as possible.
199 // Parameters:
200 // pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members.
201 // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or MZ_FINISH.
202 // Return values:
203 // MZ_OK on success (when flushing, or if more input is needed but not available, and/or there's more output to be written but the output buffer is full).
204 // MZ_STREAM_END if all input has been consumed and all output bytes have been written. Don't call mz_deflate() on the stream anymore.
205 // MZ_STREAM_ERROR if the stream is bogus.
206 // MZ_PARAM_ERROR if one of the parameters is invalid.
207 // MZ_BUF_ERROR if no forward progress is possible because the input and/or output buffers are empty. (Fill up the input buffer or free up some output space and try again.)
208 int mz_deflate(mz_streamp pStream, int flush);
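// Illustrative usage sketch (not part of miniz): compressing a memory buffer with the streaming deflate API,
// reusing a fixed staging buffer as it fills. The input string, buffer names and sizes are hypothetical.
//
//   const char *pSrc = "example data to compress";
//   size_t src_len = strlen(pSrc);
//   unsigned char out_buf[16384];                       /* staging buffer, flushed and reused as it fills */
//   mz_stream strm; memset(&strm, 0, sizeof(strm));
//   if (mz_deflateInit(&strm, MZ_DEFAULT_LEVEL) != MZ_OK) { /* handle error */ }
//   strm.next_in = (const unsigned char *)pSrc; strm.avail_in = (unsigned int)src_len;
//   int status;
//   do {
//     strm.next_out = out_buf; strm.avail_out = (unsigned int)sizeof(out_buf);
//     status = mz_deflate(&strm, MZ_FINISH);
//     /* write (sizeof(out_buf) - strm.avail_out) compressed bytes from out_buf here */
//   } while (status == MZ_OK);
//   /* status should now be MZ_STREAM_END; anything else is an error */
//   mz_deflateEnd(&strm);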
209 
210 // mz_deflateEnd() deinitializes a compressor:
211 // Return values:
212 // MZ_OK on success.
213 // MZ_STREAM_ERROR if the stream is bogus.
214 int mz_deflateEnd(mz_streamp pStream);
215 
216 // mz_deflateBound() returns a (very) conservative upper bound on the amount of data that could be generated by deflate(), assuming flush is set to only MZ_NO_FLUSH or MZ_FINISH.
217 mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
218 
219 // Single-call compression functions mz_compress() and mz_compress2():
220 // Returns MZ_OK on success, or one of the error codes from mz_deflate() on failure.
221 int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len);
222 int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level);
223 
224 // mz_compressBound() returns a (very) conservative upper bound on the amount of data that could be generated by calling mz_compress().
225 mz_ulong mz_compressBound(mz_ulong source_len);
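// Illustrative usage sketch (not part of miniz): single-call compression. mz_compressBound() gives a
// worst-case output size, so the destination buffer is always large enough. Names are hypothetical.
//
//   const unsigned char src[] = "example data to compress";
//   mz_ulong src_len = (mz_ulong)sizeof(src);
//   mz_ulong dst_len = mz_compressBound(src_len);
//   unsigned char *dst = (unsigned char *)malloc(dst_len);
//   int status = mz_compress2(dst, &dst_len, src, src_len, MZ_BEST_COMPRESSION);
//   if (status == MZ_OK) { /* dst_len now holds the actual compressed size */ }
//   free(dst);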
226 
227 // Initializes a decompressor.
228 int mz_inflateInit(mz_streamp pStream);
229 
230 // mz_inflateInit2() is like mz_inflateInit() with an additional option that controls the window size and whether or not the stream has been wrapped with a zlib header/footer:
231 // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate).
232 int mz_inflateInit2(mz_streamp pStream, int window_bits);
233 
234 // Decompresses the input stream to the output, consuming only as much of the input as needed, and writing as much to the output as possible.
235 // Parameters:
236 // pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members.
237 // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
238 // On the first call, if flush is MZ_FINISH it's assumed the input and output buffers are both sized large enough to decompress the entire stream in a single call (this is slightly faster).
239 // MZ_FINISH implies that there are no more source bytes available beside what's already in the input buffer, and that the output buffer is large enough to hold the rest of the decompressed data.
240 // Return values:
241 // MZ_OK on success. Either more input is needed but not available, and/or there's more output to be written but the output buffer is full.
242 // MZ_STREAM_END if all needed input has been consumed and all output bytes have been written. For zlib streams, the adler-32 of the decompressed data has also been verified.
243 // MZ_STREAM_ERROR if the stream is bogus.
244 // MZ_DATA_ERROR if the deflate stream is invalid.
245 // MZ_PARAM_ERROR if one of the parameters is invalid.
246 // MZ_BUF_ERROR if no forward progress is possible because the input buffer is empty but the inflater needs more input to continue, or if the output buffer is not large enough. Call mz_inflate() again
247 // with more input data, or with more room in the output buffer (except when using single call decompression, described above).
248 int mz_inflate(mz_streamp pStream, int flush);
249 int mz_inflate2(mz_streamp pStream, int flush, int adler32_checking);
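// Illustrative usage sketch (not part of miniz): streaming decompression into a fixed staging buffer.
// comp_buf/comp_len are assumed to hold a complete zlib stream; all names are hypothetical.
//
//   mz_stream strm; memset(&strm, 0, sizeof(strm));
//   if (mz_inflateInit(&strm) != MZ_OK) { /* handle error */ }
//   unsigned char out_buf[16384];
//   strm.next_in = comp_buf; strm.avail_in = (unsigned int)comp_len;
//   int status;
//   do {
//     strm.next_out = out_buf; strm.avail_out = (unsigned int)sizeof(out_buf);
//     status = mz_inflate(&strm, MZ_NO_FLUSH);
//     /* consume (sizeof(out_buf) - strm.avail_out) decompressed bytes from out_buf here */
//   } while (status == MZ_OK);
//   /* status should be MZ_STREAM_END on success */
//   mz_inflateEnd(&strm);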
250 
251 // Deinitializes a decompressor.
252 int mz_inflateEnd(mz_streamp pStream);
253 
254 // Single-call decompression.
255 // Returns MZ_OK on success, or one of the error codes from mz_inflate() on failure.
256 int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len);
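// Illustrative usage sketch (not part of miniz): single-call decompression with mz_uncompress(). The caller
// must know (or conservatively over-estimate) the decompressed size up front; comp_buf/comp_len/orig_len
// are hypothetical names.
//
//   mz_ulong dst_len = orig_len;                        /* expected decompressed size */
//   unsigned char *dst = (unsigned char *)malloc(dst_len);
//   int status = mz_uncompress(dst, &dst_len, comp_buf, comp_len);
//   if (status == MZ_OK) { /* dst_len now holds the actual decompressed size */ }
//   free(dst);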
257 
258 // Returns a string description of the specified error code, or NULL if the error code is invalid.
259 const char *mz_error(int err);
260 
261 // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used as a drop-in replacement for the subset of zlib that miniz.c supports.
262 // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you use zlib in the same project.
263 #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
264 typedef unsigned char Byte;
265 typedef unsigned int uInt;
266 typedef mz_ulong uLong;
267 typedef Byte Bytef;
268 typedef uInt uIntf;
269 typedef char charf;
270 typedef int intf;
271 typedef void *voidpf;
272 typedef uLong uLongf;
273 typedef void *voidp;
274 typedef void *const voidpc;
275 #define Z_NULL 0
276 #define Z_NO_FLUSH MZ_NO_FLUSH
277 #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
278 #define Z_SYNC_FLUSH MZ_SYNC_FLUSH
279 #define Z_FULL_FLUSH MZ_FULL_FLUSH
280 #define Z_FINISH MZ_FINISH
281 #define Z_BLOCK MZ_BLOCK
282 #define Z_OK MZ_OK
283 #define Z_STREAM_END MZ_STREAM_END
284 #define Z_NEED_DICT MZ_NEED_DICT
285 #define Z_ERRNO MZ_ERRNO
286 #define Z_STREAM_ERROR MZ_STREAM_ERROR
287 #define Z_DATA_ERROR MZ_DATA_ERROR
288 #define Z_MEM_ERROR MZ_MEM_ERROR
289 #define Z_BUF_ERROR MZ_BUF_ERROR
290 #define Z_VERSION_ERROR MZ_VERSION_ERROR
291 #define Z_PARAM_ERROR MZ_PARAM_ERROR
292 #define Z_NO_COMPRESSION MZ_NO_COMPRESSION
293 #define Z_BEST_SPEED MZ_BEST_SPEED
294 #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
295 #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
296 #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
297 #define Z_FILTERED MZ_FILTERED
298 #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
299 #define Z_RLE MZ_RLE
300 #define Z_FIXED MZ_FIXED
301 #define Z_DEFLATED MZ_DEFLATED
302 #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
303 #define alloc_func mz_alloc_func
304 #define free_func mz_free_func
305 #define internal_state mz_internal_state
306 #define z_stream mz_stream
307 #define deflateInit mz_deflateInit
308 #define deflateInit2 mz_deflateInit2
309 #define deflateReset mz_deflateReset
310 #define deflate mz_deflate
311 #define deflateEnd mz_deflateEnd
312 #define deflateBound mz_deflateBound
313 #define compress mz_compress
314 #define compress2 mz_compress2
315 #define compressBound mz_compressBound
316 #define inflateInit mz_inflateInit
317 #define inflateInit2 mz_inflateInit2
318 #define inflate mz_inflate
319 #define inflateEnd mz_inflateEnd
320 #define uncompress mz_uncompress
321 #define crc32 mz_crc32
322 #define adler32 mz_adler32
323 #define MAX_WBITS 15
324 #define MAX_MEM_LEVEL 9
325 #define zError mz_error
326 #define ZLIB_VERSION MZ_VERSION
327 #define ZLIB_VERNUM MZ_VERNUM
328 #define ZLIB_VER_MAJOR MZ_VER_MAJOR
329 #define ZLIB_VER_MINOR MZ_VER_MINOR
330 #define ZLIB_VER_REVISION MZ_VER_REVISION
331 #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
332 #define zlibVersion mz_version
333 #define zlib_version mz_version()
334 #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
335 
336 #endif // MINIZ_NO_ZLIB_APIS
337 
338 // ------------------- Types and macros
339 
340 typedef unsigned char mz_uint8;
341 typedef signed short mz_int16;
342 typedef unsigned short mz_uint16;
343 typedef unsigned int mz_uint32;
344 typedef unsigned int mz_uint;
345 typedef long long mz_int64;
346 typedef unsigned long long mz_uint64;
347 typedef int mz_bool;
348 
349 #define MZ_FALSE (0)
350 #define MZ_TRUE (1)
351 
352 // An attempt to work around MSVC's spammy "warning C4127: conditional expression is constant" message.
353 #ifdef _MSC_VER
354 #define MZ_MACRO_END while (0, 0)
355 #else
356 #define MZ_MACRO_END while (0)
357 #endif
358 
359 // ------------------- Low-level Decompression API Definitions
360 
361 // Decompression flags used by tinfl_decompress().
362 // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the input is a raw deflate stream.
363 // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available beyond the end of the supplied input buffer. If clear, the input buffer contains all remaining input.
364 // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large enough to hold the entire decompressed stream. If clear, the output buffer is at least the size of the dictionary (typically 32KB).
365 // TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the decompressed bytes.
366 enum
367 {
368  TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
369  TINFL_FLAG_HAS_MORE_INPUT = 2,
370  TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
371  TINFL_FLAG_COMPUTE_ADLER32 = 8
372 };
373 
374 // High level decompression functions:
375 // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block allocated via malloc().
376 // On entry:
377 // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data to decompress.
378 // On return:
379 // Function returns a pointer to the decompressed data, or NULL on failure.
380 // *pOut_len will be set to the decompressed data's size (which may be much larger than src_buf_len).
381 // The caller must call mz_free() on the returned block when it's no longer needed.
382 void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, mz_uint flags);
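// Illustrative usage sketch (not part of miniz): one-shot heap decompression of a zlib stream.
// comp_buf/comp_len are hypothetical; drop TINFL_FLAG_PARSE_ZLIB_HEADER for raw deflate data.
//
//   size_t out_len = 0;
//   void *pDecomp = tinfl_decompress_mem_to_heap(comp_buf, comp_len, &out_len, TINFL_FLAG_PARSE_ZLIB_HEADER);
//   if (pDecomp) {
//     /* out_len bytes of decompressed data are available at pDecomp */
//     mz_free(pDecomp);
//   }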
383 
384 // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block in memory.
385 // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes written on success.
386 #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
387 size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, mz_uint flags);
388 
389 // tinfl_decompress_mem_to_callback() decompresses a block in memory to an internal 32KB buffer, and a user provided callback function will be called to flush the buffer.
390 // Returns 1 on success or 0 on failure.
391 typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
392 int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, mz_uint flags);
393 
394 struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor;
395 
396 // Max size of LZ dictionary.
397 #define TINFL_LZ_DICT_SIZE 32768
398 
399 // Return status.
400 typedef enum
401 {
402  TINFL_STATUS_BAD_PARAM = -3,
403  TINFL_STATUS_ADLER32_MISMATCH = -2,
404  TINFL_STATUS_FAILED = -1,
405  TINFL_STATUS_DONE = 0,
406  TINFL_STATUS_NEEDS_MORE_INPUT = 1,
407  TINFL_STATUS_HAS_MORE_OUTPUT = 2
408 } tinfl_status;
409 
410 // Initializes the decompressor to its initial state.
411 #define tinfl_init(r) do { (r)->m_state = 0; } MZ_MACRO_END
412 #define tinfl_get_adler32(r) (r)->m_check_adler32
413 
414 // Main low-level decompressor coroutine function. This is the only function actually needed for decompression. All the other functions are just high-level helpers for improved usability.
415 // This is a universal API, i.e. it can be used as a building block to build any desired higher level decompression API. In the limit case, it can be called once per every byte input or output.
416 tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags);
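// Illustrative usage sketch (not part of miniz): driving the tinfl_decompress() coroutine directly with a
// 32KB wrapping output buffer (the same pattern the higher-level helpers use internally). The whole
// compressed zlib stream is assumed to be in comp_buf/comp_len (hypothetical names), so
// TINFL_FLAG_HAS_MORE_INPUT is not set.
//
//   tinfl_decompressor decomp; tinfl_init(&decomp);
//   mz_uint8 dict[TINFL_LZ_DICT_SIZE];
//   size_t dict_ofs = 0, in_ofs = 0;
//   tinfl_status status;
//   do {
//     size_t in_bytes = comp_len - in_ofs, out_bytes = TINFL_LZ_DICT_SIZE - dict_ofs;
//     status = tinfl_decompress(&decomp, (const mz_uint8 *)comp_buf + in_ofs, &in_bytes,
//                               dict, dict + dict_ofs, &out_bytes, TINFL_FLAG_PARSE_ZLIB_HEADER);
//     in_ofs += in_bytes;
//     /* consume out_bytes of decompressed data starting at dict + dict_ofs here */
//     dict_ofs = (dict_ofs + out_bytes) & (TINFL_LZ_DICT_SIZE - 1);
//   } while (status == TINFL_STATUS_HAS_MORE_OUTPUT);
//   /* status is TINFL_STATUS_DONE on success, negative on error */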
417 
418 // Internal/private bits follow.
419 enum
420 {
421  TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19,
422  TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
423 };
424 
425 typedef struct
426 {
427  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
428  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
429 } tinfl_huff_table;
430 
431 #if MINIZ_HAS_64BIT_REGISTERS
432 #define TINFL_USE_64BIT_BITBUF 1
433 #endif
434 
435 #if TINFL_USE_64BIT_BITBUF
436 typedef mz_uint64 tinfl_bit_buf_t;
437 #define TINFL_BITBUF_SIZE (64)
438 #else
439 typedef mz_uint32 tinfl_bit_buf_t;
440 #define TINFL_BITBUF_SIZE (32)
441 #endif
442 
443 struct tinfl_decompressor_tag
444 {
445  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES];
446  tinfl_bit_buf_t m_bit_buf;
447  size_t m_dist_from_out_buf_start;
448  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
449  mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
450 };
451 
452 // ------------------- Low-level Compression API Definitions
453 
454 // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly slower, and raw/dynamic blocks will be output more frequently).
455 #define TDEFL_LESS_MEMORY 0
456 
457 // tdefl_init() compression flags logically OR'd together (low 12 bits contain the max. number of probes per dictionary search):
458 // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap compression), 4095=Huffman+LZ (slowest/best compression).
459 enum
460 {
461  TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF
462 };
463 
464 // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before the deflate data, and the Adler-32 of the source data at the end. Otherwise, you'll get raw deflate data.
465 // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even when not writing zlib headers).
466 // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more efficient lazy parsing.
467 // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's initialization time to the minimum, but the output may vary from run to run given the same input (depending on the contents of memory).
468 // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
469 // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
470 // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
471 // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
472 // The low 12 bits are reserved to control the max # of hash probes per dictionary lookup (see TDEFL_MAX_PROBES_MASK).
473 enum
474 {
475  TDEFL_WRITE_ZLIB_HEADER = 0x01000,
476  TDEFL_COMPUTE_ADLER32 = 0x02000,
477  TDEFL_GREEDY_PARSING_FLAG = 0x04000,
478  TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
479  TDEFL_RLE_MATCHES = 0x10000,
480  TDEFL_FILTER_MATCHES = 0x20000,
481  TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
482  TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
483 };
484 
485 // High level compression functions:
486 // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block allocated via malloc().
487 // On entry:
488 // pSrc_buf, src_buf_len: Pointer and size of source block to compress.
489 // flags: The max match finder probes (default is 128) logically OR'd against the above flags. Higher probes are slower but improve compression.
490 // On return:
491 // Function returns a pointer to the compressed data, or NULL on failure.
492 // *pOut_len will be set to the compressed data's size, which could be larger than src_buf_len on uncompressible data.
493 // The caller must free() the returned block when it's no longer needed.
494 void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, mz_uint flags);
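// Illustrative usage sketch (not part of miniz): one-shot heap compression producing a zlib-framed stream.
// src_buf/src_len are hypothetical. The low 12 bits of flags carry the probe count; TDEFL_DEFAULT_MAX_PROBES
// plus TDEFL_WRITE_ZLIB_HEADER gives zlib output at a reasonable speed/ratio trade-off.
//
//   size_t comp_len = 0;
//   void *pComp = tdefl_compress_mem_to_heap(src_buf, src_len, &comp_len,
//                                            TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   if (pComp) {
//     /* comp_len bytes of zlib data are available at pComp */
//     mz_free(pComp);
//   }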
495 
496 // tdefl_compress_mem_to_mem() compresses a block in memory to another block in memory.
497 // Returns 0 on failure.
498 size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, mz_uint flags);
499 
500 // Compresses an image to a compressed PNG file in memory.
501 // On entry:
502 // pImage, w, h, and num_chans describe the image to compress. num_chans may be 1, 2, 3, or 4.
503 // The image pitch in bytes per scanline will be w*num_chans. The leftmost pixel on the top scanline is stored first in memory.
504 // level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a reasonable default.
505 // If flip is true, the image will be flipped on the Y axis (useful for OpenGL apps).
506 // On return:
507 // Function returns a pointer to the compressed data, or NULL on failure.
508 // *pLen_out will be set to the size of the PNG image file.
509 // The caller must mz_free() the returned heap block (which will typically be larger than *pLen_out) when it's no longer needed.
510 void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, mz_uint8 w, mz_uint8 h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip);
511 void *tdefl_write_image_to_png_file_in_memory(const void *pImage, mz_uint8 w, mz_uint8 h, int num_chans, size_t *pLen_out);
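// Illustrative usage sketch (not part of miniz): writing a tiny RGB image to an in-memory PNG file. The
// pixel buffer and dimensions are hypothetical; the returned heap block is a complete .png file image.
//
//   const mz_uint8 w = 4, h = 4;                        /* note: w/h are mz_uint8 in this fork */
//   mz_uint8 pixels[4 * 4 * 3] = { 0 };                 /* RGB data, pitch = w * num_chans */
//   size_t png_len = 0;
//   void *pPng = tdefl_write_image_to_png_file_in_memory(pixels, w, h, 3, &png_len);
//   if (pPng) {
//     /* write png_len bytes from pPng to a .png file */
//     mz_free(pPng);
//   }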
512 
513 // Output stream interface. The compressor uses this interface to write compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at a time.
514 typedef mz_bool(*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
515 
516 // tdefl_compress_mem_to_output() compresses a block to an output stream. The above helpers use this function internally.
517 mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, mz_uint flags);
518 
519 enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 };
520 
521 // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed output block (using static/fixed Huffman codes).
522 #if TDEFL_LESS_MEMORY
523 enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS };
524 #else
525 enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS };
526 #endif
527 
528 // The low-level tdefl functions below may be used directly if the above helper functions aren't flexible enough. The low-level functions don't make any heap allocations, unlike the above helper functions.
529 typedef enum
530 {
531  TDEFL_STATUS_BAD_PARAM = -2,
532  TDEFL_STATUS_PUT_BUF_FAILED = -1,
533  TDEFL_STATUS_OKAY = 0,
534  TDEFL_STATUS_DONE = 1,
535 } tdefl_status;
536 
537 // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
538 typedef enum
539 {
540  TDEFL_NO_FLUSH = 0,
541  TDEFL_SYNC_FLUSH = 2,
542  TDEFL_FULL_FLUSH = 3,
543  TDEFL_FINISH = 4
544 } tdefl_flush;
545 
546 // tdefl's compression state structure.
547 typedef struct
548 {
549  tdefl_put_buf_func_ptr m_pPut_buf_func;
550  void *m_pPut_buf_user;
551  mz_uint m_flags, m_max_probes[2];
552  int m_greedy_parsing;
553  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
554  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
555  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer;
556  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish;
557  tdefl_status m_prev_return_status;
558  const void *m_pIn_buf;
559  void *m_pOut_buf;
560  size_t *m_pIn_buf_size, *m_pOut_buf_size;
561  tdefl_flush m_flush;
562  const mz_uint8 *m_pSrc;
563  size_t m_src_buf_left, m_out_buf_ofs;
564  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
565  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
566  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
567  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
568  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
569  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
570  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
571  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
572 } tdefl_compressor;
573 
574 // Initializes the compressor.
575 // There is no corresponding deinit() function because the tdefl API's do not dynamically allocate memory.
576 // pPut_buf_func: If non-NULL, output data will be supplied to the specified callback. In this case, the user should call the tdefl_compress_buffer() API for compression.
577 // If pPut_buf_func is NULL the user should always call the tdefl_compress() API.
578 // flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, etc.)
579 tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, mz_uint flags);
580 
581 // Compresses a block of data, consuming as much of the specified input buffer as possible, and writing as much compressed data to the specified output buffer as possible.
582 tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush);
583 
584 // tdefl_compress_buffer() is only usable when tdefl_init() is called with a non-NULL tdefl_put_buf_func_ptr.
585 // tdefl_compress_buffer() always consumes the entire input buffer.
586 tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush);
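// Illustrative usage sketch (not part of miniz): callback-driven compression with the low-level tdefl API.
// The sink callback, its user pointer, and src_buf/src_len are hypothetical. Because tdefl_init() is given a
// non-NULL callback here, the data is pushed with tdefl_compress_buffer().
//
//   /* at file scope: */
//   static mz_bool my_sink(const void *pBuf, int len, void *pUser)
//   {
//     (void)pUser;
//     /* append len bytes from pBuf to a file, vector, etc.; return MZ_FALSE to abort compression */
//     return MZ_TRUE;
//   }
//
//   /* in the calling code (tdefl_compressor is large, so keep it off the stack): */
//   tdefl_compressor *pComp = (tdefl_compressor *)malloc(sizeof(tdefl_compressor));
//   tdefl_init(pComp, my_sink, NULL, TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   tdefl_status status = tdefl_compress_buffer(pComp, src_buf, src_len, TDEFL_FINISH);
//   /* status is TDEFL_STATUS_DONE on success */
//   free(pComp);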
587 
588 tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
589 mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
590 
591 // tdefl_create_comp_flags_from_zip_params() can't be used if MINIZ_NO_ZLIB_APIS is defined, because it uses some of the zlib-style macros.
592 #ifndef MINIZ_NO_ZLIB_APIS
593 // Create tdefl_compress() flags given zlib-style compression parameters.
594 // level may range from [0,10] (where 10 is absolute max compression, but may be much slower on some files)
595 // window_bits may be -15 (raw deflate) or 15 (zlib)
596 // strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, MZ_RLE, or MZ_FIXED
597 mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy);
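// Illustrative usage sketch (not part of miniz): translating zlib-style parameters into tdefl flags, e.g.
// level 9 zlib-framed output with the default strategy, then passing them to tdefl_init() (this mirrors
// what mz_deflateInit2() does internally).
//
//   mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 |
//                        tdefl_create_comp_flags_from_zip_params(9, MZ_DEFAULT_WINDOW_BITS, MZ_DEFAULT_STRATEGY);
//   /* comp_flags can now be used as the 'flags' argument of tdefl_init() */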
598 #endif // #ifndef MINIZ_NO_ZLIB_APIS
599 
600 } // namespace buminiz
601 
602 #endif // DOXYGEN_SHOULD_SKIP_THIS
603 #endif // MINIZ_HEADER_INCLUDED
604 
605 // ------------------- End of Header: Implementation follows. (If you only want the header, define MINIZ_HEADER_FILE_ONLY.)
606 
607 #ifndef MINIZ_HEADER_FILE_ONLY
608 
609 #include <string.h>
610 #include <assert.h>
611 
612 #ifndef DOXYGEN_SHOULD_SKIP_THIS
613 namespace buminiz
614 {
615 
616 typedef unsigned char mz_validate_uint16[sizeof(mz_uint16)==2 ? 1 : -1];
617 typedef unsigned char mz_validate_uint32[sizeof(mz_uint32)==4 ? 1 : -1];
618 typedef unsigned char mz_validate_uint64[sizeof(mz_uint64)==8 ? 1 : -1];
619 
620 #define MZ_ASSERT(x) assert(x)
621 
622 #ifdef MINIZ_NO_MALLOC
623 #define MZ_MALLOC(x) NULL
624 #define MZ_FREE(x) (void)x, ((void)0)
625 #define MZ_REALLOC(p, x) NULL
626 #else
627 #define MZ_MALLOC(x) malloc(x)
628 #define MZ_FREE(x) free(x)
629 #define MZ_REALLOC(p, x) realloc(p, x)
630 #endif
631 
632 #define MZ_MAX(a,b) (((a)>(b))?(a):(b))
633 #define MZ_MIN(a,b) (((a)<(b))?(a):(b))
634 #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
635 
636 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
637 #define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
638 #define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
639 #else
640 #define MZ_READ_LE16(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
641 #define MZ_READ_LE32(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
642 #endif
643 
644 #ifdef _MSC_VER
645 #define MZ_FORCEINLINE __forceinline
646 #elif defined(__GNUC__)
647 #define MZ_FORCEINLINE inline __attribute__((__always_inline__))
648 #else
649 #define MZ_FORCEINLINE inline
650 #endif
651 
652 // ------------------- zlib-style API's
653 
654 mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len)
655 {
656  mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552;
657  if (!ptr) return MZ_ADLER32_INIT;
658  while (buf_len) {
659  for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
660  s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1;
661  s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1;
662  }
663  for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
664  s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552;
665  }
666  return (s2 << 16) + s1;
667 }
668 
669 // Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C implementation that balances processor cache usage against speed": http://www.geocities.com/malbrain/
670 mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len)
671 {
672  static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
673  0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c };
674  mz_uint32 crcu32 = (mz_uint32)crc;
675  if (!ptr) return MZ_CRC32_INIT;
676  crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; }
677  return ~crcu32;
678 }
679 
680 void mz_free(void *p)
681 {
682  MZ_FREE(p);
683 }
684 
685 #ifndef MINIZ_NO_ZLIB_APIS
686 
687 static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); }
688 static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); }
689 //static void *def_realloc_func(void *opaque, void *address, size_t items, size_t size) { (void)opaque, (void)address, (void)items, (void)size; return MZ_REALLOC(address, items * size); }
690 
691 const char *mz_version(void)
692 {
693  return MZ_VERSION;
694 }
695 
696 int mz_deflateInit(mz_streamp pStream, int level)
697 {
698  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY);
699 }
700 
701 int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy)
702 {
703  tdefl_compressor *pComp;
704  mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
705 
706  if (!pStream) return MZ_STREAM_ERROR;
707  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR;
708 
709  pStream->data_type = 0;
710  pStream->adler = MZ_ADLER32_INIT;
711  pStream->msg = NULL;
712  pStream->reserved = 0;
713  pStream->total_in = 0;
714  pStream->total_out = 0;
715  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
716  if (!pStream->zfree) pStream->zfree = def_free_func;
717 
718  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor));
719  if (!pComp)
720  return MZ_MEM_ERROR;
721 
722  pStream->state = (struct mz_internal_state *)pComp;
723 
724  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
725  mz_deflateEnd(pStream);
726  return MZ_PARAM_ERROR;
727  }
728 
729  return MZ_OK;
730 }
731 
732 int mz_deflateReset(mz_streamp pStream)
733 {
734  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR;
735  pStream->total_in = pStream->total_out = 0;
736  tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags);
737  return MZ_OK;
738 }
739 
740 int mz_deflate(mz_streamp pStream, int flush)
741 {
742  size_t in_bytes, out_bytes;
743  mz_ulong orig_total_in, orig_total_out;
744  int mz_status = MZ_OK;
745 
746  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR;
747  if (!pStream->avail_out) return MZ_BUF_ERROR;
748 
749  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
750 
751  if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE)
752  return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
753 
754  orig_total_in = pStream->total_in; orig_total_out = pStream->total_out;
755  for (; ; ) {
756  tdefl_status defl_status;
757  in_bytes = pStream->avail_in; out_bytes = pStream->avail_out;
758 
759  defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush);
760  pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes;
761  pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
762 
763  pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes;
764  pStream->total_out += (mz_uint)out_bytes;
765 
766  if (defl_status < 0) {
767  mz_status = MZ_STREAM_ERROR;
768  break;
769  }
770  else if (defl_status == TDEFL_STATUS_DONE) {
771  mz_status = MZ_STREAM_END;
772  break;
773  }
774  else if (!pStream->avail_out)
775  break;
776  else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
777  if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out))
778  break;
779  return MZ_BUF_ERROR; // Can't make forward progress without some input.
780  }
781  }
782  return mz_status;
783 }
784 
785 int mz_deflateEnd(mz_streamp pStream)
786 {
787  if (!pStream) return MZ_STREAM_ERROR;
788  if (pStream->state) {
789  pStream->zfree(pStream->opaque, pStream->state);
790  pStream->state = NULL;
791  }
792  return MZ_OK;
793 }
794 
795 mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len)
796 {
797  (void)pStream;
798  // This is really over-conservative. (And lame, but it's actually pretty tricky to compute a true upper bound given the way tdefl's blocking works.)
799  mz_uint64 a = 128ULL + (source_len * 110ULL) / 100ULL;
800  mz_uint64 b = 128ULL + (mz_uint64)source_len + ((source_len / (31 * 1024)) + 1ULL) * 5ULL;
801 
802  mz_uint64 t = MZ_MAX(a, b);
803  if (((mz_ulong)t) != t)
804  t = (mz_ulong)(-1);
805 
806  return (mz_ulong)t;
807 }
808 
809 int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level)
810 {
811  int status;
812  mz_stream stream;
813  memset(&stream, 0, sizeof(stream));
814 
815  // In case mz_ulong is 64-bits (argh I hate longs).
816  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
817 
818  stream.next_in = pSource;
819  stream.avail_in = (mz_uint32)source_len;
820  stream.next_out = pDest;
821  stream.avail_out = (mz_uint32)*pDest_len;
822 
823  status = mz_deflateInit(&stream, level);
824  if (status != MZ_OK) return status;
825 
826  status = mz_deflate(&stream, MZ_FINISH);
827  if (status != MZ_STREAM_END) {
828  mz_deflateEnd(&stream);
829  return (status == MZ_OK) ? MZ_BUF_ERROR : status;
830  }
831 
832  *pDest_len = stream.total_out;
833  return mz_deflateEnd(&stream);
834 }
835 
836 int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len)
837 {
838  return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION);
839 }
840 
841 mz_ulong mz_compressBound(mz_ulong source_len)
842 {
843  return mz_deflateBound(NULL, source_len);
844 }
845 
846 typedef struct
847 {
848  tinfl_decompressor m_decomp;
849  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits;
850  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
851  tinfl_status m_last_status;
852 } inflate_state;
853 
854 int mz_inflateInit2(mz_streamp pStream, int window_bits)
855 {
856  inflate_state *pDecomp;
857  if (!pStream) return MZ_STREAM_ERROR;
858  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR;
859 
860  pStream->data_type = 0;
861  pStream->adler = 0;
862  pStream->msg = NULL;
863  pStream->total_in = 0;
864  pStream->total_out = 0;
865  pStream->reserved = 0;
866  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
867  if (!pStream->zfree) pStream->zfree = def_free_func;
868 
869  pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state));
870  if (!pDecomp) return MZ_MEM_ERROR;
871 
872  pStream->state = (struct mz_internal_state *)pDecomp;
873 
874  tinfl_init(&pDecomp->m_decomp);
875  pDecomp->m_dict_ofs = 0;
876  pDecomp->m_dict_avail = 0;
877  pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
878  pDecomp->m_first_call = 1;
879  pDecomp->m_has_flushed = 0;
880  pDecomp->m_window_bits = window_bits;
881 
882  return MZ_OK;
883 }
884 
885 int mz_inflateInit(mz_streamp pStream)
886 {
887  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
888 }
889 
890 int mz_inflate2(mz_streamp pStream, int flush, int adler32_checking)
891 {
892  inflate_state *pState;
893  mz_uint n, first_call, decomp_flags = adler32_checking ? TINFL_FLAG_COMPUTE_ADLER32 : 0;
894  size_t in_bytes, out_bytes, orig_avail_in;
895  tinfl_status status;
896 
897  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
898  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
899  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
900 
901  pState = (inflate_state *)pStream->state;
902  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
903  orig_avail_in = pStream->avail_in;
904 
905  first_call = pState->m_first_call; pState->m_first_call = 0;
906  if (pState->m_last_status < 0) return MZ_DATA_ERROR;
907 
908  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
909  pState->m_has_flushed |= (flush == MZ_FINISH);
910 
911  if ((flush == MZ_FINISH) && (first_call)) {
912  // MZ_FINISH on the first call implies that the input and output buffers are large enough to hold the entire compressed/decompressed file.
913  decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
914  in_bytes = pStream->avail_in; out_bytes = pStream->avail_out;
915  status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags);
916  pState->m_last_status = status;
917  pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes;
918  pStream->adler = tinfl_get_adler32(&pState->m_decomp);
919  pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes;
920 
921  if (status < 0)
922  return MZ_DATA_ERROR;
923  else if (status != TINFL_STATUS_DONE) {
924  pState->m_last_status = TINFL_STATUS_FAILED;
925  return MZ_BUF_ERROR;
926  }
927  return MZ_STREAM_END;
928  }
929  // If flush != MZ_FINISH then we must assume there's more input.
930  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
931 
932  if (pState->m_dict_avail) {
933  n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
934  memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
935  pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n;
936  pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
937  return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK;
938  }
939 
940  for (; ; ) {
941  in_bytes = pStream->avail_in;
942  out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
943 
944  status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
945  pState->m_last_status = status;
946 
947  pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes;
948  pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp);
949 
950  pState->m_dict_avail = (mz_uint)out_bytes;
951 
952  n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
953  memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
954  pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n;
955  pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
956 
957  if (status < 0)
958  return MZ_DATA_ERROR; // Stream is corrupted (there could be some uncompressed data left in the output dictionary - oh well).
959  else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
960  return MZ_BUF_ERROR; // Signal caller that we can't make forward progress without supplying more input or by setting flush to MZ_FINISH.
961  else if (flush == MZ_FINISH) {
962  // The output buffer MUST be large enough to hold the remaining uncompressed data when flush==MZ_FINISH.
963  if (status == TINFL_STATUS_DONE)
964  return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
965  // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's at least 1 more byte on the way. If there's no more room left in the output buffer then something is wrong.
966  else if (!pStream->avail_out)
967  return MZ_BUF_ERROR;
968  }
969  else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail))
970  break;
971  }
972 
973  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK;
974 }
975 
976 int mz_inflate(mz_streamp pStream, int flush)
977 {
978  return mz_inflate2(pStream, flush, MZ_TRUE);
979 }
980 
981 int mz_inflateEnd(mz_streamp pStream)
982 {
983  if (!pStream)
984  return MZ_STREAM_ERROR;
985  if (pStream->state) {
986  pStream->zfree(pStream->opaque, pStream->state);
987  pStream->state = NULL;
988  }
989  return MZ_OK;
990 }
991 
992 int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len)
993 {
994  mz_stream stream;
995  int status;
996  memset(&stream, 0, sizeof(stream));
997 
998  // In case mz_ulong is 64-bits (argh I hate longs).
999  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
1000 
1001  stream.next_in = pSource;
1002  stream.avail_in = (mz_uint32)source_len;
1003  stream.next_out = pDest;
1004  stream.avail_out = (mz_uint32)*pDest_len;
1005 
1006  status = mz_inflateInit(&stream);
1007  if (status != MZ_OK)
1008  return status;
1009 
1010  status = mz_inflate(&stream, MZ_FINISH);
1011  if (status != MZ_STREAM_END) {
1012  mz_inflateEnd(&stream);
1013  return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR : status;
1014  }
1015  *pDest_len = stream.total_out;
1016 
1017  return mz_inflateEnd(&stream);
1018 }
1019 
1020 const char *mz_error(int err)
1021 {
1022  static struct { int m_err; const char *m_pDesc; } s_error_descs[] =
1023  {
1024  { MZ_OK, "" }, { MZ_STREAM_END, "stream end" }, { MZ_NEED_DICT, "need dictionary" }, { MZ_ERRNO, "file error" }, { MZ_STREAM_ERROR, "stream error" },
1025  { MZ_DATA_ERROR, "data error" }, { MZ_MEM_ERROR, "out of memory" }, { MZ_BUF_ERROR, "buf error" }, { MZ_VERSION_ERROR, "version error" }, { MZ_PARAM_ERROR, "parameter error" }
1026  };
1027  mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
1028  return NULL;
1029 }
1030 
1031 #endif //MINIZ_NO_ZLIB_APIS
1032 
1033 // ------------------- Low-level Decompression (completely independent from all compression API's)
1034 
1035 #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
1036 #define TINFL_MEMSET(p, c, l) memset(p, c, l)
1037 
1038 #define TINFL_CR_BEGIN switch(r->m_state) { case 0:
1039 #define TINFL_CR_RETURN(state_index, result) do { status = result; r->m_state = state_index; goto common_exit; case state_index:; } MZ_MACRO_END
1040 #define TINFL_CR_RETURN_FOREVER(state_index, result) do { for ( ; ; ) { TINFL_CR_RETURN(state_index, result); } } MZ_MACRO_END
1041 #define TINFL_CR_FINISH }
1042 
1043 // TODO: If the caller has indicated that there's no more input, and we attempt to read beyond the input buf, then something is wrong with the input because the inflator never
1044 // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
1045 #define TINFL_GET_BYTE(state_index, c) do { \
1046  if (pIn_buf_cur >= pIn_buf_end) { \
1047  for ( ; ; ) { \
1048  if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
1049  TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
1050  if (pIn_buf_cur < pIn_buf_end) { \
1051  c = *pIn_buf_cur++; \
1052  break; \
1053  } \
1054  } else { \
1055  c = 0; \
1056  break; \
1057  } \
1058  } \
1059  } else c = *pIn_buf_cur++; } MZ_MACRO_END
1060 
1061 #define TINFL_NEED_BITS(state_index, n) do { mz_uint c; TINFL_GET_BYTE(state_index, c); bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); num_bits += 8; } while (num_bits < (mz_uint)(n))
1062 #define TINFL_SKIP_BITS(state_index, n) do { if (num_bits < (mz_uint)(n)) { TINFL_NEED_BITS(state_index, n); } bit_buf >>= (n); num_bits -= (n); } MZ_MACRO_END
1063 #define TINFL_GET_BITS(state_index, b, n) do { if (num_bits < (mz_uint)(n)) { TINFL_NEED_BITS(state_index, n); } b = bit_buf & ((1 << (n)) - 1); bit_buf >>= (n); num_bits -= (n); } MZ_MACRO_END
1064 
1065 // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes remaining in the input buffer falls below 2.
1066 // It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a
1067 // Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the
1068 // bit buffer contains >=15 bits (deflate's max. Huffman code size).
1069 #define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
1070  do { \
1071  temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
1072  if (temp >= 0) { \
1073  code_len = temp >> 9; \
1074  if ((code_len) && (num_bits >= code_len)) \
1075  break; \
1076  } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
1077  code_len = TINFL_FAST_LOOKUP_BITS; \
1078  do { \
1079  temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
1080  } while ((temp < 0) && (num_bits >= (code_len + 1))); if (temp >= 0) break; \
1081  } TINFL_GET_BYTE(state_index, c); bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); num_bits += 8; \
1082  } while (num_bits < 15);
1083 
1084 // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex than you would initially expect because the zlib API expects the decompressor to never read
1085 // beyond the final byte of the deflate stream. (In other words, when this macro wants to read another byte from the input, it REALLY needs another byte in order to fully
1086 // decode the next Huffman code.) Handling this properly is particularly important on raw deflate (non-zlib) streams, which aren't followed by a byte aligned adler-32.
1087 // The slow path is only executed at the very end of the input buffer.
1088 #define TINFL_HUFF_DECODE(state_index, sym, pHuff) do { \
1089  int temp; mz_uint code_len, c; \
1090  if (num_bits < 15) { \
1091  if ((pIn_buf_end - pIn_buf_cur) < 2) { \
1092  TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
1093  } else { \
1094  bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); pIn_buf_cur += 2; num_bits += 16; \
1095  } \
1096  } \
1097  if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) \
1098  code_len = temp >> 9, temp &= 511; \
1099  else { \
1100  code_len = TINFL_FAST_LOOKUP_BITS; do { temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; } while (temp < 0); \
1101  } sym = temp; bit_buf >>= code_len; num_bits -= code_len; } MZ_MACRO_END
1102 
1103 tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags)
1104 {
1105  static const int s_length_base[31] = { 3,4,5,6,7,8,9,10,11,13, 15,17,19,23,27,31,35,43,51,59, 67,83,99,115,131,163,195,227,258,0,0 };
1106  static const int s_length_extra[31] = { 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 };
1107  static const int s_dist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, 257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0 };
1108  static const int s_dist_extra[32] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 };
1109  static const mz_uint8 s_length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 };
1110  static const int s_min_table_sizes[3] = { 257, 1, 4 };
1111 
1112  tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf;
1113  const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
1114  mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
1115  size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start;
1116 
1117  // Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter).
1118  if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; }
1119 
1120  num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start;
1121  TINFL_CR_BEGIN
1122 
1123  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1;
1124  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
1125  TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1);
1126  counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
1127  if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
1128  if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); }
1129  }
1130 
1131  do {
1132  TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1;
1133  if (r->m_type == 0) {
1134  TINFL_SKIP_BITS(5, num_bits & 7);
1135  for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); }
1136  if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); }
1137  while ((counter) && (num_bits)) {
1138  TINFL_GET_BITS(51, dist, 8);
1139  while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); }
1140  *pOut_buf_cur++ = (mz_uint8)dist;
1141  counter--;
1142  }
1143  while (counter) {
1144  size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); }
1145  while (pIn_buf_cur >= pIn_buf_end) {
1146  if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
1147  TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
1148  }
1149  else {
1150  TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
1151  }
1152  }
1153  n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter);
1154  TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n;
1155  }
1156  }
1157  else if (r->m_type == 3) {
1158  TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
1159  }
1160  else {
1161  if (r->m_type == 1) {
1162  mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i;
1163  r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
1164  for (i = 0; i <= 143; ++i) {
1165  *p++ = 8;
1166  }
1167  for (; i <= 255; ++i) {
1168  *p++ = 9;
1169  }
1170  for (; i <= 279; ++i) {
1171  *p++ = 7;
1172  }
1173  for (; i <= 287; ++i) {
1174  *p++ = 8;
1175  }
1176  }
1177  else {
1178  for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; }
1179  MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; }
1180  r->m_table_sizes[2] = 19;
1181  }
1182  for (; (int)r->m_type >= 0; r->m_type--) {
1183  int tree_next, tree_cur; tinfl_huff_table *pTable;
1184  mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree);
1185  for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++;
1186  used_syms = 0, total = 0; next_code[0] = next_code[1] = 0;
1187  for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); }
1188  if ((65536 != total) && (used_syms > 1)) {
1189  TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
1190  }
1191  for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
1192  mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue;
1193  cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1);
1194  if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; }
1195  if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; }
1196  rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
1197  for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
1198  tree_cur -= ((rev_code >>= 1) & 1);
1199  if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; }
1200  else tree_cur = pTable->m_tree[-tree_cur - 1];
1201  }
1202  tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
1203  }
1204  if (r->m_type == 2) {
1205  for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]); ) {
1206  mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; }
1207  if ((dist == 16) && (!counter)) {
1208  TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
1209  }
1210  num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16];
1211  TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? r->m_len_codes[counter - 1] : 0, s); counter += s;
1212  }
1213  if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
1214  TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
1215  }
1216  TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]);
1217  }
1218  }
1219  for (; ; ) {
1220  mz_uint8 *pSrc;
1221  for (; ; ) {
1222  if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) {
1223  TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
1224  if (counter >= 256)
1225  break;
1226  while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); }
1227  *pOut_buf_cur++ = (mz_uint8)counter;
1228  }
1229  else {
1230  mz_int16 sym2;
1231  mz_uint code_len;
1232 #if TINFL_USE_64BIT_BITBUF
1233  if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; }
1234 #else
1235  if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; }
1236 #endif
1237  if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
1238  code_len = sym2 >> 9;
1239  else {
1240  code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0);
1241  }
1242  counter = sym2; bit_buf >>= code_len; num_bits -= code_len;
1243  if (counter & 256)
1244  break;
1245 
1246 #if !TINFL_USE_64BIT_BITBUF
1247  if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; }
1248 #endif
1249  if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
1250  code_len = sym2 >> 9;
1251  else {
1252  code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0);
1253  }
1254  bit_buf >>= code_len; num_bits -= code_len;
1255 
1256  pOut_buf_cur[0] = (mz_uint8)counter;
1257  if (sym2 & 256) {
1258  pOut_buf_cur++;
1259  counter = sym2;
1260  break;
1261  }
1262  pOut_buf_cur[1] = (mz_uint8)sym2;
1263  pOut_buf_cur += 2;
1264  }
1265  }
1266  if ((counter &= 511) == 256) break;
1267 
1268  num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257];
1269  if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; }
1270 
1271  TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
1272  num_extra = s_dist_extra[dist]; dist = s_dist_base[dist];
1273  if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; }
1274 
1275  dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
1276  if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
1277  TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
1278  }
1279 
1280  pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask);
1281 
1282  if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
1283  while (counter--) {
1284  while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); }
1285  *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask];
1286  }
1287  continue;
1288  }
1289 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
1290  else if ((counter >= 9) && (counter <= dist)) {
1291  const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
1292  do {
1293  ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
1294  ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
1295  pOut_buf_cur += 8;
1296  } while ((pSrc += 8) < pSrc_end);
1297  if ((counter &= 7) < 3) {
1298  if (counter) {
1299  pOut_buf_cur[0] = pSrc[0];
1300  if (counter > 1)
1301  pOut_buf_cur[1] = pSrc[1];
1302  pOut_buf_cur += counter;
1303  }
1304  continue;
1305  }
1306  }
1307 #endif
1308  do {
1309  pOut_buf_cur[0] = pSrc[0];
1310  pOut_buf_cur[1] = pSrc[1];
1311  pOut_buf_cur[2] = pSrc[2];
1312  pOut_buf_cur += 3; pSrc += 3;
1313  } while ((int)(counter -= 3) > 2);
1314  if ((int)counter > 0) {
1315  pOut_buf_cur[0] = pSrc[0];
1316  if ((int)counter > 1)
1317  pOut_buf_cur[1] = pSrc[1];
1318  pOut_buf_cur += counter;
1319  }
1320  }
1321  }
1322  } while (!(r->m_final & 1));
1323  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
1324  TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; }
1325  }
1326  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
1327  TINFL_CR_FINISH
1328 
1329  common_exit:
1330  r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start;
1331  *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
1332  //if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0))
1333  if ((decomp_flags & TINFL_FLAG_COMPUTE_ADLER32) && (status >= 0)) {
1334  const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size;
1335  mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552;
1336  while (buf_len) {
1337  for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
1338  s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1;
1339  s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1;
1340  }
1341  for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
1342  s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552;
1343  }
1344  r->m_check_adler32 = (s2 << 16) + s1;
1345  if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32))
1346  status = TINFL_STATUS_ADLER32_MISMATCH;
1347  }
1348  return status;
1349 }
1350 
1351 // Higher level helper functions.
1352 void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, mz_uint flags)
1353 {
1354  tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0;
1355  *pOut_len = 0;
1356  tinfl_init(&decomp);
1357  for (; ; ) {
1358  size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
1359  tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size,
1360  (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
1361  if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
1362  MZ_FREE(pBuf); *pOut_len = 0; return NULL;
1363  }
1364  src_buf_ofs += src_buf_size;
1365  *pOut_len += dst_buf_size;
1366  if (status == TINFL_STATUS_DONE) break;
1367  new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
1368  pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
1369  if (!pNew_buf) {
1370  MZ_FREE(pBuf); *pOut_len = 0; return NULL;
1371  }
1372  pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity;
1373  }
1374  return pBuf;
1375 }
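
// Illustrative use of tinfl_decompress_mem_to_heap() (a sketch, not part of miniz; pComp and
// comp_size are hypothetical caller variables). The returned block is allocated with the configured
// MZ_REALLOC, so release it with the matching helper (mz_free() in a stock miniz build):
//
//   size_t out_len = 0;
//   void *pOut = tinfl_decompress_mem_to_heap(pComp, comp_size, &out_len,
//                                             TINFL_FLAG_PARSE_ZLIB_HEADER); // or 0 for raw deflate
//   if (pOut) { /* ...consume out_len bytes at pOut... */ mz_free(pOut); }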
1376 
1377 size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, mz_uint flags)
1378 {
1379  tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp);
1380  status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
1381  return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len;
1382 }
1383 
1384 int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, mz_uint flags)
1385 {
1386  int result = 0;
1387  tinfl_decompressor decomp;
1388  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0;
1389  if (!pDict)
1390  return TINFL_STATUS_FAILED;
1391  tinfl_init(&decomp);
1392  for (; ; ) {
1393  size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
1394  tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
1395  (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
1396  in_buf_ofs += in_buf_size;
1397  if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
1398  break;
1399  if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
1400  result = (status == TINFL_STATUS_DONE);
1401  break;
1402  }
1403  dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
1404  }
1405  MZ_FREE(pDict);
1406  *pIn_buf_size = in_buf_ofs;
1407  return result;
1408 }
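
// Streaming sketch for tinfl_decompress_mem_to_callback(): the decompressor writes into a private
// TINFL_LZ_DICT_SIZE (32KB) circular dictionary and hands each filled span to the callback, so the
// full output never has to fit in memory. The callback must return non-zero to continue. The names
// below (my_write_cb, my_file, pComp, comp_size) are hypothetical and assume <stdio.h>:
//
//   static int my_write_cb(const void *pBuf, int len, void *pUser)
//   { return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len; }
//
//   size_t in_size = comp_size; // bytes of compressed input available
//   int ok = tinfl_decompress_mem_to_callback(pComp, &in_size, my_write_cb, my_file, TINFL_FLAG_PARSE_ZLIB_HEADER);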
1409 
1410 // ------------------- Low-level Compression (independent from all decompression API's)
1411 
1412 // Purposely making these tables static for faster init and thread safety.
1413 static const mz_uint16 s_tdefl_len_sym[256] = {
1414  257,258,259,260,261,262,263,264,265,265,266,266,267,267,268,268,269,269,269,269,270,270,270,270,271,271,271,271,272,272,272,272,
1415  273,273,273,273,273,273,273,273,274,274,274,274,274,274,274,274,275,275,275,275,275,275,275,275,276,276,276,276,276,276,276,276,
1416  277,277,277,277,277,277,277,277,277,277,277,277,277,277,277,277,278,278,278,278,278,278,278,278,278,278,278,278,278,278,278,278,
1417  279,279,279,279,279,279,279,279,279,279,279,279,279,279,279,279,280,280,280,280,280,280,280,280,280,280,280,280,280,280,280,280,
1418  281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,
1419  282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,
1420  283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,
1421  284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,285 };
1422 
1423 static const mz_uint8 s_tdefl_len_extra[256] = {
1424  0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
1425  4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
1426  5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
1427  5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,0 };
1428 
1429 static const mz_uint8 s_tdefl_small_dist_sym[512] = {
1430  0,1,2,3,4,4,5,5,6,6,6,6,7,7,7,7,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,
1431  11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,
1432  13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,
1433  14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,
1434  14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
1435  15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,16,
1436  16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,
1437  16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,
1438  16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,
1439  17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,
1440  17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,
1441  17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17 };
1442 
1443 static const mz_uint8 s_tdefl_small_dist_extra[512] = {
1444  0,0,0,0,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,
1445  5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
1446  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
1447  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
1448  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
1449  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
1450  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
1451  7,7,7,7,7,7,7,7 };
1452 
1453 static const mz_uint8 s_tdefl_large_dist_sym[128] = {
1454  0,0,18,19,20,20,21,21,22,22,22,22,23,23,23,23,24,24,24,24,24,24,24,24,25,25,25,25,25,25,25,25,26,26,26,26,26,26,26,26,26,26,26,26,
1455  26,26,26,26,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,
1456  28,28,28,28,28,28,28,28,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 };
1457 
1458 static const mz_uint8 s_tdefl_large_dist_extra[128] = {
1459  0,0,8,8,9,9,9,9,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,
1460  12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,
1461  13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13 };
1462 
1463 // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted values.
1464 typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq;
1465 static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1)
1466 {
1467  mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist);
1468  for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; }
1469  while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--;
1470  for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
1471  const mz_uint32 *pHist = &hist[pass << 8];
1472  mz_uint offsets[256], cur_ofs = 0;
1473  for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; }
1474  for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
1475  { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; }
1476  }
1477  return pCur_syms;
1478 }
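
// The sort above is a byte-wise counting (LSD radix) sort: a single scan fills both 256-entry
// histograms inside hist[], prefix sums turn each histogram into destination offsets, and the
// symbols are scattered once per pass between the two scratch arrays. When every key's high byte is
// zero (num_syms == hist[256]) the second pass is skipped, which is the common case for small blocks.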
1479 
1480 // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
1481 static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n)
1482 {
1483  int root, leaf, next, avbl, used, dpth;
1484  if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
1485  A[0].m_key += A[1].m_key; root = 0; leaf = 2;
1486  for (next = 1; next < n-1; next++) {
1487  if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; }
1488  else A[next].m_key = A[leaf++].m_key;
1489  if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; }
1490  else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
1491  }
1492  A[n-2].m_key = 0; for (next = n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
1493  avbl = 1; used = dpth = 0; root = n-2; next = n-1;
1494  while (avbl>0) {
1495  while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
1496  while (avbl>used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; }
1497  avbl = 2*used; dpth++; used = 0;
1498  }
1499 }
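
// The routine above is the in-place Moffat/Katajainen algorithm: it expects A[0..n-1] sorted by
// ascending m_key (symbol frequency, as produced by tdefl_radix_sort_syms) and overwrites each
// m_key with that symbol's depth in an optimal Huffman tree, i.e. its code length, without
// allocating any additional memory.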
1500 
1501 // Limits canonical Huffman code table's max code size.
1502 enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
1503 static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
1504 {
1505  int i; mz_uint32 total = 0; if (code_list_len <= 1) return;
1506  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; ++i) pNum_codes[max_code_size] += pNum_codes[i];
1507  for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
1508  while (total != (1UL << max_code_size)) {
1509  pNum_codes[max_code_size]--;
1510  for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
1511  total--;
1512  }
1513 }
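
// tdefl_huffman_enforce_max_code_size() repairs the code-length histogram after clamping: lengths
// longer than max_code_size are folded into the max bucket, which can make the scaled Kraft sum
// (sum of pNum_codes[i] * 2^(max_code_size - i)) exceed 2^max_code_size. Each loop iteration drops
// one code from the deepest level and splits one code at the longest remaining shorter length into
// two codes one bit longer, reducing the scaled sum by exactly one until equality holds and the
// lengths once again describe a valid prefix code.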
1514 
1515 static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table)
1516 {
1517  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes);
1518  if (static_table) {
1519  for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++;
1520  }
1521  else {
1522  tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms;
1523  int num_used_syms = 0;
1524  const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
1525  for (i = 0; i < table_len; ++i) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; }
1526 
1527  pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
1528 
1529  for (i = 0; i < num_used_syms; ++i) num_codes[pSyms[i].m_key]++;
1530 
1531  tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit);
1532 
1533  MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
1534  for (i = 1, j = num_used_syms; i <= code_size_limit; ++i)
1535  for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
1536  }
1537 
1538  next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; ++i) next_code[i] = j = ((j + num_codes[i - 1]) << 1);
1539 
1540  for (i = 0; i < table_len; ++i) {
1541  mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
1542  code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1);
1543  d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
1544  }
1545 }
1546 
1547 #define TDEFL_PUT_BITS(b, l) do { \
1548  mz_uint bits = b; mz_uint len = l; MZ_ASSERT(bits <= ((1U << len) - 1U)); \
1549  d->m_bit_buffer |= (bits << d->m_bits_in); d->m_bits_in += len; \
 1550  while (d->m_bits_in >= 8) { \
 1551  if (d->m_pOutput_buf < d->m_pOutput_buf_end) { \
 1552  *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
 1553  } \
 1554  d->m_bit_buffer >>= 8; /* always drain the bit buffer so the loop terminates; a full output buffer is detected by the callers via m_pOutput_buf */ \
 1555  d->m_bits_in -= 8; \
 1556  } \
1557 } MZ_MACRO_END
1558 
1559 #define TDEFL_RLE_PREV_CODE_SIZE() { if (rle_repeat_count) { \
1560  if (rle_repeat_count < 3) { \
1561  d->m_huff_count[2][prev_code_size] = (mz_uint16)(d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
1562  while (rle_repeat_count--) packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
1563  } else { \
1564  d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); packed_code_sizes[num_packed_code_sizes++] = 16; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_repeat_count - 3); \
1565 } rle_repeat_count = 0; } }
1566 
1567 #define TDEFL_RLE_ZERO_CODE_SIZE() { if (rle_z_count) { \
1568  if (rle_z_count < 3) { \
1569  d->m_huff_count[2][0] = (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
1570  } else if (rle_z_count <= 10) { \
1571  d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); packed_code_sizes[num_packed_code_sizes++] = 17; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 3); \
1572  } else { \
1573  d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); packed_code_sizes[num_packed_code_sizes++] = 18; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 11); \
1574 } rle_z_count = 0; } }
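
// These two RLE macros emit the DEFLATE code-length alphabet of RFC 1951 section 3.2.7: symbol 16
// repeats the previous code length 3-6 times (2 extra bits), symbol 17 repeats a zero length 3-10
// times (3 extra bits), and symbol 18 repeats a zero length 11-138 times (7 extra bits). Runs
// shorter than 3 are written out literally. The extra-bit values (count - 3 or count - 11) are
// stored right after the symbol in packed_code_sizes[] and written by tdefl_start_dynamic_block().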
1575 
1576 static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
1577 
1578 static void tdefl_start_dynamic_block(tdefl_compressor *d)
1579 {
1580  int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index;
1581  mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF;
1582 
1583  d->m_huff_count[0][256] = 1;
1584 
1585  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
1586  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
1587 
1588  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
1589  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
1590 
1591  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
1592  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes);
1593  total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0;
1594 
1595  memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
1596  for (i = 0; i < total_code_sizes_to_pack; ++i) {
1597  mz_uint8 code_size = code_sizes_to_pack[i];
1598  if (!code_size) {
1599  TDEFL_RLE_PREV_CODE_SIZE();
1600  if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); }
1601  }
1602  else {
1603  TDEFL_RLE_ZERO_CODE_SIZE();
1604  if (code_size != prev_code_size) {
1605  TDEFL_RLE_PREV_CODE_SIZE();
1606  d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size;
1607  }
1608  else if (++rle_repeat_count == 6) {
1609  TDEFL_RLE_PREV_CODE_SIZE();
1610  }
1611  }
1612  prev_code_size = code_size;
1613  }
1614  if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); }
1615  else { TDEFL_RLE_ZERO_CODE_SIZE(); }
1616 
1617  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
1618 
1619  TDEFL_PUT_BITS(2, 2);
1620 
1621  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
1622  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
1623 
1624  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break;
1625  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
1626  for (i = 0; static_cast<int>(i) < num_bit_lengths; ++i) TDEFL_PUT_BITS(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
1627 
1628  for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes; ) {
1629  mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
1630  TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
1631  if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]);
1632  }
1633 }
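
// Layout of the dynamic block header written above (RFC 1951 section 3.2.7): after the BFINAL bit
// emitted by tdefl_flush_block(), BTYPE=2 (2 bits), HLIT = num_lit_codes - 257 (5 bits),
// HDIST = num_dist_codes - 1 (5 bits), HCLEN = num_bit_lengths - 4 (4 bits), then 3-bit lengths for
// the code-length alphabet in the swizzled order above, and finally the RLE-packed literal/length
// and distance code lengths encoded with table 2's Huffman codes plus their extra bits.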
1634 
1635 static void tdefl_start_static_block(tdefl_compressor *d)
1636 {
1637  mz_uint i;
1638  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
1639 
1640  for (i = 0; i <= 143; ++i) *p++ = 8;
1641  for (; i <= 255; ++i) *p++ = 9;
1642  for (; i <= 279; ++i) *p++ = 7;
1643  for (; i <= 287; ++i) *p++ = 8;
1644 
1645  memset(d->m_huff_code_sizes[1], 5, 32);
1646 
1647  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
1648  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
1649 
1650  TDEFL_PUT_BITS(1, 2);
1651 }
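
// Static (BTYPE=1) blocks use the fixed code lengths of RFC 1951 section 3.2.6, reproduced above:
// literals 0-143 get 8 bits, 144-255 get 9, codes 256-279 get 7, 280-287 get 8, and all 32 distance
// codes get 5 bits, so no code-length tables need to be transmitted.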
1652 
1653 static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF };
1654 
1655 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS
1656 static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
1657 {
1658  mz_uint flags;
1659  mz_uint8 *pLZ_codes;
1660  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
1661  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
1662  mz_uint64 bit_buffer = d->m_bit_buffer;
1663  mz_uint bits_in = d->m_bits_in;
1664 
1665 #define TDEFL_PUT_BITS_FAST(b, l) { bit_buffer |= (((mz_uint64)(b)) << bits_in); bits_in += (l); }
1666 
1667  flags = 1;
1668  pLZ_codes = d->m_lz_code_buf;
1669  while (pLZ_codes < pLZ_code_buf_end) {
1670  if (flags == 1)
1671  flags = *pLZ_codes++ | 0x100;
1672 
1673  if (flags & 1) {
1674  mz_uint s0, s1, n0, n1, sym, num_extra_bits;
1675  mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3;
1676 
1677  MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1678  TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1679  TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
1680 
1681  // This sequence coaxes MSVC into using cmov's vs. jmp's.
1682  s0 = s_tdefl_small_dist_sym[match_dist & 511];
1683  n0 = s_tdefl_small_dist_extra[match_dist & 511];
1684  s1 = s_tdefl_large_dist_sym[match_dist >> 8];
1685  n1 = s_tdefl_large_dist_extra[match_dist >> 8];
1686  sym = (match_dist < 512) ? s0 : s1;
1687  num_extra_bits = (match_dist < 512) ? n0 : n1;
1688 
1689  MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
1690  TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
1691  TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
1692  }
1693  else {
1694  mz_uint lit = *pLZ_codes++;
1695  MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1696  TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
1697 
1698  if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
1699  flags >>= 1;
1700  lit = *pLZ_codes++;
1701  MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1702  TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
1703 
1704  if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
1705  flags >>= 1;
1706  lit = *pLZ_codes++;
1707  MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1708  TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
1709  }
1710  }
1711  }
1712 
1713  if (pOutput_buf >= d->m_pOutput_buf_end)
1714  return MZ_FALSE;
1715 
1716  *(mz_uint64 *)pOutput_buf = bit_buffer;
1717  pOutput_buf += (bits_in >> 3);
1718  bit_buffer >>= (bits_in & ~7);
1719  bits_in &= 7;
1720  flags >>= 1;
1721  }
1722 
1723 #undef TDEFL_PUT_BITS_FAST
1724 
1725  d->m_pOutput_buf = pOutput_buf;
1726  d->m_bits_in = 0;
1727  d->m_bit_buffer = 0;
1728 
1729  while (bits_in) {
1730  mz_uint32 n = MZ_MIN(bits_in, 16);
1731  TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
1732  bit_buffer >>= n;
1733  bits_in -= n;
1734  }
1735 
1736  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
1737 
1738  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
1739 }
1740 #else
1741 static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
1742 {
1743  mz_uint flags;
1744  mz_uint8 *pLZ_codes;
1745 
1746  flags = 1;
1747  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) {
1748  if (flags == 1)
1749  flags = *pLZ_codes++ | 0x100;
1750  if (flags & 1) {
1751  mz_uint sym, num_extra_bits;
1752  mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3;
1753 
1754  MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1755  TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1756  TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
1757 
1758  if (match_dist < 512) {
1759  sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist];
1760  }
1761  else {
1762  sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
1763  }
1764  MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
1765  TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
1766  TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
1767  }
1768  else {
1769  mz_uint lit = *pLZ_codes++;
1770  MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1771  TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
1772  }
1773  }
1774 
1775  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
1776 
1777  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
1778 }
1779 #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS
1780 
1781 static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block)
1782 {
1783  if (static_block)
1784  tdefl_start_static_block(d);
1785  else
1786  tdefl_start_dynamic_block(d);
1787  return tdefl_compress_lz_codes(d);
1788 }
1789 
1790 static int tdefl_flush_block(tdefl_compressor *d, int flush)
1791 {
1792  mz_uint saved_bit_buf, saved_bits_in;
1793  mz_uint8 *pSaved_output_buf;
1794  mz_bool comp_block_succeeded = MZ_FALSE;
1795  int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
1796  mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf;
1797 
1798  d->m_pOutput_buf = pOutput_buf_start;
1799  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
1800 
1801  MZ_ASSERT(!d->m_output_flush_remaining);
1802  d->m_output_flush_ofs = 0;
1803  d->m_output_flush_remaining = 0;
1804 
1805  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
1806  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
1807 
1808  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
1809  TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8);
1810  }
1811 
1812  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
1813 
1814  pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in;
1815 
1816  if (!use_raw_block)
1817  comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48));
1818 
1819  // If the block gets expanded, forget the current contents of the output buffer and send a raw block instead.
1820  if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) &&
1821  ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
1822  mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
1823  TDEFL_PUT_BITS(0, 2);
1824  if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); }
1825  for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
1826  TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
1827  }
1828  for (i = 0; i < d->m_total_lz_bytes; ++i) {
1829  TDEFL_PUT_BITS(d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8);
1830  }
1831  }
1832  // Check for the extremely unlikely (if not impossible) case of the compressed block not fitting into the output buffer when using dynamic codes.
1833  else if (!comp_block_succeeded) {
1834  d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
1835  tdefl_compress_block(d, MZ_TRUE);
1836  }
1837 
1838  if (flush) {
1839  if (flush == TDEFL_FINISH) {
1840  if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); }
1841  if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; ++i) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } }
1842  }
1843  else {
1844  mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); }
1845  }
1846  }
1847 
1848  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
1849 
1850  memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
1851  memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
1852 
1853  d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++;
1854 
1855  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
1856  if (d->m_pPut_buf_func) {
1857  *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
1858  if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
1859  return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
1860  }
1861  else if (pOutput_buf_start == d->m_output_buf) {
1862  int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
1863  memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy);
1864  d->m_out_buf_ofs += bytes_to_copy;
1865  if ((n -= bytes_to_copy) != 0) {
1866  d->m_output_flush_ofs = bytes_to_copy;
1867  d->m_output_flush_remaining = n;
1868  }
1869  }
1870  else {
1871  d->m_out_buf_ofs += n;
1872  }
1873  }
1874 
1875  return d->m_output_flush_remaining;
1876 }
1877 
1878 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
1879 #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16*)(p)
1880 static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
1881 {
1882  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
1883  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
1884  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
1885  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s);
1886  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return;
1887  for (; ; ) {
1888  for (; ; ) {
1889  if (--num_probes_left == 0) return;
1890 #define TDEFL_PROBE \
1891  next_probe_pos = d->m_next[probe_pos]; \
1892  if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) return; \
1893  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
1894  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) break;
1895  TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE;
1896  }
1897  if (!dist) {
1898  break;
1899  }
1900  q = (const mz_uint16 *)(d->m_dict + probe_pos);
1901  if (TDEFL_READ_UNALIGNED_WORD(q) != s01) {
1902  continue;
1903  }
1904  p = s; probe_len = 32;
1905  do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
1906  (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0));
1907  if (!probe_len) {
1908  *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, (mz_uint)TDEFL_MAX_MATCH_LEN); break;
1909  }
1910  else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q)) > match_len) {
1911  *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break;
1912  c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
1913  }
1914  }
1915 }
1916 #else
1917 static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
1918 {
1919  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
1920  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
1921  const mz_uint8 *s = d->m_dict + pos, *p, *q;
1922  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
1923  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return;
1924  for (; ; ) {
1925  for (; ; ) {
1926  if (--num_probes_left == 0) return;
1927 #define TDEFL_PROBE \
1928  next_probe_pos = d->m_next[probe_pos]; \
1929  if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) return; \
1930  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
1931  if ((d->m_dict[probe_pos + match_len] == c0) && (d->m_dict[probe_pos + match_len - 1] == c1)) break;
1932  TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE;
1933  }
1934  if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break;
1935  if (probe_len > match_len) {
1936  *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
1937  c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1];
1938  }
1939  }
1940 }
1941 #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
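
// Both tdefl_find_match() variants walk hash chains: d->m_hash[] maps a hash of the bytes at the
// lookahead position to the most recent dictionary position that produced it, and d->m_next[]
// (indexed by dictionary position) links back to the previous position with the same hash.
// TDEFL_PROBE follows that chain, giving up once the probe budget (d->m_max_probes, indexed by
// whether a match of 32+ bytes is already in hand) is spent or the candidate falls outside max_dist.
// The unaligned variant compares candidate matches 16 bits at a time, the portable one byte by byte.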
1942 
1943 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
1944 static mz_bool tdefl_compress_fast(tdefl_compressor *d)
1945 {
1946  // Faster, minimally featured LZRW1-style match+parse loop with better register utilization. Intended for applications where raw throughput is valued more highly than ratio.
1947  mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left;
1948  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
1949  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
1950 
1951  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
1952  const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
1953  mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
1954  mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
1955  d->m_src_buf_left -= num_bytes_to_process;
1956  lookahead_size += num_bytes_to_process;
1957 
1958  while (num_bytes_to_process) {
1959  mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
1960  memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
1961  if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
1962  memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
1963  d->m_pSrc += n;
1964  dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
1965  num_bytes_to_process -= n;
1966  }
1967 
1968  dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
1969  if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break;
1970 
1971  while (lookahead_size >= 4) {
1972  mz_uint cur_match_dist, cur_match_len = 1;
1973  mz_uint8 *pCur_dict = d->m_dict + cur_pos;
1974  mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
1975  mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK;
1976  mz_uint probe_pos = d->m_hash[hash];
1977  d->m_hash[hash] = (mz_uint16)lookahead_pos;
1978 
1979  if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) {
1980  const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
1981  const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
1982  mz_uint32 probe_len = 32;
1983  do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
1984  (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0));
1985  cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
1986  if (!probe_len)
1987  cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
1988 
1989  if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U*1024U))) {
1990  cur_match_len = 1;
1991  *pLZ_code_buf++ = (mz_uint8)first_trigram;
1992  *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
1993  d->m_huff_count[0][(mz_uint8)first_trigram]++;
1994  }
1995  else {
1996  mz_uint32 s0, s1;
1997  cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
1998 
1999  MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
2000 
2001  cur_match_dist--;
2002 
2003  pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
2004  *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
2005  pLZ_code_buf += 3;
2006  *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
2007 
2008  s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
2009  s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
2010  d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
2011 
2012  d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++;
2013  }
2014  }
2015  else {
2016  *pLZ_code_buf++ = (mz_uint8)first_trigram;
2017  *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
2018  d->m_huff_count[0][(mz_uint8)first_trigram]++;
2019  }
2020 
2021  if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; }
2022 
2023  total_lz_bytes += cur_match_len;
2024  lookahead_pos += cur_match_len;
2025  dict_size = MZ_MIN(dict_size + cur_match_len, (mz_uint)TDEFL_LZ_DICT_SIZE);
2026  cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
2027  MZ_ASSERT(lookahead_size >= cur_match_len);
2028  lookahead_size -= cur_match_len;
2029 
2030  if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
2031  int n;
2032  d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size;
2033  d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left;
2034  if ((n = tdefl_flush_block(d, 0)) != 0)
2035  return (n < 0) ? MZ_FALSE : MZ_TRUE;
2036  total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left;
2037  }
2038  }
2039 
2040  while (lookahead_size) {
2041  mz_uint8 lit = d->m_dict[cur_pos];
2042 
2043  total_lz_bytes++;
2044  *pLZ_code_buf++ = lit;
2045  *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
2046  if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; }
2047 
2048  d->m_huff_count[0][lit]++;
2049 
2050  lookahead_pos++;
2051  dict_size = MZ_MIN(dict_size + 1, (mz_uint)TDEFL_LZ_DICT_SIZE);
2052  cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
2053  lookahead_size--;
2054 
2055  if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
2056  int n;
2057  d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size;
2058  d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left;
2059  if ((n = tdefl_flush_block(d, 0)) != 0)
2060  return (n < 0) ? MZ_FALSE : MZ_TRUE;
2061  total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left;
2062  }
2063  }
2064  }
2065 
2066  d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size;
2067  d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left;
2068  return MZ_TRUE;
2069 }
2070 #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
2071 
2072 static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit)
2073 {
2074  d->m_total_lz_bytes++;
2075  *d->m_pLZ_code_buf++ = lit;
2076  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; }
2077  d->m_huff_count[0][lit]++;
2078 }
2079 
2080 static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist)
2081 {
2082  mz_uint32 s0, s1;
2083 
2084  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE));
2085 
2086  d->m_total_lz_bytes += match_len;
2087 
2088  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
2089 
2090  match_dist -= 1;
2091  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
2092  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3;
2093 
2094  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; }
2095 
2096  s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
2097  d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
2098 
2099  if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
2100 }
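
// tdefl_record_literal()/tdefl_record_match() append to the intermediate LZ code buffer consumed by
// tdefl_compress_lz_codes(): each group of up to eight codes is preceded by a flags byte (filled
// from bit 7 downward while recording, read from bit 0 upward while encoding). A 0 bit means one
// literal byte follows; a 1 bit means a three-byte match record follows: the match length minus
// TDEFL_MIN_MATCH_LEN, then the 16-bit (match distance - 1), low byte first.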
2101 
2102 static mz_bool tdefl_compress_normal(tdefl_compressor *d)
2103 {
2104  const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left;
2105  tdefl_flush flush = d->m_flush;
2106 
2107  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
2108  mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
2109  // Update dictionary and hash chains. Keeps the lookahead size equal to TDEFL_MAX_MATCH_LEN.
2110  if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
2111  mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
2112  mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
2113  mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
2114  const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
2115  src_buf_left -= num_bytes_to_process;
2116  d->m_lookahead_size += num_bytes_to_process;
2117  while (pSrc != pSrc_end) {
2118  mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
2119  hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
2120  d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos);
2121  dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++;
2122  }
2123  }
2124  else {
2125  while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
2126  mz_uint8 c = *pSrc++;
2127  mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
2128  src_buf_left--;
2129  d->m_dict[dst_pos] = c;
2130  if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
2131  d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
2132  if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
2133  mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
2134  mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
2135  d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos);
2136  }
2137  }
2138  }
2139  d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
2140  if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
2141  break;
2142 
2143  // Simple lazy/greedy parsing state machine.
2144  len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
2145  if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
2146  if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
2147  mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
2148  cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; }
2149  if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1;
2150  }
2151  }
2152  else {
2153  tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len);
2154  }
2155  if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U*1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
2156  cur_match_dist = cur_match_len = 0;
2157  }
2158  if (d->m_saved_match_len) {
2159  if (cur_match_len > d->m_saved_match_len) {
2160  tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
2161  if (cur_match_len >= 128) {
2162  tdefl_record_match(d, cur_match_len, cur_match_dist);
2163  d->m_saved_match_len = 0; len_to_move = cur_match_len;
2164  }
2165  else {
2166  d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len;
2167  }
2168  }
2169  else {
2170  tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
2171  len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0;
2172  }
2173  }
2174  else if (!cur_match_dist)
2175  tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
2176  else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) {
2177  tdefl_record_match(d, cur_match_len, cur_match_dist);
2178  len_to_move = cur_match_len;
2179  }
2180  else {
2181  d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len;
2182  }
2183  // Move the lookahead forward by len_to_move bytes.
2184  d->m_lookahead_pos += len_to_move;
2185  MZ_ASSERT(d->m_lookahead_size >= len_to_move);
2186  d->m_lookahead_size -= len_to_move;
2187  d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
2188  // Check if it's time to flush the current LZ codes to the internal output buffer: the LZ code buffer is nearly full, or more than 31KB of input has been buffered and either the buffered LZ codes have grown larger than the raw bytes they encode (the 115/128 heuristic, i.e. the data is compressing poorly) or raw blocks are being forced.
2189  if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
2190  ((d->m_total_lz_bytes > 31*1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
2191  int n;
2192  d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left;
2193  if ((n = tdefl_flush_block(d, 0)) != 0)
2194  return (n < 0) ? MZ_FALSE : MZ_TRUE;
2195  }
2196  }
2197 
2198  d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left;
2199  return MZ_TRUE;
2200 }
2201 
2202 static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d)
2203 {
2204  if (d->m_pIn_buf_size) {
2205  *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
2206  }
2207 
2208  if (d->m_pOut_buf_size) {
2209  size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining);
2210  memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n);
2211  d->m_output_flush_ofs += (mz_uint)n;
2212  d->m_output_flush_remaining -= (mz_uint)n;
2213  d->m_out_buf_ofs += n;
2214 
2215  *d->m_pOut_buf_size = d->m_out_buf_ofs;
2216  }
2217 
2218  return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY;
2219 }
2220 
2221 tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush)
2222 {
2223  if (!d) {
2224  if (pIn_buf_size) *pIn_buf_size = 0;
2225  if (pOut_buf_size) *pOut_buf_size = 0;
2226  return TDEFL_STATUS_BAD_PARAM;
2227  }
2228 
2229  d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size;
2230  d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size;
2231  d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
2232  d->m_out_buf_ofs = 0;
2233  d->m_flush = flush;
2234 
2235  if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
2236  (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
2237  if (pIn_buf_size) *pIn_buf_size = 0;
2238  if (pOut_buf_size) *pOut_buf_size = 0;
2239  return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
2240  }
2241  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
2242 
2243  if ((d->m_output_flush_remaining) || (d->m_finished))
2244  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
2245 
2246 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
2247  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
2248  ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
2249  ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) {
2250  if (!tdefl_compress_fast(d))
2251  return d->m_prev_return_status;
2252  }
2253  else
2254 #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
2255  {
2256  if (!tdefl_compress_normal(d))
2257  return d->m_prev_return_status;
2258  }
2259 
2260  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf))
2261  d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf);
2262 
2263  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) {
2264  if (tdefl_flush_block(d, flush) < 0)
2265  return d->m_prev_return_status;
2266  d->m_finished = (flush == TDEFL_FINISH);
2267  if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; }
2268  }
2269 
2270  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
2271 }
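// Usage sketch (illustrative only, not part of the library): one-shot buffer-to-buffer compression
// with the streaming tdefl_compress() API. 'pSrc', 'src_len', 'pDst' and 'dst_cap' are hypothetical
// caller-provided names; a real caller would loop while the status is TDEFL_STATUS_OKAY, refilling
// the input buffer and draining the output buffer between calls.
//
//   tdefl_compressor *pComp = (tdefl_compressor *)malloc(sizeof(tdefl_compressor)); // the struct is large, keep it off the stack
//   if (pComp && (tdefl_init(pComp, NULL, NULL, TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES) == TDEFL_STATUS_OKAY)) {
//     size_t in_size = src_len, out_size = dst_cap;
//     tdefl_status status = tdefl_compress(pComp, pSrc, &in_size, pDst, &out_size, TDEFL_FINISH);
//     // On TDEFL_STATUS_DONE, out_size holds the number of compressed bytes written to pDst.
//   }
//   free(pComp);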
2272 
2273 tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush)
2274 {
2275  MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
2276 }
2277 
2278 tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, mz_uint flags)
2279 {
2280  d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user;
2281  d->m_flags = (mz_uint)(flags);
2282  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
2283  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
2284  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
2285  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
2286  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
2287  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
2288  d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8;
2289  d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY;
2290  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1;
2291  d->m_pIn_buf = NULL; d->m_pOut_buf = NULL;
2292  d->m_pIn_buf_size = NULL; d->m_pOut_buf_size = NULL;
2293  d->m_flush = TDEFL_NO_FLUSH; d->m_pSrc = NULL; d->m_src_buf_left = 0; d->m_out_buf_ofs = 0;
2294  memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
2295  memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
2296  return TDEFL_STATUS_OKAY;
2297 }
2298 
2299 tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d)
2300 {
2301  return d->m_prev_return_status;
2302 }
2303 
2304 mz_uint32 tdefl_get_adler32(tdefl_compressor *d)
2305 {
2306  return d->m_adler32;
2307 }
2308 
2309 mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
2310 {
2311  tdefl_compressor *pComp; mz_bool succeeded; if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
2312  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); if (!pComp) return MZ_FALSE;
2313  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY);
2314  succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE);
2315  MZ_FREE(pComp); return succeeded;
2316 }
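// Usage sketch (illustrative only): streaming compressed output straight to a FILE* through a user
// callback. 'my_file_putter', 'pData' and 'data_len' are hypothetical names; the callback only has
// to match tdefl_put_buf_func_ptr and return MZ_FALSE to abort compression. Requires <stdio.h>.
//
//   static mz_bool my_file_putter(const void *pBuf, int len, void *pUser)
//   {
//     return (fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len) ? MZ_TRUE : MZ_FALSE;
//   }
//   ...
//   FILE *pFile = fopen("out.deflate", "wb");
//   mz_bool ok = pFile && tdefl_compress_mem_to_output(pData, data_len, my_file_putter, pFile, TDEFL_DEFAULT_MAX_PROBES);
//   if (pFile) fclose(pFile);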
2317 
2318 typedef struct
2319 {
2320  size_t m_size, m_capacity;
2321  mz_uint8 *m_pBuf;
2322  mz_bool m_expandable;
2323 } tdefl_output_buffer;
2324 
2325 static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser)
2326 {
2327  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
2328  size_t new_size = p->m_size + len;
2329  if (new_size > p->m_capacity) {
2330  size_t new_capacity = p->m_capacity; mz_uint8 *pNew_buf; if (!p->m_expandable) return MZ_FALSE;
2331  do { new_capacity = MZ_MAX(128U, new_capacity << 1U); } while (new_size > new_capacity);
2332  pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity); if (!pNew_buf) return MZ_FALSE;
2333  p->m_pBuf = pNew_buf; p->m_capacity = new_capacity;
2334  }
2335  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len); p->m_size = new_size;
2336  return MZ_TRUE;
2337 }
2338 
2339 void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
2340 {
2341  tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf);
2342  if (!pOut_len) return MZ_FALSE; else *pOut_len = 0;
2343  out_buf.m_expandable = MZ_TRUE;
2344  if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return NULL;
2345  *pOut_len = out_buf.m_size; return out_buf.m_pBuf;
2346 }
2347 
2348 size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
2349 {
2350  tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf);
2351  if (!pOut_buf) return 0;
2352  out_buf.m_pBuf = (mz_uint8 *)pOut_buf; out_buf.m_capacity = out_buf_len;
2353  if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return 0;
2354  return out_buf.m_size;
2355 }
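// Usage sketch (illustrative only) of the two single-call helpers above; 'pSrc', 'src_len', 'dst'
// and 'dst_cap' are hypothetical. The buffer returned by tdefl_compress_mem_to_heap() is allocated
// through MZ_MALLOC/MZ_REALLOC, so with the default allocators it can be released with free().
//
//   size_t comp_len = 0;
//   void *pComp_data = tdefl_compress_mem_to_heap(pSrc, src_len, &comp_len, TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   if (pComp_data) { /* comp_len bytes of zlib-framed deflate data */ free(pComp_data); }
//
//   // Or compress into a caller-provided buffer; returns 0 if dst_cap is too small (or on error).
//   size_t n = tdefl_compress_mem_to_mem(dst, dst_cap, pSrc, src_len, TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);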
2356 
2357 #ifndef MINIZ_NO_ZLIB_APIS
2358 static const mz_uint s_tdefl_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };
2359 
2360  // level may actually range from [0,10] (10 is a "hidden" max level, where we want a bit more compression and it's fine if throughput falls off a cliff on some files).
2361 mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy)
2362 {
2363  mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
2364  if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
2365 
2366  if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
2367  else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES;
2368  else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK;
2369  else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
2370  else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES;
2371 
2372  return comp_flags;
2373 }
2374 #endif //MINIZ_NO_ZLIB_APIS
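// Usage sketch (illustrative only): deriving tdefl flags from zlib-style parameters, assuming the
// zlib-compatible constants (MZ_DEFAULT_WINDOW_BITS, MZ_DEFAULT_STRATEGY) declared earlier in this
// header are available, i.e. MINIZ_NO_ZLIB_APIS is not defined. 'dst', 'dst_cap', 'pSrc' and
// 'src_len' are hypothetical.
//
//   mz_uint comp_flags = tdefl_create_comp_flags_from_zip_params(9, MZ_DEFAULT_WINDOW_BITS, MZ_DEFAULT_STRATEGY);
//   size_t comp_len = tdefl_compress_mem_to_mem(dst, dst_cap, pSrc, src_len, (int)comp_flags);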
2375 
2376 #ifdef _MSC_VER
2377 #pragma warning (push)
2378 #pragma warning (disable:4204) // nonstandard extension used : non-constant aggregate initializer (also supported by GNU C and C99, so no big deal)
2379 #endif
2380 
2381 // Simple PNG writer function by Alex Evans, 2011. Released into the public domain: https://gist.github.com/908299, more context at
2382 // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
2383 // This is actually a modification of Alex's original code so PNG files generated by this function pass pngcheck.
2384  void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip)
2385 {
2386  // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was defined.
2387  static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };
2388  tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0;
2389  if (!pComp) return NULL;
2390  MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57+MZ_MAX(64, (1+bpl)*h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; }
2391  // write 41 dummy bytes that are overwritten later with the real PNG header (8-byte signature + 25-byte IHDR chunk + 8-byte IDAT length/type)
2392  for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
2393  // compress image data (z is 0 at this point, so &z supplies the zero PNG filter-type byte written before each scanline)
2394  tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER | (level <= 3 ? TDEFL_GREEDY_PARSING_FLAG : 0));
2395  for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); }
2396  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; }
2397  // write real header
2398  *pLen_out = out_buf.m_size-41;
2399  {
2400  static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 };
2401  mz_uint8 pnghdr[41] = { 0x89,0x50,0x4e,0x47,0x0d,0x0a,0x1a,0x0a,0x00,0x00,0x00,0x0d,0x49,0x48,0x44,0x52,
2402  0,0,(mz_uint8)(w>>8),(mz_uint8)w,0,0,(mz_uint8)(h>>8),(mz_uint8)h,8,chans[num_chans],0,0,0,0,0,0,0,
2403  (mz_uint8)(*pLen_out>>24),(mz_uint8)(*pLen_out>>16),(mz_uint8)(*pLen_out>>8),(mz_uint8)*pLen_out,0x49,0x44,0x41,0x54 };
2404  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr+12, 17); for (i = 0; i<4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr+29))[i] = (mz_uint8)(c>>24);
2405  memcpy(out_buf.m_pBuf, pnghdr, 41);
2406  }
2407  // write footer (IDAT CRC-32, followed by IEND chunk)
2408  if (!tdefl_output_buffer_putter("\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; }
2409  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf+41-4, *pLen_out+4); for (i = 0; i<4; ++i, c <<= 8) (out_buf.m_pBuf+out_buf.m_size-16)[i] = (mz_uint8)(c >> 24);
2410  // compute final size of file, grab compressed data buffer and return
2411  *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf;
2412 }
2413 void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out)
2414 {
2415  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's were #defined out)
2416  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE);
2417 }
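// Usage sketch (illustrative only): encoding a tightly-packed 8-bit-per-channel RGB image to an
// in-memory PNG and saving it. 'pRGB', 'width' and 'height' are hypothetical; the returned buffer
// comes from MZ_MALLOC/MZ_REALLOC, so with the default allocators it can be released with free().
//
//   size_t png_size = 0;
//   void *pPNG = tdefl_write_image_to_png_file_in_memory(pRGB, width, height, 3, &png_size);
//   if (pPNG) {
//     FILE *pFile = fopen("image.png", "wb");
//     if (pFile) { fwrite(pPNG, 1, png_size, pFile); fclose(pFile); }
//     free(pPNG);
//   }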
2418 
2419 #ifdef _MSC_VER
2420 #pragma warning (pop)
2421 #endif
2422 
2423 } // namespace buminiz
2424 
2425 #endif // DOXYGEN_SHOULD_SKIP_THIS
2426 #endif // MINIZ_HEADER_FILE_ONLY