miniz_oxide/deflate/core.rs

//! Streaming compression functionality.

use alloc::boxed::Box;
use core::convert::TryInto;
use core::{cmp, mem};

use super::super::*;
use super::deflate_flags::*;
use super::CompressionLevel;
use crate::deflate::buffer::{
    update_hash, HashBuffers, LocalBuf, LZ_CODE_BUF_SIZE, LZ_DICT_FULL_SIZE, LZ_HASH_BITS,
    LZ_HASH_SHIFT, LZ_HASH_SIZE, OUT_BUF_SIZE,
};
use crate::deflate::stored::compress_stored;
use crate::deflate::zlib;
use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER, MZ_ADLER32_INIT};
use crate::DataFormat;

// Currently not bubbled up outside this module, so can fill in with more
// context eventually if needed.
type Result<T, E = Error> = core::result::Result<T, E>;
pub(crate) struct Error {}

pub(crate) const MAX_PROBES_MASK: u32 = 0xFFF;

const MAX_SUPPORTED_HUFF_CODESIZE: usize = 15;

/// Length code for length values.
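///
/// Indexed by the length value stored in the LZ code buffer, i.e. the actual
/// match length minus `MIN_MATCH_LEN` (3).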
#[rustfmt::skip]
const LEN_SYM: [u16; 256] = [
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268,
    269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
    273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274,
    275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
    277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285
];

/// Number of extra bits for length values.
#[rustfmt::skip]
const LEN_EXTRA: [u8; 256] = [
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0
];

/// Distance codes for distances smaller than 512.
#[rustfmt::skip]
const SMALL_DIST_SYM: [u8; 512] = [
     0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,
     8,  8,  8,  8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17
];

/// Number of extra bits for distances smaller than 512.
#[rustfmt::skip]
const SMALL_DIST_EXTRA: [u8; 512] = [
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
];

/// Distance codes for distances of 512 and above (indexed by `distance >> 8`).
#[rustfmt::skip]
const LARGE_DIST_SYM: [u8; 128] = [
     0,  0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
    24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
    27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
];

/// Number of extra bits for distances above 512.
#[rustfmt::skip]
const LARGE_DIST_EXTRA: [u8; 128] = [
     0,  0,  8,  8,  9,  9,  9,  9, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
];

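/// Bit masks with the lowest `n` bits set; `BITMASKS[n]` is used to mask out extra bits.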
#[rustfmt::skip]
const BITMASKS: [u32; 17] = [
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF
];

/// The maximum number of checks for matches in the hash table the compressor will make for each
/// compression level.
pub(crate) const NUM_PROBES: [u16; 11] = [0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500];

#[derive(Copy, Clone)]
struct SymFreq {
    key: u16,
    sym_index: u16,
}

pub mod deflate_flags {
    /// Whether to use a zlib wrapper.
    pub const TDEFL_WRITE_ZLIB_HEADER: u32 = 0x0000_1000;
    /// Should we compute the adler32 checksum.
    pub const TDEFL_COMPUTE_ADLER32: u32 = 0x0000_2000;
    /// Should we use greedy parsing (as opposed to lazy parsing, where we look ahead one or more
    /// bytes to check for better matches)?
    pub const TDEFL_GREEDY_PARSING_FLAG: u32 = 0x0000_4000;
    /// Used in miniz to skip zero-initializing hash and dict. We don't do this here, so
    /// this flag is ignored.
    pub const TDEFL_NONDETERMINISTIC_PARSING_FLAG: u32 = 0x0000_8000;
    /// Only look for matches with a distance of 1.
    pub const TDEFL_RLE_MATCHES: u32 = 0x0001_0000;
    /// Only use matches that are at least 6 bytes long.
    pub const TDEFL_FILTER_MATCHES: u32 = 0x0002_0000;
    /// Force the compressor to only output static blocks. (Blocks using the default huffman codes
    /// specified in the deflate specification.)
    pub const TDEFL_FORCE_ALL_STATIC_BLOCKS: u32 = 0x0004_0000;
    /// Force the compressor to only output raw/uncompressed blocks.
    pub const TDEFL_FORCE_ALL_RAW_BLOCKS: u32 = 0x0008_0000;
}

/// Strategy setting for compression.
///
/// The non-default settings offer some special-case compression variants.
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum CompressionStrategy {
    /// Don't use any of the special strategies.
    Default = 0,
    /// Only use matches that are at least 6 bytes long.
    Filtered = 1,
    /// Don't look for matches, only huffman encode the literals.
    HuffmanOnly = 2,
    /// Only look for matches with a distance of 1, i.e. do run-length encoding only.
    RLE = 3,
    /// Only use static/fixed blocks. (Blocks using the default huffman codes
    /// specified in the deflate specification.)
    Fixed = 4,
}

impl From<CompressionStrategy> for i32 {
    #[inline(always)]
    fn from(value: CompressionStrategy) -> Self {
        value as i32
    }
}

/// A list of deflate flush types.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum TDEFLFlush {
    /// Normal operation.
    ///
    /// Compress as much as there is space for, and then return waiting for more input.
    None = 0,

    /// Try to flush all the current data and output an empty raw block.
    Sync = 2,

    /// Same as [`Sync`][Self::Sync], but reset the dictionary so that the following data does not
    /// depend on previous data.
    Full = 3,

    /// Try to flush everything and end the deflate stream.
    ///
    /// On success this will yield a [`TDEFLStatus::Done`] return status.
    Finish = 4,
}

impl From<MZFlush> for TDEFLFlush {
    fn from(flush: MZFlush) -> Self {
        match flush {
            MZFlush::None => TDEFLFlush::None,
            MZFlush::Sync => TDEFLFlush::Sync,
            MZFlush::Full => TDEFLFlush::Full,
            MZFlush::Finish => TDEFLFlush::Finish,
            _ => TDEFLFlush::None, // TODO: ??? What to do ???
        }
    }
}

impl TDEFLFlush {
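    /// Try to convert a raw integer flush value (matching the zlib-style flush
    /// constants) into a `TDEFLFlush`.
    ///
    /// Returns [`MZError::Param`] for values with no corresponding flush type,
    /// including `1` (zlib's partial flush, which is not supported).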
    pub const fn new(flush: i32) -> Result<Self, MZError> {
        match flush {
            0 => Ok(TDEFLFlush::None),
            2 => Ok(TDEFLFlush::Sync),
            3 => Ok(TDEFLFlush::Full),
            4 => Ok(TDEFLFlush::Finish),
            _ => Err(MZError::Param),
        }
    }
}

/// Return status of compression.
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum TDEFLStatus {
    /// Usage error.
    ///
    /// This indicates that either the [`CompressorOxide`] experienced a previous error, or the
    /// stream has already been [`TDEFLFlush::Finish`]'d.
    BadParam = -2,

    /// Error putting data into output buffer.
    ///
    /// This usually indicates a too-small buffer.
    PutBufFailed = -1,

    /// Compression succeeded normally.
    Okay = 0,

    /// Compression succeeded and the deflate stream was ended.
    ///
    /// This is the result of calling compression with [`TDEFLFlush::Finish`].
    Done = 1,
}

const MAX_HUFF_SYMBOLS: usize = 288;
/// Mask for the hash used in the fast (level 1) compression mode.
const LEVEL1_HASH_SIZE_MASK: u32 = 4095;
/// The number of huffman tables used by the compressor.
/// Literal/length, distances, and the code-length table used to encode the lengths of the other two.
const MAX_HUFF_TABLES: usize = 3;
/// Literal/length codes
const MAX_HUFF_SYMBOLS_0: usize = 288;
/// Distance codes.
const MAX_HUFF_SYMBOLS_1: usize = 32;
/// Huffman length values.
const MAX_HUFF_SYMBOLS_2: usize = 19;
/// Size of the LZ dictionary (window) and the chained hash table.
pub(crate) const LZ_DICT_SIZE: usize = 32_768;
/// Mask used when stepping through the hash chains.
pub(crate) const LZ_DICT_SIZE_MASK: usize = (LZ_DICT_SIZE as u32 - 1) as usize;
/// The minimum length of a match.
pub(crate) const MIN_MATCH_LEN: u8 = 3;
/// The maximum length of a match.
pub(crate) const MAX_MATCH_LEN: usize = 258;

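/// The default compression flags: the number of probes for level 4 in the low
/// 12 bits, combined (bitwise OR) with the zlib wrapper flag.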
pub(crate) const DEFAULT_FLAGS: u32 = NUM_PROBES[4] as u32 | TDEFL_WRITE_ZLIB_HEADER;

#[cfg(test)]
#[inline]
fn write_u16_le(val: u16, slice: &mut [u8], pos: usize) {
    slice[pos] = val as u8;
    slice[pos + 1] = (val >> 8) as u8;
}

// Read the two bytes starting at pos and interpret them as a u16.
#[inline]
const fn read_u16_le<const N: usize>(slice: &[u8; N], pos: usize) -> u16 {
    // The compiler is smart enough to optimize this into an unaligned load.
    slice[pos] as u16 | ((slice[pos + 1] as u16) << 8)
}

/// Main compression struct.
pub struct CompressorOxide {
    pub(crate) lz: LZOxide,
    pub(crate) params: ParamsOxide,
    /// `HuffmanOxide` is stored on the heap (via `Box::default`) to avoid
    /// excessive stack copies.
    pub(crate) huff: Box<HuffmanOxide>,
    pub(crate) dict: DictOxide,
}

impl CompressorOxide {
    /// Create a new `CompressorOxide` with the given flags.
    ///
    /// # Notes
    /// This function may be changed to take different parameters in the future.
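    ///
    /// # Example
    ///
    /// A minimal sketch compressing a buffer in one call with [`compress`] from
    /// this module, with flags built by [`create_comp_flags_from_zip_params`]:
    ///
    /// ```
    /// use miniz_oxide::deflate::core::{
    ///     compress, create_comp_flags_from_zip_params, CompressorOxide, TDEFLFlush, TDEFLStatus,
    /// };
    ///
    /// // Level 6, positive window bits for a zlib wrapper, default strategy.
    /// let flags = create_comp_flags_from_zip_params(6, 15, 0);
    /// let mut compressor = CompressorOxide::new(flags);
    ///
    /// let input = b"Hello, hello, hello!";
    /// let mut output = vec![0; 64];
    /// let (status, bytes_in, bytes_out) =
    ///     compress(&mut compressor, input, &mut output, TDEFLFlush::Finish);
    ///
    /// assert_eq!(status, TDEFLStatus::Done);
    /// assert_eq!(bytes_in, input.len());
    /// assert!(bytes_out > 0);
    /// ```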
    pub fn new(flags: u32) -> Self {
        CompressorOxide {
            lz: LZOxide::new(),
            params: ParamsOxide::new(flags),
            huff: Box::default(),
            dict: DictOxide::new(flags),
        }
    }

    /// Get the adler32 checksum of the currently encoded data.
    pub const fn adler32(&self) -> u32 {
        self.params.adler32
    }

    /// Get the return status of the previous [`compress`](fn.compress.html)
    /// call with this compressor.
    pub const fn prev_return_status(&self) -> TDEFLStatus {
        self.params.prev_return_status
    }

    /// Get the raw compressor flags.
    ///
    /// # Notes
    /// This function may be deprecated or changed in the future to use more rust-style flags.
    pub const fn flags(&self) -> i32 {
        self.params.flags as i32
    }

    /// Returns whether the compressor is wrapping the data in a zlib format or not.
    pub const fn data_format(&self) -> DataFormat {
        if (self.params.flags & TDEFL_WRITE_ZLIB_HEADER) != 0 {
            DataFormat::Zlib
        } else {
            DataFormat::Raw
        }
    }

    /// Reset the state of the compressor, keeping the same parameters.
    ///
    /// This avoids re-allocating data.
    pub fn reset(&mut self) {
        // The LZ buffer and huffman tables have no settings or dynamic memory
        // that need to be saved, so we simply replace them.
        self.lz = LZOxide::new();
        self.params.reset();
        *self.huff = HuffmanOxide::default();
        self.dict.reset();
    }

    /// Set the compression level of the compressor.
    ///
    /// Using this to change the level after compression has started is supported.
    /// # Notes
    /// The compression strategy will be reset to the default one when this is called.
    pub fn set_compression_level(&mut self, level: CompressionLevel) {
        let format = self.data_format();
        self.set_format_and_level(format, level as u8);
    }

    /// Set the compression level of the compressor using an integer value.
    ///
    /// Using this to change the level after compression has started is supported.
    /// # Notes
    /// The compression strategy will be reset to the default one when this is called.
    pub fn set_compression_level_raw(&mut self, level: u8) {
        let format = self.data_format();
        self.set_format_and_level(format, level);
    }

    /// Update the compression settings of the compressor.
    ///
    /// Changing the `DataFormat` after compression has started will result in
    /// a corrupted stream.
    ///
    /// # Notes
    /// This function is mainly intended for setting the initial settings after e.g. creating with
    /// `default` or after calling `CompressorOxide::reset()`, and behaviour may be changed
    /// to disallow calling it after starting compression in the future.
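    ///
    /// # Example
    ///
    /// A minimal sketch switching a default compressor to raw deflate output at
    /// a higher compression level:
    ///
    /// ```
    /// use miniz_oxide::deflate::core::CompressorOxide;
    /// use miniz_oxide::DataFormat;
    ///
    /// let mut compressor = CompressorOxide::default();
    /// compressor.set_format_and_level(DataFormat::Raw, 9);
    /// ```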
    pub fn set_format_and_level(&mut self, data_format: DataFormat, level: u8) {
        let flags = create_comp_flags_from_zip_params(
            level.into(),
            data_format.to_window_bits(),
            CompressionStrategy::Default as i32,
        );
        self.params.update_flags(flags);
        self.dict.update_flags(flags);
    }
}

impl Default for CompressorOxide {
    /// Initialize the compressor with a level of 4, zlib wrapper and
    /// the default strategy.
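    ///
    /// ```
    /// use miniz_oxide::deflate::core::CompressorOxide;
    /// use miniz_oxide::DataFormat;
    ///
    /// let compressor = CompressorOxide::default();
    /// assert!(matches!(compressor.data_format(), DataFormat::Zlib));
    /// ```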
    fn default() -> Self {
        CompressorOxide {
            lz: LZOxide::new(),
            params: ParamsOxide::new(DEFAULT_FLAGS),
            huff: Box::default(),
            dict: DictOxide::new(DEFAULT_FLAGS),
        }
    }
}

/// Callback function used in `compress_to_output`.
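///
/// The callback is invoked with each chunk of compressed output and should
/// return `true` to continue compression; returning `false` aborts with
/// [`TDEFLStatus::PutBufFailed`].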
pub struct CallbackFunc<'a> {
    pub put_buf_func: &'a mut dyn FnMut(&[u8]) -> bool,
}

impl CallbackFunc<'_> {
    fn flush_output(
        &mut self,
        saved_output: SavedOutputBufferOxide,
        params: &mut ParamsOxide,
    ) -> i32 {
        // TODO: As this could be unsafe since we can't verify the function
        // pointer, this whole function should maybe be unsafe as well.
        let call_success = (self.put_buf_func)(&params.local_buf.b[0..saved_output.pos]);

        if !call_success {
            params.prev_return_status = TDEFLStatus::PutBufFailed;
            return params.prev_return_status as i32;
        }

        params.flush_remaining as i32
    }
}

struct CallbackBuf<'a> {
    pub out_buf: &'a mut [u8],
}

impl CallbackBuf<'_> {
    fn flush_output(
        &mut self,
        saved_output: SavedOutputBufferOxide,
        params: &mut ParamsOxide,
    ) -> i32 {
        if saved_output.local {
            let n = cmp::min(saved_output.pos, self.out_buf.len() - params.out_buf_ofs);
            (self.out_buf[params.out_buf_ofs..params.out_buf_ofs + n])
                .copy_from_slice(&params.local_buf.b[..n]);

            params.out_buf_ofs += n;
            if saved_output.pos != n {
                params.flush_ofs = n as u32;
                params.flush_remaining = (saved_output.pos - n) as u32;
            }
        } else {
            params.out_buf_ofs += saved_output.pos;
        }

        params.flush_remaining as i32
    }
}

enum CallbackOut<'a> {
    Func(CallbackFunc<'a>),
    Buf(CallbackBuf<'a>),
}

impl CallbackOut<'_> {
    fn new_output_buffer<'b>(
        &'b mut self,
        local_buf: &'b mut [u8],
        out_buf_ofs: usize,
    ) -> OutputBufferOxide<'b> {
        let is_local;
        let buf_len = OUT_BUF_SIZE - 16;
        let chosen_buffer = match *self {
            CallbackOut::Buf(ref mut cb) if cb.out_buf.len() - out_buf_ofs >= OUT_BUF_SIZE => {
                is_local = false;
                &mut cb.out_buf[out_buf_ofs..out_buf_ofs + buf_len]
            }
            _ => {
                is_local = true;
                &mut local_buf[..buf_len]
            }
        };

        OutputBufferOxide {
            inner: chosen_buffer,
            inner_pos: 0,
            local: is_local,
            bit_buffer: 0,
            bits_in: 0,
        }
    }
}

pub(crate) struct CallbackOxide<'a> {
    in_buf: Option<&'a [u8]>,
    in_buf_size: Option<&'a mut usize>,
    out_buf_size: Option<&'a mut usize>,
    out: CallbackOut<'a>,
}

impl<'a> CallbackOxide<'a> {
    fn new_callback_buf(in_buf: &'a [u8], out_buf: &'a mut [u8]) -> Self {
        CallbackOxide {
            in_buf: Some(in_buf),
            in_buf_size: None,
            out_buf_size: None,
            out: CallbackOut::Buf(CallbackBuf { out_buf }),
        }
    }

    fn new_callback_func(in_buf: &'a [u8], callback_func: CallbackFunc<'a>) -> Self {
        CallbackOxide {
            in_buf: Some(in_buf),
            in_buf_size: None,
            out_buf_size: None,
            out: CallbackOut::Func(callback_func),
        }
    }

    fn update_size(&mut self, in_size: Option<usize>, out_size: Option<usize>) {
        if let (Some(in_size), Some(size)) = (in_size, self.in_buf_size.as_mut()) {
            **size = in_size;
        }

        if let (Some(out_size), Some(size)) = (out_size, self.out_buf_size.as_mut()) {
            **size = out_size
        }
    }

    fn flush_output(
        &mut self,
        saved_output: SavedOutputBufferOxide,
        params: &mut ParamsOxide,
    ) -> i32 {
        if saved_output.pos == 0 {
            return params.flush_remaining as i32;
        }

        self.update_size(Some(params.src_pos), None);
        match self.out {
            CallbackOut::Func(ref mut cf) => cf.flush_output(saved_output, params),
            CallbackOut::Buf(ref mut cb) => cb.flush_output(saved_output, params),
        }
    }

    pub(crate) fn buf(&mut self) -> Option<&'a [u8]> {
        self.in_buf
    }
}

struct OutputBufferOxide<'a> {
    pub inner: &'a mut [u8],
    pub inner_pos: usize,
    pub local: bool,

    pub bit_buffer: u32,
    pub bits_in: u32,
}

impl OutputBufferOxide<'_> {
    /// Write bits to the bit buffer and flush it, so that any whole bytes
    /// are written to the underlying buffer.
    fn put_bits(&mut self, bits: u32, len: u32) {
        // TODO: Removing this assertion worsens performance
        // Need to figure out why
        assert!(bits <= ((1u32 << len) - 1u32));
        self.bit_buffer |= bits << self.bits_in;
        self.bits_in += len;

        while self.bits_in >= 8 {
            self.inner[self.inner_pos] = self.bit_buffer as u8;
            self.inner_pos += 1;
            self.bit_buffer >>= 8;
            self.bits_in -= 8;
        }
    }

    #[inline]
    /// Write the provided bits to the bit buffer without flushing
    /// anything. Does not check if there is actually space for it.
    fn put_bits_no_flush(&mut self, bits: u32, len: u32) {
        self.bit_buffer |= bits << self.bits_in;
        self.bits_in += len;
    }

    const fn save(&self) -> SavedOutputBufferOxide {
        SavedOutputBufferOxide {
            pos: self.inner_pos,
            bit_buffer: self.bit_buffer,
            bits_in: self.bits_in,
            local: self.local,
        }
    }

    fn load(&mut self, saved: SavedOutputBufferOxide) {
        self.inner_pos = saved.pos;
        self.bit_buffer = saved.bit_buffer;
        self.bits_in = saved.bits_in;
        self.local = saved.local;
    }

    #[inline]
    /// Pad the bit buffer to a whole byte with
    /// zeroes and write that byte to the output buffer.
    fn pad_to_bytes(&mut self) {
        if self.bits_in != 0 {
            let len = 8 - self.bits_in;
            self.put_bits(0, len);
        }
    }

    #[inline]
    fn write_bytes(&mut self, bytes: &[u8]) {
        debug_assert_eq!(self.bits_in, 0);
        self.inner[self.inner_pos..self.inner_pos + bytes.len()].copy_from_slice(bytes);
        self.inner_pos += bytes.len();
    }
}

struct SavedOutputBufferOxide {
    pub pos: usize,
    pub bit_buffer: u32,
    pub bits_in: u32,
    pub local: bool,
}

struct BitBuffer {
    pub bit_buffer: u64,
    pub bits_in: u32,
}

impl BitBuffer {
    fn put_fast(&mut self, bits: u64, len: u32) {
        self.bit_buffer |= bits << self.bits_in;
        self.bits_in += len;
    }

    fn flush(&mut self, output: &mut OutputBufferOxide) -> Result<()> {
        let pos = output.inner_pos;
        {
            // isolation to please borrow checker
            let inner = &mut output.inner[pos..pos + 8];
            let bytes = u64::to_le_bytes(self.bit_buffer);
            inner.copy_from_slice(&bytes);
        }
        match output.inner_pos.checked_add((self.bits_in >> 3) as usize) {
            Some(n) if n <= output.inner.len() => output.inner_pos = n,
            _ => return Err(Error {}),
        }
        self.bit_buffer >>= self.bits_in & !7;
        self.bits_in &= 7;
        Ok(())
    }
}

/// A struct containing data about huffman codes and symbol frequencies.
///
/// NOTE: Only the literal/lengths have enough symbols to actually use
/// the full array. It's unclear why it's defined like this in miniz,
/// it could be for cache/alignment reasons.
pub(crate) struct HuffmanOxide {
    /// Number of occurrences of each symbol.
    pub count: [[u16; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
    /// The bits of the huffman code assigned to the symbol
    pub codes: [[u16; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
    /// The length of the huffman code assigned to the symbol.
    pub code_sizes: [[u8; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
}

/// Tables used for literal/lengths in `HuffmanOxide`.
const LITLEN_TABLE: usize = 0;
/// Tables for distances.
const DIST_TABLE: usize = 1;
/// Tables for the run-length encoded huffman lengths for literals/lengths/distances.
const HUFF_CODES_TABLE: usize = 2;

/// Status of RLE encoding of huffman code lengths.
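///
/// Run lengths use the deflate code-length alphabet: code 16 repeats the
/// previous length 3-6 times, code 17 repeats zero 3-10 times, and code 18
/// repeats zero 11-138 times, each storing the repeat count minus the minimum
/// for that code as its extra-bits value.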
struct Rle {
    pub z_count: u32,
    pub repeat_count: u16,
    pub prev_code_size: u8,
}

impl Rle {
    fn prev_code_size(
        &mut self,
        packed_code_sizes: &mut [u8],
        packed_pos: &mut usize,
        h: &mut HuffmanOxide,
    ) -> Result<()> {
        let mut write = |buf| write(buf, packed_code_sizes, packed_pos);
        let counts = &mut h.count[HUFF_CODES_TABLE];
        if self.repeat_count != 0 {
            if self.repeat_count < 3 {
                counts[self.prev_code_size as usize] =
                    counts[self.prev_code_size as usize].wrapping_add(self.repeat_count);
                let code = self.prev_code_size;
                write(&[code, code, code][..self.repeat_count as usize])?;
            } else {
                counts[16] = counts[16].wrapping_add(1);
                write(&[16, (self.repeat_count - 3) as u8][..])?;
            }
            self.repeat_count = 0;
        }

        Ok(())
    }

    fn zero_code_size(
        &mut self,
        packed_code_sizes: &mut [u8],
        packed_pos: &mut usize,
        h: &mut HuffmanOxide,
    ) -> Result<()> {
        let mut write = |buf| write(buf, packed_code_sizes, packed_pos);
        let counts = &mut h.count[HUFF_CODES_TABLE];
        if self.z_count != 0 {
            if self.z_count < 3 {
                counts[0] = counts[0].wrapping_add(self.z_count as u16);
                write(&[0, 0, 0][..self.z_count as usize])?;
            } else if self.z_count <= 10 {
                counts[17] = counts[17].wrapping_add(1);
                write(&[17, (self.z_count - 3) as u8][..])?;
            } else {
                counts[18] = counts[18].wrapping_add(1);
                write(&[18, (self.z_count - 11) as u8][..])?;
            }
            self.z_count = 0;
        }

        Ok(())
    }
}

fn write(src: &[u8], dst: &mut [u8], dst_pos: &mut usize) -> Result<()> {
    match dst.get_mut(*dst_pos..*dst_pos + src.len()) {
        Some(s) => s.copy_from_slice(src),
        None => return Err(Error {}),
    }
    *dst_pos += src.len();
    Ok(())
}

impl Default for HuffmanOxide {
    fn default() -> Self {
        HuffmanOxide {
            count: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
            codes: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
            code_sizes: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
        }
    }
}

impl HuffmanOxide {
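    /// Sort the symbol/frequency pairs with a two-pass, byte-wise radix sort on
    /// the frequency key (least-significant byte first); the second pass is
    /// skipped when every frequency fits in one byte. Returns whichever scratch
    /// buffer ends up holding the sorted symbols.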
    fn radix_sort_symbols<'a>(
        symbols0: &'a mut [SymFreq],
        symbols1: &'a mut [SymFreq],
    ) -> &'a mut [SymFreq] {
        let mut hist = [[0; 256]; 2];

        for freq in symbols0.iter() {
            hist[0][(freq.key & 0xFF) as usize] += 1;
            hist[1][((freq.key >> 8) & 0xFF) as usize] += 1;
        }

        let mut n_passes = 2;
        if symbols0.len() == hist[1][0] {
            n_passes -= 1;
        }

        let mut current_symbols = symbols0;
        let mut new_symbols = symbols1;

        for (pass, hist_item) in hist.iter().enumerate().take(n_passes) {
            let mut offsets = [0; 256];
            let mut offset = 0;
            for i in 0..256 {
                offsets[i] = offset;
                offset += hist_item[i];
            }

            for sym in current_symbols.iter() {
                let j = ((sym.key >> (pass * 8)) & 0xFF) as usize;
                new_symbols[offsets[j]] = *sym;
                offsets[j] += 1;
            }

            mem::swap(&mut current_symbols, &mut new_symbols);
        }

        current_symbols
    }

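    /// Compute optimal code lengths in place from symbols sorted by frequency,
    /// using the in-place minimum-redundancy algorithm by Moffat and Katajainen
    /// (the same algorithm credited in the original miniz).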
    fn calculate_minimum_redundancy(symbols: &mut [SymFreq]) {
        match symbols.len() {
            0 => (),
            1 => symbols[0].key = 1,
            n => {
                symbols[0].key += symbols[1].key;
                let mut root = 0;
                let mut leaf = 2;
                for next in 1..n - 1 {
                    if (leaf >= n) || (symbols[root].key < symbols[leaf].key) {
                        symbols[next].key = symbols[root].key;
                        symbols[root].key = next as u16;
                        root += 1;
                    } else {
                        symbols[next].key = symbols[leaf].key;
                        leaf += 1;
                    }

                    if (leaf >= n) || (root < next && symbols[root].key < symbols[leaf].key) {
                        symbols[next].key = symbols[next].key.wrapping_add(symbols[root].key);
                        symbols[root].key = next as u16;
                        root += 1;
                    } else {
                        symbols[next].key = symbols[next].key.wrapping_add(symbols[leaf].key);
                        leaf += 1;
                    }
                }

                symbols[n - 2].key = 0;
                for next in (0..n - 2).rev() {
                    symbols[next].key = symbols[symbols[next].key as usize].key + 1;
                }

                let mut avbl = 1;
                let mut used = 0;
                let mut dpth = 0;
                let mut root = (n - 2) as i32;
                let mut next = (n - 1) as i32;
                while avbl > 0 {
                    while (root >= 0) && (symbols[root as usize].key == dpth) {
                        used += 1;
                        root -= 1;
                    }
                    while avbl > used {
                        symbols[next as usize].key = dpth;
                        next -= 1;
                        avbl -= 1;
                    }
                    avbl = 2 * used;
                    dpth += 1;
                    used = 0;
                }
            }
        }
    }

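    /// Limit code lengths to `max_code_size` bits by folding any longer codes
    /// into the longest allowed length, then demote codes one step at a time
    /// until the Kraft inequality is satisfied again and the lengths describe a
    /// valid prefix code.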
    fn enforce_max_code_size(num_codes: &mut [i32], code_list_len: usize, max_code_size: usize) {
        if code_list_len <= 1 {
            return;
        }

        num_codes[max_code_size] += num_codes[max_code_size + 1..].iter().sum::<i32>();
        let total = num_codes[1..=max_code_size]
            .iter()
            .rev()
            .enumerate()
            .fold(0u32, |total, (i, &x)| total + ((x as u32) << i));

        for _ in (1 << max_code_size)..total {
            num_codes[max_code_size] -= 1;
            for i in (1..max_code_size).rev() {
                if num_codes[i] != 0 {
                    num_codes[i] -= 1;
                    num_codes[i + 1] += 2;
                    break;
                }
            }
        }
    }

    fn optimize_table(
        &mut self,
        table_num: usize,
        table_len: usize,
        code_size_limit: usize,
        static_table: bool,
    ) {
        let mut num_codes = [0i32; 32 + 1];
        let mut next_code = [0u32; MAX_SUPPORTED_HUFF_CODESIZE + 1];

        if static_table {
            for &code_size in &self.code_sizes[table_num][..table_len] {
                num_codes[code_size as usize] += 1;
            }
        } else {
            let mut symbols0 = [SymFreq {
                key: 0,
                sym_index: 0,
            }; MAX_HUFF_SYMBOLS];
            let mut symbols1 = [SymFreq {
                key: 0,
                sym_index: 0,
            }; MAX_HUFF_SYMBOLS];

            let mut num_used_symbols = 0;
            for i in 0..table_len {
                if self.count[table_num][i] != 0 {
                    symbols0[num_used_symbols] = SymFreq {
                        key: self.count[table_num][i],
                        sym_index: i as u16,
                    };
                    num_used_symbols += 1;
                }
            }

            let symbols = Self::radix_sort_symbols(
                &mut symbols0[..num_used_symbols],
                &mut symbols1[..num_used_symbols],
            );
            Self::calculate_minimum_redundancy(symbols);

            for symbol in symbols.iter() {
                num_codes[symbol.key as usize] += 1;
            }

            Self::enforce_max_code_size(&mut num_codes, num_used_symbols, code_size_limit);

            self.code_sizes[table_num].fill(0);
            self.codes[table_num].fill(0);

            let mut last = num_used_symbols;
            for (i, &num_item) in num_codes
                .iter()
                .enumerate()
                .take(code_size_limit + 1)
                .skip(1)
            {
                let first = last - num_item as usize;
                for symbol in &symbols[first..last] {
                    self.code_sizes[table_num][symbol.sym_index as usize] = i as u8;
                }
                last = first;
            }
        }

        let mut j = 0;
        next_code[1] = 0;
        for i in 2..=code_size_limit {
            j = (j + num_codes[i - 1]) << 1;
            next_code[i] = j as u32;
        }

        for (&code_size, huff_code) in self.code_sizes[table_num]
            .iter()
            .take(table_len)
            .zip(self.codes[table_num].iter_mut().take(table_len))
        {
            if code_size == 0 {
                continue;
            }

            let code = next_code[code_size as usize];

            next_code[code_size as usize] += 1;

            let rev_code = (code as u16).reverse_bits() >> (16 - code_size);

            *huff_code = rev_code;
        }
    }

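    /// Set up the fixed huffman code lengths from the deflate specification
    /// (RFC 1951, section 3.2.6) and write the static block type (`01`).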
    fn start_static_block(&mut self, output: &mut OutputBufferOxide) {
        self.code_sizes[LITLEN_TABLE][0..144].fill(8);
        self.code_sizes[LITLEN_TABLE][144..256].fill(9);
        self.code_sizes[LITLEN_TABLE][256..280].fill(7);
        self.code_sizes[LITLEN_TABLE][280..288].fill(8);

        self.code_sizes[DIST_TABLE][..32].fill(5);

        self.optimize_table(LITLEN_TABLE, 288, 15, true);
        self.optimize_table(DIST_TABLE, 32, 15, true);

        output.put_bits(0b01, 2)
    }

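    /// Write the header of a dynamic block (RFC 1951, section 3.2.7): the block
    /// type (`10`), the literal/length and distance code counts, the code
    /// lengths for the code-length alphabet in the swizzled order, and finally
    /// the run-length-encoded code lengths themselves.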
    fn start_dynamic_block(&mut self, output: &mut OutputBufferOxide) -> Result<()> {
        // There will always be one, and only one end of block code.
        self.count[0][256] = 1;

        self.optimize_table(0, MAX_HUFF_SYMBOLS_0, 15, false);
        self.optimize_table(1, MAX_HUFF_SYMBOLS_1, 15, false);

        let num_lit_codes = 286
            - &self.code_sizes[0][257..286]
                .iter()
                .rev()
                .take_while(|&x| *x == 0)
                .count();

        let num_dist_codes = 30
            - &self.code_sizes[1][1..30]
                .iter()
                .rev()
                .take_while(|&x| *x == 0)
                .count();

        let mut code_sizes_to_pack = [0u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1];
        let mut packed_code_sizes = [0u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1];

        let total_code_sizes_to_pack = num_lit_codes + num_dist_codes;

        code_sizes_to_pack[..num_lit_codes].copy_from_slice(&self.code_sizes[0][..num_lit_codes]);

        code_sizes_to_pack[num_lit_codes..total_code_sizes_to_pack]
            .copy_from_slice(&self.code_sizes[1][..num_dist_codes]);

        let mut rle = Rle {
            z_count: 0,
            repeat_count: 0,
            prev_code_size: 0xFF,
        };

        self.count[HUFF_CODES_TABLE][..MAX_HUFF_SYMBOLS_2].fill(0);

        let mut packed_pos = 0;
        for &code_size in &code_sizes_to_pack[..total_code_sizes_to_pack] {
            if code_size == 0 {
                rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
                rle.z_count += 1;
                if rle.z_count == 138 {
                    rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
                }
            } else {
                rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
                if code_size != rle.prev_code_size {
                    rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
                    self.count[HUFF_CODES_TABLE][code_size as usize] =
                        self.count[HUFF_CODES_TABLE][code_size as usize].wrapping_add(1);
                    write(&[code_size], &mut packed_code_sizes, &mut packed_pos)?;
                } else {
                    rle.repeat_count += 1;
                    if rle.repeat_count == 6 {
                        rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
                    }
                }
            }
            rle.prev_code_size = code_size;
        }

        if rle.repeat_count != 0 {
            rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
        } else {
            rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?;
        }

        self.optimize_table(2, MAX_HUFF_SYMBOLS_2, 7, false);

        output.put_bits_no_flush(2, 2);

        output.put_bits_no_flush((num_lit_codes - 257) as u32, 5);
        output.put_bits_no_flush((num_dist_codes - 1) as u32, 5);

        let mut num_bit_lengths = 18
            - HUFFMAN_LENGTH_ORDER
                .iter()
                .rev()
                .take_while(|&swizzle| self.code_sizes[HUFF_CODES_TABLE][*swizzle as usize] == 0)
                .count();

        num_bit_lengths = cmp::max(4, num_bit_lengths + 1);
        output.put_bits(num_bit_lengths as u32 - 4, 4);
        for &swizzle in &HUFFMAN_LENGTH_ORDER[..num_bit_lengths] {
            output.put_bits(
                u32::from(self.code_sizes[HUFF_CODES_TABLE][swizzle as usize]),
                3,
            );
        }

        let mut packed_code_size_index = 0;
        while packed_code_size_index < packed_pos {
            let code = packed_code_sizes[packed_code_size_index] as usize;
            packed_code_size_index += 1;
            assert!(code < MAX_HUFF_SYMBOLS_2);
            output.put_bits(
                u32::from(self.codes[HUFF_CODES_TABLE][code]),
                u32::from(self.code_sizes[HUFF_CODES_TABLE][code]),
            );
            if code >= 16 {
                output.put_bits(
                    u32::from(packed_code_sizes[packed_code_size_index]),
                    [2, 3, 7][code - 16],
                );
                packed_code_size_index += 1;
            }
        }

        Ok(())
    }
}

pub(crate) struct DictOxide {
    /// The maximum number of checks in the hash chain, for the initial match
    /// and the lazy match, respectively.
    pub max_probes: [u32; 2],
    /// Buffer of input data.
    /// Padded with 1 byte to simplify matching code in `compress_fast`.
    pub b: HashBuffers,

    pub code_buf_dict_pos: usize,
    pub lookahead_size: usize,
    pub lookahead_pos: usize,
    pub size: usize,
    loop_len: u8,
}

const fn probes_from_flags(flags: u32) -> [u32; 2] {
    [
        1 + ((flags & 0xFFF) + 2) / 3,
        1 + (((flags & 0xFFF) >> 2) + 2) / 3,
    ]
}

impl DictOxide {
    fn new(flags: u32) -> Self {
        DictOxide {
            max_probes: probes_from_flags(flags),
            b: HashBuffers::default(),
            code_buf_dict_pos: 0,
            lookahead_size: 0,
            lookahead_pos: 0,
            size: 0,
            loop_len: 32,
        }
    }

    fn update_flags(&mut self, flags: u32) {
        self.max_probes = probes_from_flags(flags);
    }

    fn reset(&mut self) {
        self.b.reset();
        self.code_buf_dict_pos = 0;
        self.lookahead_size = 0;
        self.lookahead_pos = 0;
        self.size = 0;
    }

    /// Do an unaligned read of the data at `pos` in the dictionary and
    /// interpret it as a little-endian u32.
    #[inline]
    fn read_unaligned_u32(&self, pos: usize) -> u32 {
        // Masking the value here helps avoid bounds checks.
        let pos = pos & LZ_DICT_SIZE_MASK;
        let end = pos + 4;
        // Somehow this assertion makes things faster.
        // TODO: As of May 2024 this does not seem to make any difference,
        // so consider removing.
        assert!(end < LZ_DICT_FULL_SIZE);

        let bytes: [u8; 4] = self.b.dict[pos..end].try_into().unwrap();
        u32::from_le_bytes(bytes)
    }

    /// Do an unaligned read of the data at `pos` in the dictionary and
    /// interpret it as a little-endian u64.
    #[inline]
    fn read_unaligned_u64(&self, pos: usize) -> u64 {
        // Help evade bounds/panic code check by masking the position value
        // This provides a small speedup at the cost of an instruction or two instead of
        // having to use unsafe.
        let pos = pos & LZ_DICT_SIZE_MASK;
        let bytes: [u8; 8] = self.b.dict[pos..pos + 8].try_into().unwrap();
        u64::from_le_bytes(bytes)
    }

    /// Try to find a match for the data at lookahead_pos in the dictionary that is
    /// longer than `match_len`.
    /// Returns a tuple containing (match_distance, match_length). Will be equal to the input
    /// values if no better matches were found.
    fn find_match(
        &self,
        lookahead_pos: usize,
        max_dist: usize,
        max_match_len: u32,
        mut match_dist: u32,
        mut match_len: u32,
    ) -> (u32, u32) {
        // Clamp match_len and max_match_len to be valid. (They should be valid when this is
        // called, but clamp them for now just in case, for safety reasons.)
        // This should normally end up as at worst conditional moves,
        // so it shouldn't slow us down much.
        // TODO: Statically verify these so we don't need to do this.
        let max_match_len = cmp::min(MAX_MATCH_LEN as u32, max_match_len);
        match_len = cmp::max(match_len, 1);

        // If we already have a match of the full length don't bother searching for another one.
        if max_match_len <= match_len {
            return (match_dist, match_len);
        }

        let pos = lookahead_pos & LZ_DICT_SIZE_MASK;
        let mut probe_pos = pos;
        // Number of probes into the hash chains.
        let mut num_probes_left = if match_len < 32 {
            self.max_probes[0]
        } else {
            self.max_probes[1]
        };

        // Read the last byte of the current match, and the next one, used to compare matches.
        let mut c01: u16 = read_u16_le(&self.b.dict, pos + match_len as usize - 1);
        // Read the two bytes at the start position of the current match.
        let s01: u16 = read_u16_le(&self.b.dict, pos);

        'outer: loop {
            let mut dist;
            'found: loop {
                num_probes_left -= 1;
                if num_probes_left == 0 {
                    // We have done as many probes in the hash chain as the current compression
                    // settings allow, so return the best match we found, if any.
                    return (match_dist, match_len);
                }

                for _ in 0..3 {
                    let next_probe_pos = self.b.next[probe_pos] as usize;

                    dist = (lookahead_pos - next_probe_pos) & 0xFFFF;
                    if next_probe_pos == 0 || dist > max_dist {
                        // We reached the end of the hash chain, or the next value is further away
                        // than the maximum allowed distance, so return the best match we found, if
                        // any.
                        return (match_dist, match_len);
                    }

                    // Mask the position value to get the position in the hash chain of the next
                    // position to match against.
                    probe_pos = next_probe_pos & LZ_DICT_SIZE_MASK;

                    // TODO: This bounds check does not get optimized out
                    if read_u16_le(&self.b.dict, probe_pos + match_len as usize - 1) == c01 {
                        break 'found;
                    }
                }
            }

            if dist == 0 {
                // We've looked through the whole match range, so return the best match we
                // found.
                return (match_dist, match_len);
            }

            // Check if the first two bytes match.
            if read_u16_le(&self.b.dict, probe_pos) != s01 {
                continue;
            }

            let mut p = pos + 2;
            let mut q = probe_pos + 2;
            // The first two bytes matched, so check the full length of the match.
            // TODO: This is a workaround for an upstream issue introduced after an LLVM upgrade in Rust 1.82.
            // The compiler is too smart and ends up unrolling the loop, which makes performance worse.
            // Using a variable instead of a constant here to prevent that seems to recover at least
            // some of the performance loss.
            for _ in 0..self.loop_len as i32 {
                let p_data: u64 = self.read_unaligned_u64(p);
                let q_data: u64 = self.read_unaligned_u64(q);
                // Compare 8 bytes at a time using unaligned loads of 64-bit integers.
                let xor_data = p_data ^ q_data;
                if xor_data == 0 {
                    p += 8;
                    q += 8;
                } else {
                    // If not all of the last 8 bytes matched, check how many of them did.
                    let trailing = xor_data.trailing_zeros();

                    let probe_len = p - pos + (trailing as usize >> 3);
                    if probe_len > match_len as usize {
                        match_dist = dist as u32;
                        match_len = cmp::min(max_match_len, probe_len as u32);
                        if match_len == max_match_len {
                            // We found a match that had the maximum allowed length,
                            // so there is no point searching further.
                            return (match_dist, match_len);
                        }
                        // We found a better match, so save the last two bytes for further match
                        // comparisons.
                        c01 = read_u16_le(&self.b.dict, pos + match_len as usize - 1);
                    }
                    continue 'outer;
                }
            }

            return (dist as u32, cmp::min(max_match_len, MAX_MATCH_LEN as u32));
        }
    }
}

pub(crate) struct ParamsOxide {
    pub flags: u32,
    pub greedy_parsing: bool,
    pub block_index: u32,

    pub saved_match_dist: u32,
    pub saved_match_len: u32,
    pub saved_lit: u8,

    pub flush: TDEFLFlush,
    pub flush_ofs: u32,
    pub flush_remaining: u32,
    pub finished: bool,

    pub adler32: u32,

    pub src_pos: usize,

    pub out_buf_ofs: usize,
    pub prev_return_status: TDEFLStatus,

    pub saved_bit_buffer: u32,
    pub saved_bits_in: u32,

    pub local_buf: Box<LocalBuf>,
}

impl ParamsOxide {
    fn new(flags: u32) -> Self {
        ParamsOxide {
            flags,
            greedy_parsing: flags & TDEFL_GREEDY_PARSING_FLAG != 0,
            block_index: 0,
            saved_match_dist: 0,
            saved_match_len: 0,
            saved_lit: 0,
            flush: TDEFLFlush::None,
            flush_ofs: 0,
            flush_remaining: 0,
            finished: false,
            adler32: MZ_ADLER32_INIT,
            src_pos: 0,
            out_buf_ofs: 0,
            prev_return_status: TDEFLStatus::Okay,
            saved_bit_buffer: 0,
            saved_bits_in: 0,
            local_buf: Box::default(),
        }
    }

    fn update_flags(&mut self, flags: u32) {
        self.flags = flags;
        self.greedy_parsing = self.flags & TDEFL_GREEDY_PARSING_FLAG != 0;
    }

    /// Reset state, saving settings.
    fn reset(&mut self) {
        self.block_index = 0;
        self.saved_match_len = 0;
        self.saved_match_dist = 0;
        self.saved_lit = 0;
        self.flush = TDEFLFlush::None;
        self.flush_ofs = 0;
        self.flush_remaining = 0;
        self.finished = false;
        self.adler32 = MZ_ADLER32_INIT;
        self.src_pos = 0;
        self.out_buf_ofs = 0;
        self.prev_return_status = TDEFLStatus::Okay;
        self.saved_bit_buffer = 0;
        self.saved_bits_in = 0;
        self.local_buf.b = [0; OUT_BUF_SIZE];
    }
}

pub(crate) struct LZOxide {
    pub codes: [u8; LZ_CODE_BUF_SIZE],
    pub code_position: usize,
    pub flag_position: usize,

    // The total number of bytes in the current block.
    pub total_bytes: u32,
    pub num_flags_left: u32,
}

impl LZOxide {
    const fn new() -> Self {
        LZOxide {
            codes: [0; LZ_CODE_BUF_SIZE],
            code_position: 1,
            flag_position: 0,
            total_bytes: 0,
            num_flags_left: 8,
        }
    }

    fn write_code(&mut self, val: u8) {
        // Perf - go via u16 to help evade bounds check
        // TODO: see if we can use u16 for flag_position in general.
        self.codes[usize::from(self.code_position as u16)] = val;
        self.code_position += 1;
    }

    fn init_flag(&mut self) {
        if self.num_flags_left == 8 {
            *self.get_flag() = 0;
            self.code_position -= 1;
        } else {
            *self.get_flag() >>= self.num_flags_left;
        }
    }

    fn get_flag(&mut self) -> &mut u8 {
        // Perf - go via u16 to help evade bounds check
        // TODO: see if we can use u16 for flag_position in general.
        &mut self.codes[usize::from(self.flag_position as u16)]
    }

    fn plant_flag(&mut self) {
        self.flag_position = self.code_position;
        self.code_position += 1;
    }

    fn consume_flag(&mut self) {
        self.num_flags_left -= 1;
        if self.num_flags_left == 0 {
            self.num_flags_left = 8;
            self.plant_flag();
        }
    }
}

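/// Write out the codes in the LZ code buffer using the provided huffman codes.
///
/// The buffer holds a flag byte for each run of up to 8 codes; each flag bit
/// (consumed least-significant bit first) marks either a single literal byte or
/// a 3-byte match record: the biased match length followed by the match
/// distance as a little-endian u16.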
fn compress_lz_codes(
    huff: &HuffmanOxide,
    output: &mut OutputBufferOxide,
    lz_code_buf: &[u8; LZ_CODE_BUF_SIZE],
    lz_code_buf_used_len: usize,
) -> Result<bool> {
    let mut flags = 1;
    let mut bb = BitBuffer {
        bit_buffer: u64::from(output.bit_buffer),
        bits_in: output.bits_in,
    };

    // Help the compiler see that this variable won't be larger than
    // the buffer length, since the constant won't propagate through the function call.
    let lz_code_buf_used_len = cmp::min(lz_code_buf.len(), lz_code_buf_used_len);

    let mut i: usize = 0;
    while i < lz_code_buf_used_len {
        if flags == 1 {
            flags = u32::from(lz_code_buf[i]) | 0x100;
            i += 1;
        }

        // The lz code was a length code
        if flags & 1 == 1 {
            flags >>= 1;

            let sym;
            let num_extra_bits;

            let match_len = lz_code_buf[i] as usize;

            let match_dist = read_u16_le(lz_code_buf, i + 1);

            i += 3;

            debug_assert!(huff.code_sizes[0][LEN_SYM[match_len] as usize] != 0);
            bb.put_fast(
                u64::from(huff.codes[0][LEN_SYM[match_len] as usize]),
                u32::from(huff.code_sizes[0][LEN_SYM[match_len] as usize]),
            );
            bb.put_fast(
                match_len as u64 & u64::from(BITMASKS[LEN_EXTRA[match_len] as usize]),
                u32::from(LEN_EXTRA[match_len]),
            );

            if match_dist < 512 {
                sym = SMALL_DIST_SYM[match_dist as usize] as usize;
                num_extra_bits = SMALL_DIST_EXTRA[match_dist as usize] as usize;
            } else {
                sym = LARGE_DIST_SYM[(match_dist >> 8) as usize] as usize;
                num_extra_bits = LARGE_DIST_EXTRA[(match_dist >> 8) as usize] as usize;
            }

            debug_assert!(huff.code_sizes[1][sym] != 0);
            bb.put_fast(
                u64::from(huff.codes[1][sym]),
                u32::from(huff.code_sizes[1][sym]),
            );
            bb.put_fast(
                u64::from(match_dist) & u64::from(BITMASKS[num_extra_bits]),
                num_extra_bits as u32,
            );
        } else {
            // The lz code was a literal
            for _ in 0..3 {
                flags >>= 1;
                let lit = lz_code_buf[i & (LZ_CODE_BUF_SIZE - 1)];
                i += 1;

                debug_assert!(huff.code_sizes[0][lit as usize] != 0);
                bb.put_fast(
                    u64::from(huff.codes[0][lit as usize]),
                    u32::from(huff.code_sizes[0][lit as usize]),
                );

                if flags & 1 == 1 || i >= lz_code_buf_used_len {
                    break;
                }
            }
        }

        bb.flush(output)?;
    }

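    // Drain any bits left in the local 64-bit buffer through the output buffer,
    // at most 16 bits at a time.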
    output.bits_in = 0;
    output.bit_buffer = 0;
    while bb.bits_in != 0 {
        let n = cmp::min(bb.bits_in, 16);
        output.put_bits(bb.bit_buffer as u32 & BITMASKS[n as usize], n);
        bb.bit_buffer >>= n;
        bb.bits_in -= n;
    }

    // Output the end-of-block symbol.
    output.put_bits(
        u32::from(huff.codes[0][256]),
        u32::from(huff.code_sizes[0][256]),
    );

    Ok(true)
}

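/// Compress the LZ codes recorded for the current block, using a static (fixed) Huffman
/// table if `static_block` is set and a dynamically generated one otherwise.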
fn compress_block(
    huff: &mut HuffmanOxide,
    output: &mut OutputBufferOxide,
    lz: &LZOxide,
    static_block: bool,
) -> Result<bool> {
    if static_block {
        huff.start_static_block(output);
    } else {
        huff.start_dynamic_block(output)?;
    }

    compress_lz_codes(huff, output, &lz.codes, lz.code_position)
}

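/// Write out the current block: either the compressed LZ codes, or, if raw blocks are
/// forced or compression expanded the data, a stored (raw) block. Also writes the zlib
/// header before the first block and the finish/flush trailer when requested, then
/// resets the per-block state.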
pub(crate) fn flush_block(
    d: &mut CompressorOxide,
    callback: &mut CallbackOxide,
    flush: TDEFLFlush,
) -> Result<i32> {
    let mut saved_buffer;
    {
        let mut output = callback
            .out
            .new_output_buffer(&mut d.params.local_buf.b, d.params.out_buf_ofs);
        output.bit_buffer = d.params.saved_bit_buffer;
        output.bits_in = d.params.saved_bits_in;

        // TODO: Don't think this second condition should be here but need to verify.
        let use_raw_block = (d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0)
            && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos) <= d.dict.size;
        debug_assert_eq!(
            use_raw_block,
            d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0
        );

        assert!(d.params.flush_remaining == 0);
        d.params.flush_ofs = 0;
        d.params.flush_remaining = 0;

        d.lz.init_flag();

        // If we are at the start of the stream, write the zlib header if requested.
        if d.params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 && d.params.block_index == 0 {
            let header = zlib::header_from_flags(d.params.flags);
            output.put_bits_no_flush(header[0].into(), 8);
            output.put_bits(header[1].into(), 8);
        }

        // Output the block header.
        output.put_bits((flush == TDEFLFlush::Finish) as u32, 1);

        saved_buffer = output.save();

        let comp_success = if !use_raw_block {
            let use_static =
                (d.params.flags & TDEFL_FORCE_ALL_STATIC_BLOCKS != 0) || (d.lz.total_bytes < 48);
            compress_block(&mut d.huff, &mut output, &d.lz, use_static)?
        } else {
            false
        };

        // If the compressed output would take up more space than the raw input data,
        // output a stored (raw) block instead, which has at most 5 bytes of overhead;
        // we only use some simple heuristics for now.
        // A stored block will have an overhead of at least 4 bytes containing the block length
        // and its one's complement, but usually more since the length fields have to start at
        // a byte boundary and thus require padding bits, for a total of up to 5 bytes of overhead.
        // As a static block will have an overhead of at most 1 bit per byte
        // (as literals are coded with either 8 or 9 bits), a raw block will
        // never take up less space if the number of input bytes is less than 32.
        let expanded = (d.lz.total_bytes > 32)
            && (output.inner_pos - saved_buffer.pos + 1 >= (d.lz.total_bytes as usize))
            && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos <= d.dict.size);

        if use_raw_block || expanded {
            output.load(saved_buffer);

            // Block header.
            output.put_bits(0, 2);

            // Block length has to start on a byte boundary, so pad.
            output.pad_to_bytes();

            // Block length and one's complement of block length.
            output.put_bits(d.lz.total_bytes & 0xFFFF, 16);
            output.put_bits(!d.lz.total_bytes & 0xFFFF, 16);

            // Write the actual bytes.
            let start = d.dict.code_buf_dict_pos & LZ_DICT_SIZE_MASK;
            let end = (d.dict.code_buf_dict_pos + d.lz.total_bytes as usize) & LZ_DICT_SIZE_MASK;
            let dict = &mut d.dict.b.dict;
            if start < end {
                // The data does not wrap around.
                output.write_bytes(&dict[start..end]);
            } else if d.lz.total_bytes > 0 {
                // The data wraps around and the input was not 0 bytes.
                output.write_bytes(&dict[start..LZ_DICT_SIZE]);
                output.write_bytes(&dict[..end]);
            }
        } else if !comp_success {
            output.load(saved_buffer);
            compress_block(&mut d.huff, &mut output, &d.lz, true)?;
        }

        if flush != TDEFLFlush::None {
            if flush == TDEFLFlush::Finish {
                output.pad_to_bytes();
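                // A zlib stream ends with the Adler-32 checksum of the
                // uncompressed data, written in big-endian order.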
                if d.params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 {
                    let mut adler = d.params.adler32;
                    for _ in 0..4 {
                        output.put_bits((adler >> 24) & 0xFF, 8);
                        adler <<= 8;
                    }
                }
            } else {
                // Sync or Full flush.
                // Output an empty raw block.
                output.put_bits(0, 3);
                output.pad_to_bytes();
                output.put_bits(0, 16);
                output.put_bits(0xFFFF, 16);
            }
        }

        d.huff.count[0][..MAX_HUFF_SYMBOLS_0].fill(0);
        d.huff.count[1][..MAX_HUFF_SYMBOLS_1].fill(0);

        // Clear LZ buffer for the next block.
        d.lz.code_position = 1;
        d.lz.flag_position = 0;
        d.lz.num_flags_left = 8;
        d.dict.code_buf_dict_pos += d.lz.total_bytes as usize;
        d.lz.total_bytes = 0;
        d.params.block_index += 1;

        saved_buffer = output.save();

        d.params.saved_bit_buffer = saved_buffer.bit_buffer;
        d.params.saved_bits_in = saved_buffer.bits_in;
    }

    Ok(callback.flush_output(saved_buffer, &mut d.params))
}

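/// Record a literal byte in the LZ code buffer and update the literal/length symbol
/// count used for building the Huffman tables.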
pub(crate) fn record_literal(h: &mut HuffmanOxide, lz: &mut LZOxide, lit: u8) {
    lz.total_bytes += 1;
    lz.write_code(lit);

    *lz.get_flag() >>= 1;
    lz.consume_flag();

    h.count[0][lit as usize] += 1;
}

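/// Record an LZ77 match in the LZ code buffer as one byte for length - MIN_MATCH_LEN
/// and two bytes for distance - 1 (little-endian), and update the symbol counts used
/// for building the Huffman tables.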
fn record_match(h: &mut HuffmanOxide, lz: &mut LZOxide, mut match_len: u32, mut match_dist: u32) {
    debug_assert!(match_len >= MIN_MATCH_LEN.into());
    debug_assert!(match_dist >= 1);
    debug_assert!(match_dist as usize <= LZ_DICT_SIZE);

    lz.total_bytes += match_len;
    match_dist -= 1;
    match_len -= u32::from(MIN_MATCH_LEN);
    lz.write_code(match_len as u8);
    lz.write_code(match_dist as u8);
    lz.write_code((match_dist >> 8) as u8);

    *lz.get_flag() >>= 1;
    *lz.get_flag() |= 0x80;
    lz.consume_flag();

    let symbol = if match_dist < 512 {
        SMALL_DIST_SYM[match_dist as usize]
    } else {
        LARGE_DIST_SYM[((match_dist >> 8) & 127) as usize]
    } as usize;
    h.count[1][symbol] += 1;
    // Perf - go via u8 to help optimize out bounds check.
    h.count[0][LEN_SYM[usize::from(match_len as u8)] as usize] += 1;
}

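/// Main compression loop for the normal compression levels, using lazy parsing unless
/// greedy parsing was requested via the compression flags.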
fn compress_normal(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool {
    let in_buf = match callback.in_buf {
        None => return true,
        Some(in_buf) => in_buf,
    };

    let mut src_pos = d.params.src_pos;
    let mut lookahead_size = d.dict.lookahead_size;
    let mut lookahead_pos = d.dict.lookahead_pos;
    let mut saved_lit = d.params.saved_lit;
    let mut saved_match_dist = d.params.saved_match_dist;
    let mut saved_match_len = d.params.saved_match_len;

    while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size != 0) {
        let src_buf_left = in_buf.len() - src_pos;
        let num_bytes_to_process = cmp::min(src_buf_left, MAX_MATCH_LEN - lookahead_size);

        if lookahead_size + d.dict.size >= usize::from(MIN_MATCH_LEN) - 1
            && num_bytes_to_process > 0
        {
            let dictb = &mut d.dict.b;

            let mut dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
            let mut ins_pos = lookahead_pos + lookahead_size - 2;
            // Start the hash value from the first two bytes
            let mut hash = update_hash(
                u16::from(dictb.dict[ins_pos & LZ_DICT_SIZE_MASK]),
                dictb.dict[(ins_pos + 1) & LZ_DICT_SIZE_MASK],
            );

            lookahead_size += num_bytes_to_process;

            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
                // Add byte to input buffer.
                dictb.dict[dst_pos] = c;
                if dst_pos < MAX_MATCH_LEN - 1 {
                    dictb.dict[LZ_DICT_SIZE + dst_pos] = c;
                }

                // Generate hash from the current byte,
                hash = update_hash(hash, c);
                dictb.next[ins_pos & LZ_DICT_SIZE_MASK] = dictb.hash[hash as usize];
                // and insert it into the hash chain.
                dictb.hash[hash as usize] = ins_pos as u16;
                dst_pos = (dst_pos + 1) & LZ_DICT_SIZE_MASK;
                ins_pos += 1;
            }
            src_pos += num_bytes_to_process;
        } else {
            let dictb = &mut d.dict.b;
            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
                let dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
                dictb.dict[dst_pos] = c;
                if dst_pos < MAX_MATCH_LEN - 1 {
                    dictb.dict[LZ_DICT_SIZE + dst_pos] = c;
                }

                lookahead_size += 1;
                if lookahead_size + d.dict.size >= MIN_MATCH_LEN.into() {
                    let ins_pos = lookahead_pos + lookahead_size - 3;
                    let hash = ((u32::from(dictb.dict[ins_pos & LZ_DICT_SIZE_MASK])
                        << (LZ_HASH_SHIFT * 2))
                        ^ ((u32::from(dictb.dict[(ins_pos + 1) & LZ_DICT_SIZE_MASK])
                            << LZ_HASH_SHIFT)
                            ^ u32::from(c)))
                        & (LZ_HASH_SIZE as u32 - 1);

                    dictb.next[ins_pos & LZ_DICT_SIZE_MASK] = dictb.hash[hash as usize];
                    dictb.hash[hash as usize] = ins_pos as u16;
                }
            }

            src_pos += num_bytes_to_process;
        }

        d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size);
        if d.params.flush == TDEFLFlush::None && lookahead_size < MAX_MATCH_LEN {
            break;
        }

        let mut len_to_move = 1;
        let mut cur_match_dist = 0;
        let mut cur_match_len = if saved_match_len != 0 {
            saved_match_len
        } else {
            u32::from(MIN_MATCH_LEN) - 1
        };
        let cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK;
        if d.params.flags & TDEFL_RLE_MATCHES != 0 {
            // If TDEFL_RLE_MATCHES is set, we only look for repeating sequences of the current byte.
            if d.dict.size != 0 {
                let c = d.dict.b.dict[(cur_pos.wrapping_sub(1)) & LZ_DICT_SIZE_MASK];
                cur_match_len = d.dict.b.dict[cur_pos..(cur_pos + lookahead_size)]
                    .iter()
                    .take_while(|&x| *x == c)
                    .count() as u32;
                if cur_match_len < MIN_MATCH_LEN.into() {
                    cur_match_len = 0
                } else {
                    cur_match_dist = 1
                }
            }
        } else {
            // Try to find a match for the bytes at the current position.
            let dist_len = d.dict.find_match(
                lookahead_pos,
                d.dict.size,
                lookahead_size as u32,
                cur_match_dist,
                cur_match_len,
            );
            cur_match_dist = dist_len.0;
            cur_match_len = dist_len.1;
        }

        let far_and_small = cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024;
        let filter_small = d.params.flags & TDEFL_FILTER_MATCHES != 0 && cur_match_len <= 5;
        if far_and_small || filter_small || cur_pos == cur_match_dist as usize {
            cur_match_dist = 0;
            cur_match_len = 0;
        }

        if saved_match_len != 0 {
            if cur_match_len > saved_match_len {
                record_literal(&mut d.huff, &mut d.lz, saved_lit);
                if cur_match_len >= 128 {
                    record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist);
                    saved_match_len = 0;
                    len_to_move = cur_match_len as usize;
                } else {
                    saved_lit = d.dict.b.dict[cur_pos];
                    saved_match_dist = cur_match_dist;
                    saved_match_len = cur_match_len;
                }
            } else {
                record_match(&mut d.huff, &mut d.lz, saved_match_len, saved_match_dist);
                len_to_move = (saved_match_len - 1) as usize;
                saved_match_len = 0;
            }
        } else if cur_match_dist == 0 {
            record_literal(
                &mut d.huff,
                &mut d.lz,
                d.dict.b.dict[cmp::min(cur_pos, d.dict.b.dict.len() - 1)],
            );
        } else if d.params.greedy_parsing
            || (d.params.flags & TDEFL_RLE_MATCHES != 0)
            || cur_match_len >= 128
        {
            // Greedy parsing, RLE matching, or a match of 128 bytes or longer: record the
            // match right away. (With lazy matching, shorter matches are deferred so we can
            // check for a better match at the next byte.)
            record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist);
            len_to_move = cur_match_len as usize;
        } else {
            saved_lit = d.dict.b.dict[cmp::min(cur_pos, d.dict.b.dict.len() - 1)];
            saved_match_dist = cur_match_dist;
            saved_match_len = cur_match_len;
        }

        lookahead_pos += len_to_move;
        assert!(lookahead_size >= len_to_move);
        lookahead_size -= len_to_move;
        d.dict.size = cmp::min(d.dict.size + len_to_move, LZ_DICT_SIZE);

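        // Flush the current block if the LZ code buffer is nearly full, or if the block
        // is already large and the emitted LZ codes take up more space than the raw
        // input bytes would, suggesting the data is not compressing well.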
        let lz_buf_tight = d.lz.code_position > LZ_CODE_BUF_SIZE - 8;
        let fat = ((d.lz.code_position * 115) >> 7) >= d.lz.total_bytes as usize;
        let buf_fat = (d.lz.total_bytes > 31 * 1024) && fat;

        if lz_buf_tight || buf_fat {
            d.params.src_pos = src_pos;
            // These values are used in flush_block, so we need to write them back here.
            d.dict.lookahead_size = lookahead_size;
            d.dict.lookahead_pos = lookahead_pos;

            let n = flush_block(d, callback, TDEFLFlush::None)
                .unwrap_or(TDEFLStatus::PutBufFailed as i32);
            if n != 0 {
                d.params.saved_lit = saved_lit;
                d.params.saved_match_dist = saved_match_dist;
                d.params.saved_match_len = saved_match_len;
                return n > 0;
            }
        }
    }

    d.params.src_pos = src_pos;
    d.dict.lookahead_size = lookahead_size;
    d.dict.lookahead_pos = lookahead_pos;
    d.params.saved_lit = saved_lit;
    d.params.saved_match_dist = saved_match_dist;
    d.params.saved_match_len = saved_match_len;
    true
}

const COMP_FAST_LOOKAHEAD_SIZE: usize = 4096;

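/// Compression loop for the fastest compression level: greedy parsing with a single
/// hash probe per position, hashing three-byte groups directly instead of using the
/// incremental hash of the normal modes.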
fn compress_fast(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool {
    let mut src_pos = d.params.src_pos;
    let mut lookahead_size = d.dict.lookahead_size;
    let mut lookahead_pos = d.dict.lookahead_pos;

    let mut cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK;
    let in_buf = match callback.in_buf {
        None => return true,
        Some(in_buf) => in_buf,
    };

    debug_assert!(d.lz.code_position < LZ_CODE_BUF_SIZE - 2);

    while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size > 0) {
        let mut dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
        let mut num_bytes_to_process = cmp::min(
            in_buf.len() - src_pos,
            COMP_FAST_LOOKAHEAD_SIZE - lookahead_size,
        );
        lookahead_size += num_bytes_to_process;

        while num_bytes_to_process != 0 {
            let n = cmp::min(LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
            d.dict.b.dict[dst_pos..dst_pos + n].copy_from_slice(&in_buf[src_pos..src_pos + n]);

            if dst_pos < MAX_MATCH_LEN - 1 {
                let m = cmp::min(n, MAX_MATCH_LEN - 1 - dst_pos);
                d.dict.b.dict[dst_pos + LZ_DICT_SIZE..dst_pos + LZ_DICT_SIZE + m]
                    .copy_from_slice(&in_buf[src_pos..src_pos + m]);
            }

            src_pos += n;
            dst_pos = (dst_pos + n) & LZ_DICT_SIZE_MASK;
            num_bytes_to_process -= n;
        }

        d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size);
        if d.params.flush == TDEFLFlush::None && lookahead_size < COMP_FAST_LOOKAHEAD_SIZE {
            break;
        }

        while lookahead_size >= 4 {
            let mut cur_match_len = 1;

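            // Read the next three bytes (a trigram) from the dictionary; fast mode
            // matches and hashes trigrams directly.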
            let first_trigram = d.dict.read_unaligned_u32(cur_pos) & 0xFF_FFFF;

            let hash = (first_trigram ^ (first_trigram >> (24 - (LZ_HASH_BITS - 8))))
                & LEVEL1_HASH_SIZE_MASK;

            let mut probe_pos = usize::from(d.dict.b.hash[hash as usize]);
            d.dict.b.hash[hash as usize] = lookahead_pos as u16;

            let mut cur_match_dist = (lookahead_pos - probe_pos) as u16;
            if cur_match_dist as usize <= d.dict.size {
                probe_pos &= LZ_DICT_SIZE_MASK;

                let trigram = d.dict.read_unaligned_u32(probe_pos) & 0xFF_FFFF;

                if first_trigram == trigram {
                    // The trigram was already compared, so start matching at a displacement
                    // of 3 bytes.
                    let mut p = cur_pos + 3;
                    let mut q = probe_pos + 3;
                    cur_match_len = (|| {
                        for _ in 0..32 {
                            let p_data: u64 = d.dict.read_unaligned_u64(p);
                            let q_data: u64 = d.dict.read_unaligned_u64(q);
                            let xor_data = p_data ^ q_data;
                            if xor_data == 0 {
                                p += 8;
                                q += 8;
                            } else {
                                let trailing = xor_data.trailing_zeros();
                                return p as u32 - cur_pos as u32 + (trailing >> 3);
                            }
                        }

                        if cur_match_dist == 0 {
                            0
                        } else {
                            MAX_MATCH_LEN as u32
                        }
                    })();

                    if cur_match_len < MIN_MATCH_LEN.into()
                        || (cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024)
                    {
                        let lit = first_trigram as u8;
                        cur_match_len = 1;
                        d.lz.write_code(lit);
                        *d.lz.get_flag() >>= 1;
                        d.huff.count[0][lit as usize] += 1;
                    } else {
                        // Limit the match to the length of the lookahead so we don't create a match
                        // that ends after the end of the input data.
                        cur_match_len = cmp::min(cur_match_len, lookahead_size as u32);
                        debug_assert!(cur_match_len >= MIN_MATCH_LEN.into());
                        debug_assert!(cur_match_dist >= 1);
                        debug_assert!(cur_match_dist as usize <= LZ_DICT_SIZE);
                        cur_match_dist -= 1;

                        d.lz.write_code((cur_match_len - u32::from(MIN_MATCH_LEN)) as u8);
                        d.lz.write_code(cur_match_dist as u8);
                        d.lz.write_code((cur_match_dist >> 8) as u8);

                        *d.lz.get_flag() >>= 1;
                        *d.lz.get_flag() |= 0x80;
                        if cur_match_dist < 512 {
                            d.huff.count[1][SMALL_DIST_SYM[cur_match_dist as usize] as usize] += 1;
                        } else {
                            d.huff.count[1]
                                [LARGE_DIST_SYM[(cur_match_dist >> 8) as usize] as usize] += 1;
                        }

                        d.huff.count[0][LEN_SYM[(cur_match_len - u32::from(MIN_MATCH_LEN)) as usize]
                            as usize] += 1;
                    }
                } else {
                    d.lz.write_code(first_trigram as u8);
                    *d.lz.get_flag() >>= 1;
                    d.huff.count[0][first_trigram as u8 as usize] += 1;
                }

                d.lz.consume_flag();
                d.lz.total_bytes += cur_match_len;
                lookahead_pos += cur_match_len as usize;
                d.dict.size = cmp::min(d.dict.size + cur_match_len as usize, LZ_DICT_SIZE);
                cur_pos = (cur_pos + cur_match_len as usize) & LZ_DICT_SIZE_MASK;
                lookahead_size -= cur_match_len as usize;

                if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 {
                    // These values are used in flush_block, so we need to write them back here.
                    d.dict.lookahead_size = lookahead_size;
                    d.dict.lookahead_pos = lookahead_pos;

                    let n = match flush_block(d, callback, TDEFLFlush::None) {
                        Err(_) => {
                            d.params.src_pos = src_pos;
                            d.params.prev_return_status = TDEFLStatus::PutBufFailed;
                            return false;
                        }
                        Ok(status) => status,
                    };
                    if n != 0 {
                        d.params.src_pos = src_pos;
                        return n > 0;
                    }
                    debug_assert!(d.lz.code_position < LZ_CODE_BUF_SIZE - 2);

                    lookahead_size = d.dict.lookahead_size;
                    lookahead_pos = d.dict.lookahead_pos;
                }
            }
        }

        while lookahead_size != 0 {
            let lit = d.dict.b.dict[cur_pos];
            d.lz.total_bytes += 1;
            d.lz.write_code(lit);
            *d.lz.get_flag() >>= 1;
            d.lz.consume_flag();

            d.huff.count[0][lit as usize] += 1;
            lookahead_pos += 1;
            d.dict.size = cmp::min(d.dict.size + 1, LZ_DICT_SIZE);
            cur_pos = (cur_pos + 1) & LZ_DICT_SIZE_MASK;
            lookahead_size -= 1;

            if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 {
                // These values are used in flush_block, so we need to write them back here.
                d.dict.lookahead_size = lookahead_size;
                d.dict.lookahead_pos = lookahead_pos;

                let n = match flush_block(d, callback, TDEFLFlush::None) {
                    Err(_) => {
                        d.params.prev_return_status = TDEFLStatus::PutBufFailed;
                        d.params.src_pos = src_pos;
                        return false;
                    }
                    Ok(status) => status,
                };
                if n != 0 {
                    d.params.src_pos = src_pos;
                    return n > 0;
                }

                lookahead_size = d.dict.lookahead_size;
                lookahead_pos = d.dict.lookahead_pos;
            }
        }
    }

    d.params.src_pos = src_pos;
    d.dict.lookahead_size = lookahead_size;
    d.dict.lookahead_pos = lookahead_pos;
    true
}

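/// Copy as much pending output as fits from the local buffer to the caller's output
/// buffer (when output goes to a buffer rather than a callback), and report `Done`
/// once the stream is finished and everything has been flushed.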
fn flush_output_buffer(c: &mut CallbackOxide, p: &mut ParamsOxide) -> (TDEFLStatus, usize, usize) {
    let mut res = (TDEFLStatus::Okay, p.src_pos, 0);
    if let CallbackOut::Buf(ref mut cb) = c.out {
        let n = cmp::min(cb.out_buf.len() - p.out_buf_ofs, p.flush_remaining as usize);
        if n != 0 {
            cb.out_buf[p.out_buf_ofs..p.out_buf_ofs + n]
                .copy_from_slice(&p.local_buf.b[p.flush_ofs as usize..p.flush_ofs as usize + n]);
        }
        p.flush_ofs += n as u32;
        p.flush_remaining -= n as u32;
        p.out_buf_ofs += n;
        res.2 = p.out_buf_ofs;
    }

    if p.finished && p.flush_remaining == 0 {
        res.0 = TDEFLStatus::Done
    }
    res
}

/// Main compression function. Tries to compress as much as possible from `in_buf` and
/// puts compressed output into `out_buf`.
///
/// The value of `flush` determines whether the compressor should attempt to flush all output,
/// and whether it should try to finish the stream.
///
/// Use [`TDEFLFlush::Finish`] on the final call to signal that the stream is finishing.
///
/// Note that this function does not keep track of whether a flush marker has been output, so
/// if called using [`TDEFLFlush::Sync`], the caller needs to ensure there is enough space in the
/// output buffer if they want to avoid repeated flush markers.
/// See #105 for details.
///
/// # Returns
/// Returns a tuple containing the current status of the compressor, the current position
/// in the input buffer and the current position in the output buffer.
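///
/// # Example
///
/// A minimal sketch of buffer-to-buffer use; the level/window/strategy values and the
/// output buffer size here are arbitrary:
///
/// ```ignore
/// let flags = create_comp_flags_from_zip_params(6, 1, 0);
/// let mut d = CompressorOxide::new(flags);
/// let input = b"Hello, hello, hello, hello!";
/// let mut out = vec![0u8; 256];
/// // Finish in one call; a caller with more input would loop while the status is Okay.
/// let (status, bytes_in, bytes_out) = compress(&mut d, input, &mut out, TDEFLFlush::Finish);
/// assert_eq!(status, TDEFLStatus::Done);
/// assert_eq!(bytes_in, input.len());
/// let compressed = &out[..bytes_out];
/// ```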
pub fn compress(
    d: &mut CompressorOxide,
    in_buf: &[u8],
    out_buf: &mut [u8],
    flush: TDEFLFlush,
) -> (TDEFLStatus, usize, usize) {
    compress_inner(
        d,
        &mut CallbackOxide::new_callback_buf(in_buf, out_buf),
        flush,
    )
}

/// Main compression function. Compressed output is passed to the provided callback function.
///
/// # Returns
/// Returns a tuple containing the current status of the compressor and the current position
/// in the input buffer.
///
/// The caller is responsible for ensuring the `CallbackFunc` struct will not cause undefined
/// behaviour.
pub fn compress_to_output(
    d: &mut CompressorOxide,
    in_buf: &[u8],
    flush: TDEFLFlush,
    mut callback_func: impl FnMut(&[u8]) -> bool,
) -> (TDEFLStatus, usize) {
    let res = compress_inner(
        d,
        &mut CallbackOxide::new_callback_func(
            in_buf,
            CallbackFunc {
                put_buf_func: &mut callback_func,
            },
        ),
        flush,
    );

    (res.0, res.1)
}

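// Shared driver for `compress` and `compress_to_output`: validates the flush state,
// picks the stored/fast/normal compression loop based on the compression flags,
// updates the adler32 checksum and flushes any pending output.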
fn compress_inner(
    d: &mut CompressorOxide,
    callback: &mut CallbackOxide,
    flush: TDEFLFlush,
) -> (TDEFLStatus, usize, usize) {
    d.params.out_buf_ofs = 0;
    d.params.src_pos = 0;

    let prev_ok = d.params.prev_return_status == TDEFLStatus::Okay;
    let flush_finish_once = d.params.flush != TDEFLFlush::Finish || flush == TDEFLFlush::Finish;

    d.params.flush = flush;
    if !prev_ok || !flush_finish_once {
        d.params.prev_return_status = TDEFLStatus::BadParam;
        return (d.params.prev_return_status, 0, 0);
    }

    if d.params.flush_remaining != 0 || d.params.finished {
        let res = flush_output_buffer(callback, &mut d.params);
        d.params.prev_return_status = res.0;
        return res;
    }

    let one_probe = d.params.flags & MAX_PROBES_MASK == 1;
    let greedy = d.params.flags & TDEFL_GREEDY_PARSING_FLAG != 0;
    let filter_or_rle = d.params.flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS) != 0;

    let raw = d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0;

    let compress_success = if raw {
        compress_stored(d, callback)
    } else if one_probe && greedy && !filter_or_rle {
        compress_fast(d, callback)
    } else {
        compress_normal(d, callback)
    };

    if !compress_success {
        return (
            d.params.prev_return_status,
            d.params.src_pos,
            d.params.out_buf_ofs,
        );
    }

    if let Some(in_buf) = callback.in_buf {
        if d.params.flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32) != 0 {
            d.params.adler32 = update_adler32(d.params.adler32, &in_buf[..d.params.src_pos]);
        }
    }

    let flush_none = d.params.flush == TDEFLFlush::None;
    let in_left = callback.in_buf.map_or(0, |buf| buf.len()) - d.params.src_pos;
    let remaining = in_left != 0 || d.params.flush_remaining != 0;
    if !flush_none && d.dict.lookahead_size == 0 && !remaining {
        let flush = d.params.flush;
        match flush_block(d, callback, flush) {
            Err(_) => {
                d.params.prev_return_status = TDEFLStatus::PutBufFailed;
                return (
                    d.params.prev_return_status,
                    d.params.src_pos,
                    d.params.out_buf_ofs,
                );
            }
            Ok(x) if x < 0 => {
                return (
                    d.params.prev_return_status,
                    d.params.src_pos,
                    d.params.out_buf_ofs,
                )
            }
            _ => {
                d.params.finished = d.params.flush == TDEFLFlush::Finish;
                if d.params.flush == TDEFLFlush::Full {
                    d.dict.b.hash.fill(0);
                    d.dict.b.next.fill(0);
                    d.dict.size = 0;
                }
            }
        }
    }

    let res = flush_output_buffer(callback, &mut d.params);
    d.params.prev_return_status = res.0;

    res
}

/// Create a set of compression flags using parameters used by zlib and other compressors.
/// Mainly intended for use when transitioning from C libraries, as it deals with raw integers.
///
/// # Parameters
/// `level`: Determines the compression level. Clamped to a maximum of 10; negative values
/// result in `CompressionLevel::DefaultLevel`.
/// `window_bits`: Above 0, wraps the stream in a zlib wrapper; 0 or negative for a raw deflate
/// stream.
/// `strategy`: Sets the strategy if it conforms to any of the values in `CompressionStrategy`.
///
/// # Notes
/// This function may be removed or moved to the `miniz_oxide_c_api` in the future.
pub fn create_comp_flags_from_zip_params(level: i32, window_bits: i32, strategy: i32) -> u32 {
    let num_probes = (if level >= 0 {
        cmp::min(10, level)
    } else {
        CompressionLevel::DefaultLevel as i32
    }) as usize;
    let greedy = if level <= 3 {
        TDEFL_GREEDY_PARSING_FLAG
    } else {
        0
    };
    let mut comp_flags = u32::from(NUM_PROBES[num_probes]) | greedy;

    if window_bits > 0 {
        comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
    }

    if level == 0 {
        comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
    } else if strategy == CompressionStrategy::Filtered as i32 {
        comp_flags |= TDEFL_FILTER_MATCHES;
    } else if strategy == CompressionStrategy::HuffmanOnly as i32 {
        comp_flags &= !MAX_PROBES_MASK;
    } else if strategy == CompressionStrategy::Fixed as i32 {
        comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
    } else if strategy == CompressionStrategy::RLE as i32 {
        comp_flags |= TDEFL_RLE_MATCHES;
    }

    comp_flags
}

#[cfg(test)]
mod test {
    use super::{
        compress_to_output, create_comp_flags_from_zip_params, read_u16_le, write_u16_le,
        CompressionStrategy, CompressorOxide, TDEFLFlush, TDEFLStatus, DEFAULT_FLAGS,
        MZ_DEFAULT_WINDOW_BITS,
    };
    use crate::inflate::decompress_to_vec;
    use alloc::vec;

    #[test]
    fn u16_to_slice() {
        let mut slice = [0, 0];
        write_u16_le(2000, &mut slice, 0);
        assert_eq!(slice, [208, 7]);
    }

    #[test]
    fn u16_from_slice() {
        let slice = [208, 7];
        assert_eq!(read_u16_le(&slice, 0), 2000);
    }

    #[test]
    fn compress_output() {
        assert_eq!(
            DEFAULT_FLAGS,
            create_comp_flags_from_zip_params(
                4,
                MZ_DEFAULT_WINDOW_BITS,
                CompressionStrategy::Default as i32
            )
        );

        let slice = [
            1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3,
        ];
        let mut encoded = vec![];
        let flags = create_comp_flags_from_zip_params(6, 0, 0);
        let mut d = CompressorOxide::new(flags);
        let (status, in_consumed) =
            compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| {
                encoded.extend_from_slice(out);
                true
            });

        assert_eq!(status, TDEFLStatus::Done);
        assert_eq!(in_consumed, slice.len());

        let decoded = decompress_to_vec(&encoded[..]).unwrap();
        assert_eq!(&decoded[..], &slice[..]);
    }

    #[test]
    /// Check fast compress mode
    fn compress_fast() {
        let slice = [
            1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3,
        ];
        let mut encoded = vec![];
        let flags = create_comp_flags_from_zip_params(1, 0, 0);
        let mut d = CompressorOxide::new(flags);
        let (status, in_consumed) =
            compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| {
                encoded.extend_from_slice(out);
                true
            });

        assert_eq!(status, TDEFLStatus::Done);
        assert_eq!(in_consumed, slice.len());

        // Needs to be altered if algorithm improves.
        assert_eq!(
            &encoded[..],
            [99, 100, 98, 102, 1, 98, 48, 98, 3, 147, 204, 76, 204, 140, 76, 204, 0]
        );

        let decoded = decompress_to_vec(&encoded[..]).unwrap();
        assert_eq!(&decoded[..], &slice[..]);
    }

    #[test]
    fn zlib_window_bits() {
        use crate::inflate::stream::{inflate, InflateState};
        use crate::DataFormat;
        use alloc::boxed::Box;
        let slice = [
            1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3, 35, 22, 22, 2,
            6, 2, 6,
        ];
        let mut encoded = vec![];
        let flags = create_comp_flags_from_zip_params(2, 1, CompressionStrategy::RLE.into());
        let mut d = CompressorOxide::new(flags);
        let (status, in_consumed) =
            compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| {
                encoded.extend_from_slice(out);
                true
            });

        assert_eq!(status, TDEFLStatus::Done);
        assert_eq!(in_consumed, slice.len());

        let mut output = vec![0; slice.len()];

        let mut decompressor = Box::new(InflateState::new(DataFormat::Zlib));

        let mut out_slice = output.as_mut_slice();
        // Feed 1 byte at a time with no back buffer to verify that RLE encoding was used.
        for i in 0..encoded.len() {
            let result = inflate(
                &mut decompressor,
                &encoded[i..i + 1],
                out_slice,
                crate::MZFlush::None,
            );
            out_slice = &mut out_slice[result.bytes_written..];
        }
        let cmf = decompressor.decompressor().zlib_header().0;
        assert_eq!(cmf, 8);
        assert_eq!(output, slice)
    }
}