// zopfli/deflate.rs

use alloc::vec::Vec;
use core::{cmp, iter};

#[cfg(feature = "std")]
use log::{debug, log_enabled};

use crate::{
    blocksplitter::{blocksplit, blocksplit_lz77},
    cache::ZopfliLongestMatchCache,
    iter::ToFlagLastIterator,
    katajainen::length_limited_code_lengths,
    lz77::{LitLen, Lz77Store},
    squeeze::{lz77_optimal, lz77_optimal_fixed},
    symbols::{
        get_dist_extra_bits, get_dist_extra_bits_value, get_dist_symbol,
        get_dist_symbol_extra_bits, get_length_extra_bits, get_length_extra_bits_value,
        get_length_symbol, get_length_symbol_extra_bits,
    },
    tree::lengths_to_symbols,
    util::{ZOPFLI_NUM_D, ZOPFLI_NUM_LL, ZOPFLI_WINDOW_SIZE},
    Error, Options, Write,
};

/// A DEFLATE encoder powered by the Zopfli algorithm that compresses data written
/// to it to the specified sink. Most users will find using [`compress`](crate::compress)
/// easier and more performant.
///
/// The data will be compressed as soon as possible, without trying to fill a
/// backreference window. As a consequence, frequent short writes may cause more
/// DEFLATE blocks to be emitted with less optimal Huffman trees, which can hurt
/// compression and runtime. If they are a concern, short writes can be conveniently
/// dealt with by wrapping this encoder with a [`BufWriter`](std::io::BufWriter), as done
/// by the [`new_buffered`](DeflateEncoder::new_buffered) method. An adequate write size
/// would be >32 KiB, which allows the second complete chunk to leverage a full-sized
/// backreference window.
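///
/// # Example
///
/// A minimal sketch of streaming compression through the buffered encoder
/// (this example assumes the `std` feature and elides error handling):
///
/// ```no_run
/// use std::io::Write;
///
/// use zopfli::{BlockType, DeflateEncoder, Options};
///
/// let mut compressed = Vec::new();
/// let mut encoder =
///     DeflateEncoder::new_buffered(Options::default(), BlockType::Dynamic, &mut compressed);
/// encoder.write_all(b"Hello, world!").unwrap();
/// // Unwrap the `BufWriter` and finish the DEFLATE stream explicitly,
/// // so that I/O errors can be handled.
/// encoder.into_inner().unwrap().finish().unwrap();
/// ```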
pub struct DeflateEncoder<W: Write> {
    options: Options,
    btype: BlockType,
    have_chunk: bool,
    chunk_start: usize,
    window_and_chunk: Vec<u8>,
    bitwise_writer: Option<BitwiseWriter<W>>,
}

impl<W: Write> DeflateEncoder<W> {
    /// Creates a new Zopfli DEFLATE encoder that will operate according to the
    /// specified options.
    pub fn new(options: Options, btype: BlockType, sink: W) -> Self {
        Self {
            options,
            btype,
            have_chunk: false,
            chunk_start: 0,
            window_and_chunk: Vec::with_capacity(ZOPFLI_WINDOW_SIZE),
            bitwise_writer: Some(BitwiseWriter::new(sink)),
        }
    }

    /// Creates a new Zopfli DEFLATE encoder that operates according to the
    /// specified options and is wrapped with a buffer to guarantee that
    /// data is compressed in large chunks, which is necessary for decent
    /// performance and good compression ratio.
    #[cfg(feature = "std")]
    pub fn new_buffered(options: Options, btype: BlockType, sink: W) -> std::io::BufWriter<Self> {
        std::io::BufWriter::with_capacity(
            crate::util::ZOPFLI_MASTER_BLOCK_SIZE,
            Self::new(options, btype, sink),
        )
    }

    /// Encodes any pending chunks of data and writes them to the sink,
    /// consuming the encoder and returning the wrapped sink. The sink
    /// will have received a complete DEFLATE stream when this method
    /// returns.
    ///
    /// The encoder is automatically [`finish`](Self::finish)ed when
    /// dropped, but explicitly finishing it with this method allows
    /// handling I/O errors.
    pub fn finish(mut self) -> Result<W, Error> {
        self.__finish().map(|sink| sink.unwrap())
    }

    /// Compresses the chunk stored at `window_and_chunk`. This includes
    /// a rolling window of the last `ZOPFLI_WINDOW_SIZE` data bytes, if
    /// available.
    #[inline]
    fn compress_chunk(&mut self, is_last: bool) -> Result<(), Error> {
        deflate_part(
            &self.options,
            self.btype,
            is_last,
            &self.window_and_chunk,
            self.chunk_start,
            self.window_and_chunk.len(),
            self.bitwise_writer.as_mut().unwrap(),
        )
    }

    /// Sets the next chunk that will be compressed by the next
    /// call to `compress_chunk` and updates the rolling data window
    /// accordingly.
    fn set_chunk(&mut self, chunk: &[u8]) {
        // Remove bytes exceeding the window size. Start with the
        // oldest bytes, which are at the beginning of the buffer.
        // The buffer length is then the position where the chunk
        // we've just received starts
        self.window_and_chunk.drain(
            ..self
                .window_and_chunk
                .len()
                .saturating_sub(ZOPFLI_WINDOW_SIZE),
        );
        self.chunk_start = self.window_and_chunk.len();

        self.window_and_chunk.extend_from_slice(chunk);

        self.have_chunk = true;
    }

    /// Encodes the last chunk and finishes any partial bits.
    /// The encoder will be unusable for further compression
    /// after this method returns. This is intended to be an
    /// implementation detail of the `Drop` trait and
    /// [`finish`](Self::finish) method.
    fn __finish(&mut self) -> Result<Option<W>, Error> {
        if self.bitwise_writer.is_none() {
            return Ok(None);
        }

        self.compress_chunk(true)?;

        let mut bitwise_writer = self.bitwise_writer.take().unwrap();
        bitwise_writer.finish_partial_bits()?;

        Ok(Some(bitwise_writer.out))
    }

    /// Gets a reference to the underlying writer.
    pub fn get_ref(&self) -> &W {
        &self.bitwise_writer.as_ref().unwrap().out
    }

    /// Gets a mutable reference to the underlying writer.
    ///
    /// Note that mutating the output/input state of the stream may corrupt
    /// this object, so care must be taken when using this method.
    pub fn get_mut(&mut self) -> &mut W {
        &mut self.bitwise_writer.as_mut().unwrap().out
    }
}

impl<W: Write> Write for DeflateEncoder<W> {
    fn write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        // Any previous chunk is known to be non-last at this point,
        // so compress it now
        if self.have_chunk {
            self.compress_chunk(false)?;
        }

        // Set the chunk to be used for the next compression operation
        // to this chunk. We don't know whether it's last or not yet
        self.set_chunk(buf);

        Ok(buf.len())
    }

    fn flush(&mut self) -> Result<(), Error> {
        self.bitwise_writer.as_mut().unwrap().out.flush()
    }
}

impl<W: Write> Drop for DeflateEncoder<W> {
    fn drop(&mut self) {
        self.__finish().ok();
    }
}

// Boilerplate to make latest Rustdoc happy: https://github.com/rust-lang/rust/issues/117796
#[cfg(all(doc, feature = "std"))]
impl<W: crate::io::Write> std::io::Write for DeflateEncoder<W> {
    fn write(&mut self, _buf: &[u8]) -> std::io::Result<usize> {
        unimplemented!()
    }

    fn flush(&mut self) -> std::io::Result<()> {
        unimplemented!()
    }
}

/// Deflates a part of the input, to allow for chunked, streaming compression
/// with [`DeflateEncoder`]. It is possible to call this function multiple
/// times in a row, shifting `instart` and `inend` to the next bytes of the
/// data. Only the part between `instart` and `inend` is compressed, but if
/// `instart` is larger than 0, the earlier bytes are still used as the
/// initial dictionary (back window) for LZ77.
/// This function will usually output multiple deflate blocks. If `final_block`
/// is true, the final bit will be set on the last block.
fn deflate_part<W: Write>(
    options: &Options,
    btype: BlockType,
    final_block: bool,
    in_data: &[u8],
    instart: usize,
    inend: usize,
    bitwise_writer: &mut BitwiseWriter<W>,
) -> Result<(), Error> {
    /* If btype=Dynamic is specified, all block types are tried. If a lesser
    btype is given, that one is forced instead. Neither of the lesser types
    needs block splitting, as they have no dynamic Huffman trees. */
    match btype {
        BlockType::Uncompressed => {
            add_non_compressed_block(final_block, in_data, instart, inend, bitwise_writer)
        }
        BlockType::Fixed => {
            let mut store = Lz77Store::new();

            lz77_optimal_fixed(
                &mut ZopfliLongestMatchCache::new(inend - instart),
                in_data,
                instart,
                inend,
                &mut store,
            );
            add_lz77_block(
                btype,
                final_block,
                in_data,
                &store,
                0,
                store.size(),
                0,
                bitwise_writer,
            )
        }
        BlockType::Dynamic => blocksplit_attempt(
            options,
            final_block,
            in_data,
            instart,
            inend,
            bitwise_writer,
        ),
    }
}

/// The type of data blocks to generate for a DEFLATE stream.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(all(test, feature = "std"), derive(proptest_derive::Arbitrary))]
#[derive(Default)]
pub enum BlockType {
    /// Non-compressed blocks (BTYPE=00).
    ///
    /// The input data will be divided into chunks up to 64 KiB big and
    /// stored in the DEFLATE stream without compression. This is mainly
    /// useful for test and development purposes.
    Uncompressed,
    /// Compressed blocks with fixed Huffman codes (BTYPE=01).
    ///
    /// The input data will be compressed into DEFLATE blocks using a fixed
    /// Huffman tree defined in the DEFLATE specification. This provides fast
    /// but poor compression, as the Zopfli algorithm is not actually used.
    Fixed,
    /// Select the most space-efficient block types for the input data.
    /// This is the recommended type for the vast majority of Zopfli
    /// applications.
    ///
    /// This mode lets the Zopfli algorithm choose the combination of block
    /// types that minimizes data size. The emitted block types may be
    /// [`Uncompressed`](Self::Uncompressed) or [`Fixed`](Self::Fixed), in
    /// addition to compressed with dynamic Huffman codes (BTYPE=10).
    #[default]
    Dynamic,
}

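/// Returns the code lengths of the fixed Huffman trees defined in RFC 1951,
/// section 3.2.6: literal/length symbols 0-143, 144-255, 256-279 and 280-287
/// use 8, 9, 7 and 8 bits respectively, and all 32 distance codes (including
/// the two unused ones) use 5 bits.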
fn fixed_tree() -> (Vec<u32>, Vec<u32>) {
    let mut ll = Vec::with_capacity(ZOPFLI_NUM_LL);
    ll.resize(144, 8);
    ll.resize(256, 9);
    ll.resize(280, 7);
    ll.resize(288, 8);
    let d = vec![5; ZOPFLI_NUM_D];
    (ll, d)
}

/// Changes the population counts in a way that the subsequent Huffman tree
/// compression, especially its RLE part, will be more likely to compress this
/// data more efficiently. `counts` contains the histogram.
fn optimize_huffman_for_rle(counts: &mut [usize]) {
    let mut length = counts.len();
    // 1) We don't want to touch the trailing zeros. We may break the
    // rules of the format by adding more data in the distance codes.
    loop {
        if length == 0 {
            return;
        }
        if counts[length - 1] != 0 {
            // Now counts[0..length - 1] does not have trailing zeros.
            break;
        }
        length -= 1;
    }

    // 2) Let's mark all population counts that already can be encoded
    // with an rle code.
    let mut good_for_rle = vec![false; length];

    // Let's not spoil any of the existing good rle codes.
    // Mark any seq of 0's that is longer than 5 as a good_for_rle.
    // Mark any seq of non-0's that is longer than 7 as a good_for_rle.
    let mut symbol = counts[0];
    let mut stride = 0;
    for (i, &count) in counts.iter().enumerate().take(length) {
        if count == symbol {
            stride += 1;
        } else {
            if (symbol == 0 && stride >= 5) || (symbol != 0 && stride >= 7) {
                for k in 0..stride {
                    good_for_rle[i - k - 1] = true;
                }
            }
            stride = 1;
            symbol = count;
        }
    }

    // 3) Let's replace those population counts that lead to more rle codes.
    stride = 0;
    let mut limit = counts[0];
    let mut sum = 0;
    for i in 0..=length {
        // Heuristic for selecting the stride ranges to collapse.
        if i == length || good_for_rle[i] || (counts[i] as i32 - limit as i32).abs() >= 4 {
            if stride >= 4 || (stride >= 3 && sum == 0) {
                // The stride must end, collapse what we have, if we have enough (4).
                let count = if sum == 0 {
                    // Don't upgrade an all zeros stride to ones.
                    0
                } else {
                    cmp::max((sum + stride / 2) / stride, 1)
                };
                set_counts_to_count(counts, count, i, stride);
            }
            stride = 0;
            sum = 0;
            if length > 2 && i < length - 3 {
                // All interesting strides have a count of at least 4,
                // at least when non-zeros.
                limit = (counts[i] + counts[i + 1] + counts[i + 2] + counts[i + 3] + 2) / 4;
            } else if i < length {
                limit = counts[i];
            } else {
                limit = 0;
            }
        }
        stride += 1;
        if i != length {
            sum += counts[i];
        }
    }
}

// Ensures there are at least 2 distance codes to support buggy decoders.
// Zlib 1.2.1 and below have a bug where they fail if there isn't at least 1
// distance code (with length > 0), even though it's valid according to the
// deflate spec to have 0 distance codes. On top of that, some mobile phones
// require at least two distance codes. To support these decoders too (but
// potentially at the cost of a few bytes), add dummy code lengths of 1.
// References to this bug can be found in the changelog of
// Zlib 1.2.2 and here: http://www.jonof.id.au/forum/index.php?topic=515.0.
//
// d_lengths: the 32 lengths of the distance codes.
fn patch_distance_codes_for_buggy_decoders(d_lengths: &mut [u32]) {
    // Ignore the two unused codes from the spec
    let num_dist_codes = d_lengths
        .iter()
        .take(30)
        .filter(|&&d_length| d_length != 0)
        .count();

    match num_dist_codes {
        0 => {
            d_lengths[0] = 1;
            d_lengths[1] = 1;
        }
        1 => {
            let index = usize::from(d_lengths[0] != 0);
            d_lengths[index] = 1;
        }
        _ => {} // Two or more codes is fine.
    }
}

/// Same as `calculate_block_symbol_size`, but for blocks smaller than the
/// histogram size.
fn calculate_block_symbol_size_small(
    ll_lengths: &[u32],
    d_lengths: &[u32],
    lz77: &Lz77Store,
    lstart: usize,
    lend: usize,
) -> usize {
    let mut result = 0;

    debug_assert!(lend == lstart || lend - 1 < lz77.size());

    for &item in &lz77.litlens[lstart..lend] {
        match item {
            LitLen::Literal(litlens_i) => {
                debug_assert!(litlens_i < 259);
                result += ll_lengths[litlens_i as usize];
            }
            LitLen::LengthDist(litlens_i, dists_i) => {
                debug_assert!(litlens_i < 259);
                let ll_symbol = get_length_symbol(litlens_i as usize);
                let d_symbol = get_dist_symbol(dists_i) as usize;
                result += ll_lengths[ll_symbol];
                result += d_lengths[d_symbol];
                result += get_length_symbol_extra_bits(ll_symbol);
                result += get_dist_symbol_extra_bits(d_symbol);
            }
        }
    }
    result += ll_lengths[256]; // end symbol
    result as usize
}

/// Same as `calculate_block_symbol_size`, but with the histogram provided by the caller.
fn calculate_block_symbol_size_given_counts(
    ll_counts: &[usize],
    d_counts: &[usize],
    ll_lengths: &[u32],
    d_lengths: &[u32],
    lz77: &Lz77Store,
    lstart: usize,
    lend: usize,
) -> usize {
    if lstart + ZOPFLI_NUM_LL * 3 > lend {
        calculate_block_symbol_size_small(ll_lengths, d_lengths, lz77, lstart, lend)
    } else {
        let mut result = 0;
        for i in 0..256 {
            result += ll_lengths[i] * ll_counts[i] as u32;
        }
        for i in 257..286 {
            result += ll_lengths[i] * ll_counts[i] as u32;
            result += (get_length_symbol_extra_bits(i) as usize * ll_counts[i]) as u32;
        }
        for i in 0..30 {
            result += d_lengths[i] * d_counts[i] as u32;
            result += (get_dist_symbol_extra_bits(i) as usize * d_counts[i]) as u32;
        }
        result += ll_lengths[256]; // end symbol
        result as usize
    }
}

/// Calculates size of the part after the header and tree of an LZ77 block, in bits.
fn calculate_block_symbol_size(
    ll_lengths: &[u32],
    d_lengths: &[u32],
    lz77: &Lz77Store,
    lstart: usize,
    lend: usize,
) -> usize {
    if lstart + ZOPFLI_NUM_LL * 3 > lend {
        calculate_block_symbol_size_small(ll_lengths, d_lengths, lz77, lstart, lend)
    } else {
        let (ll_counts, d_counts) = lz77.get_histogram(lstart, lend);
        calculate_block_symbol_size_given_counts(
            &*ll_counts,
            &*d_counts,
            ll_lengths,
            d_lengths,
            lz77,
            lstart,
            lend,
        )
    }
}

/// Calculates how many bits the encoding of the Huffman tree takes, without
/// producing any output; like `encode_tree`, but it only returns the size and
/// runs faster.
fn encode_tree_no_output(
    ll_lengths: &[u32],
    d_lengths: &[u32],
    use_16: bool,
    use_17: bool,
    use_18: bool,
) -> usize {
    let mut hlit = 29; /* 286 - 257 */
    let mut hdist = 29; /* 32 - 1, but gzip does not like hdist > 29.*/

    let mut clcounts = [0; 19];
    /* The order in which code length code lengths are encoded as per deflate. */
    let order = [
        16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
    ];
    let mut result_size = 0;

    /* Trim zeros. */
    while hlit > 0 && ll_lengths[257 + hlit - 1] == 0 {
        hlit -= 1;
    }
    while hdist > 0 && d_lengths[1 + hdist - 1] == 0 {
        hdist -= 1;
    }
    let hlit2 = hlit + 257;

    let lld_total = hlit2 + hdist + 1; /* Total amount of literal, length, distance codes. */

    let mut i = 0;

    while i < lld_total {
        /* This is an encoding of a huffman tree, so now the length is a symbol */
        let symbol = if i < hlit2 {
            ll_lengths[i]
        } else {
            d_lengths[i - hlit2]
        } as u8;

        let mut count = 1;
        if use_16 || (symbol == 0 && (use_17 || use_18)) {
            let mut j = i + 1;
            let mut symbol_calc = if j < hlit2 {
                ll_lengths[j]
            } else {
                d_lengths[j - hlit2]
            } as u8;

            while j < lld_total && symbol == symbol_calc {
                count += 1;
                j += 1;
                symbol_calc = if j < hlit2 {
                    ll_lengths[j]
                } else {
                    d_lengths[j - hlit2]
                } as u8;
            }
        }

        i += count - 1;

        /* Repetitions of zeroes */
        if symbol == 0 && count >= 3 {
            if use_18 {
                while count >= 11 {
                    let count2 = if count > 138 { 138 } else { count };
                    clcounts[18] += 1;
                    count -= count2;
                }
            }
            if use_17 {
                while count >= 3 {
                    let count2 = if count > 10 { 10 } else { count };
                    clcounts[17] += 1;
                    count -= count2;
                }
            }
        }

        /* Repetitions of any symbol */
        if use_16 && count >= 4 {
            count -= 1; /* Since the first one is hardcoded. */
            clcounts[symbol as usize] += 1;
            while count >= 3 {
                let count2 = if count > 6 { 6 } else { count };
                clcounts[16] += 1;
                count -= count2;
            }
        }

        /* No or insufficient repetition */
        clcounts[symbol as usize] += count;
        i += 1;
    }

    let clcl = length_limited_code_lengths(&clcounts, 7);

    let mut hclen = 15;
    /* Trim zeros. */
    while hclen > 0 && clcounts[order[hclen + 4 - 1]] == 0 {
        hclen -= 1;
    }

    result_size += 14; /* hlit, hdist, hclen bits */
    result_size += (hclen + 4) * 3; /* clcl bits */
    for i in 0..19 {
        result_size += clcl[i] as usize * clcounts[i];
    }
    /* Extra bits. */
    result_size += clcounts[16] * 2;
    result_size += clcounts[17] * 3;
    result_size += clcounts[18] * 7;

    result_size
}

/// Gives the exact size of the tree, in bits, as it will be encoded in DEFLATE.
fn calculate_tree_size(ll_lengths: &[u32], d_lengths: &[u32]) -> usize {
    static TRUTH_TABLE: [(bool, bool, bool); 8] = [
        (false, false, false),
        (true, false, false),
        (false, true, false),
        (true, true, false),
        (false, false, true),
        (true, false, true),
        (false, true, true),
        (true, true, true),
    ];

    TRUTH_TABLE
        .iter()
        .map(|&(use_16, use_17, use_18)| {
            encode_tree_no_output(ll_lengths, d_lengths, use_16, use_17, use_18)
        })
        .min()
        .unwrap_or(0)
}

/// Encodes the Huffman tree to the output and returns how many bits its
/// encoding takes.
// TODO: This return value is unused.
fn encode_tree<W: Write>(
    ll_lengths: &[u32],
    d_lengths: &[u32],
    use_16: bool,
    use_17: bool,
    use_18: bool,
    bitwise_writer: &mut BitwiseWriter<W>,
) -> Result<usize, Error> {
    let mut hlit = 29; /* 286 - 257 */
    let mut hdist = 29; /* 32 - 1, but gzip does not like hdist > 29.*/

    let mut clcounts = [0; 19];
    /* The order in which code length code lengths are encoded as per deflate. */
    let order = [
        16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
    ];
    let mut result_size = 0;

    let mut rle = vec![];
    let mut rle_bits = vec![];

    /* Trim zeros. */
    while hlit > 0 && ll_lengths[257 + hlit - 1] == 0 {
        hlit -= 1;
    }
    while hdist > 0 && d_lengths[1 + hdist - 1] == 0 {
        hdist -= 1;
    }
    let hlit2 = hlit + 257;

    let lld_total = hlit2 + hdist + 1; /* Total amount of literal, length, distance codes. */

    let mut i = 0;

    while i < lld_total {
        /* This is an encoding of a huffman tree, so now the length is a symbol */
        let symbol = if i < hlit2 {
            ll_lengths[i]
        } else {
            d_lengths[i - hlit2]
        } as u8;

        let mut count = 1;
        if use_16 || (symbol == 0 && (use_17 || use_18)) {
            let mut j = i + 1;
            let mut symbol_calc = if j < hlit2 {
                ll_lengths[j]
            } else {
                d_lengths[j - hlit2]
            } as u8;

            while j < lld_total && symbol == symbol_calc {
                count += 1;
                j += 1;
                symbol_calc = if j < hlit2 {
                    ll_lengths[j]
                } else {
                    d_lengths[j - hlit2]
                } as u8;
            }
        }

        i += count - 1;

        /* Repetitions of zeroes */
        if symbol == 0 && count >= 3 {
            if use_18 {
                while count >= 11 {
                    let count2 = if count > 138 { 138 } else { count };
                    rle.push(18);
                    rle_bits.push(count2 - 11);
                    clcounts[18] += 1;
                    count -= count2;
                }
            }
            if use_17 {
                while count >= 3 {
                    let count2 = if count > 10 { 10 } else { count };
                    rle.push(17);
                    rle_bits.push(count2 - 3);
                    clcounts[17] += 1;
                    count -= count2;
                }
            }
        }

        /* Repetitions of any symbol */
        if use_16 && count >= 4 {
            count -= 1; /* Since the first one is hardcoded. */
            clcounts[symbol as usize] += 1;
            rle.push(symbol);
            rle_bits.push(0);

            while count >= 3 {
                let count2 = if count > 6 { 6 } else { count };
                rle.push(16);
                rle_bits.push(count2 - 3);
                clcounts[16] += 1;
                count -= count2;
            }
        }

        /* No or insufficient repetition */
        clcounts[symbol as usize] += count;
        while count > 0 {
            rle.push(symbol);
            rle_bits.push(0);
            count -= 1;
        }
        i += 1;
    }

    let clcl = length_limited_code_lengths(&clcounts, 7);
    let clsymbols = lengths_to_symbols(&clcl, 7);

    let mut hclen = 15;
    /* Trim zeros. */
    while hclen > 0 && clcounts[order[hclen + 4 - 1]] == 0 {
        hclen -= 1;
    }

    bitwise_writer.add_bits(hlit as u32, 5)?;
    bitwise_writer.add_bits(hdist as u32, 5)?;
    bitwise_writer.add_bits(hclen as u32, 4)?;

    for &item in order.iter().take(hclen + 4) {
        bitwise_writer.add_bits(clcl[item], 3)?;
    }

    for i in 0..rle.len() {
        let rle_i = rle[i] as usize;
        let rle_bits_i = rle_bits[i] as u32;
        let sym = clsymbols[rle_i];
        bitwise_writer.add_huffman_bits(sym, clcl[rle_i])?;
        /* Extra bits. */
        if rle_i == 16 {
            bitwise_writer.add_bits(rle_bits_i, 2)?;
        } else if rle_i == 17 {
            bitwise_writer.add_bits(rle_bits_i, 3)?;
        } else if rle_i == 18 {
            bitwise_writer.add_bits(rle_bits_i, 7)?;
        }
    }

    result_size += 14; /* hlit, hdist, hclen bits */
    result_size += (hclen + 4) * 3; /* clcl bits */
    for i in 0..19 {
        result_size += clcl[i] as usize * clcounts[i];
    }
    /* Extra bits. */
    result_size += clcounts[16] * 2;
    result_size += clcounts[17] * 3;
    result_size += clcounts[18] * 7;

    Ok(result_size)
}

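/// Encodes the given Huffman tree to the output, trying all eight
/// combinations of the 16/17/18 repeat code usage and picking the one that
/// encodes to the fewest bits.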
fn add_dynamic_tree<W: Write>(
    ll_lengths: &[u32],
    d_lengths: &[u32],
    bitwise_writer: &mut BitwiseWriter<W>,
) -> Result<(), Error> {
    let mut best = 0;
    let mut bestsize = 0;

    for i in 0..8 {
        let size = encode_tree_no_output(ll_lengths, d_lengths, i & 1 > 0, i & 2 > 0, i & 4 > 0);
        if bestsize == 0 || size < bestsize {
            bestsize = size;
            best = i;
        }
    }

    encode_tree(
        ll_lengths,
        d_lengths,
        best & 1 > 0,
        best & 2 > 0,
        best & 4 > 0,
        bitwise_writer,
    )
    .map(|_| ())
}

/// Adds a deflate block with the given LZ77 data to the output.
/// `btype`: the block type, must be `Fixed` or `Dynamic`
/// `final_block`: whether to set the "final" bit on this block; it must be the
///     last block of the stream
/// `in_data`: the original input data, used for uncompressed blocks
/// `lz77`: the LZ77 data, in the same format as in `Lz77Store`
/// `lstart`: where to start in the LZ77 data
/// `lend`: where to end in the LZ77 data (not inclusive)
/// `expected_data_size`: the uncompressed block size, used for an assertion;
///     set it to `0` to skip the assertion
/// `bitwise_writer`: writer responsible for appending bits
#[allow(clippy::too_many_arguments)] // Not feasible to refactor in a more readable way
fn add_lz77_block<W: Write>(
    btype: BlockType,
    final_block: bool,
    in_data: &[u8],
    lz77: &Lz77Store,
    lstart: usize,
    lend: usize,
    expected_data_size: usize,
    bitwise_writer: &mut BitwiseWriter<W>,
) -> Result<(), Error> {
    if btype == BlockType::Uncompressed {
        let length = lz77.get_byte_range(lstart, lend);
        let pos = if lstart == lend { 0 } else { lz77.pos[lstart] };
        let end = pos + length;
        return add_non_compressed_block(final_block, in_data, pos, end, bitwise_writer);
    }

    bitwise_writer.add_bit(u8::from(final_block))?;

    let (ll_lengths, d_lengths) = match btype {
        BlockType::Uncompressed => unreachable!(),
        BlockType::Fixed => {
            bitwise_writer.add_bit(1)?;
            bitwise_writer.add_bit(0)?;
            fixed_tree()
        }
        BlockType::Dynamic => {
            bitwise_writer.add_bit(0)?;
            bitwise_writer.add_bit(1)?;
            let (_, ll_lengths, d_lengths) = get_dynamic_lengths(lz77, lstart, lend);

            let _detect_tree_size = bitwise_writer.bytes_written();
            add_dynamic_tree(&ll_lengths, &d_lengths, bitwise_writer)?;
            debug!(
                "treesize: {}",
                bitwise_writer.bytes_written() - _detect_tree_size
            );
            (ll_lengths, d_lengths)
        }
    };

    let ll_symbols = lengths_to_symbols(&ll_lengths, 15);
    let d_symbols = lengths_to_symbols(&d_lengths, 15);

    let detect_block_size = bitwise_writer.bytes_written();
    add_lz77_data(
        lz77,
        lstart,
        lend,
        expected_data_size,
        &ll_symbols,
        &ll_lengths,
        &d_symbols,
        &d_lengths,
        bitwise_writer,
    )?;

    /* End symbol. */
    bitwise_writer.add_huffman_bits(ll_symbols[256], ll_lengths[256])?;

    if log_enabled!(log::Level::Debug) {
        let _uncompressed_size = lz77.litlens[lstart..lend]
            .iter()
            .fold(0, |acc, &x| acc + x.size());
        let _compressed_size = bitwise_writer.bytes_written() - detect_block_size;
        debug!(
            "compressed block size: {} ({}k) (unc: {})",
            _compressed_size,
            _compressed_size / 1024,
            _uncompressed_size
        );
    }

    Ok(())
}

/// Calculates block size in bits.
/// `lz77`: LZ77 data (literals/lengths and distances)
/// `lstart`: start of block
/// `lend`: end of block (not inclusive)
pub fn calculate_block_size(lz77: &Lz77Store, lstart: usize, lend: usize, btype: BlockType) -> f64 {
    match btype {
        BlockType::Uncompressed => {
            let length = lz77.get_byte_range(lstart, lend);
            let rem = length % 65535;
            let blocks = length / 65535 + usize::from(rem > 0);
            /* An uncompressed block must actually be split into multiple blocks
            if it's larger than 65535 bytes long. Each block header is 5 bytes:
            3 bits, padding, LEN and NLEN (the potentially smaller padding of
            the first one is ignored). */
            (blocks * 5 * 8 + length * 8) as f64
        }
        BlockType::Fixed => {
            let fixed_tree = fixed_tree();
            let ll_lengths = fixed_tree.0;
            let d_lengths = fixed_tree.1;

            let mut result = 3.0; /* bfinal and btype bits */
            result +=
                calculate_block_symbol_size(&ll_lengths, &d_lengths, lz77, lstart, lend) as f64;
            result
        }
        BlockType::Dynamic => get_dynamic_lengths(lz77, lstart, lend).0 + 3.0,
    }
}

/// Tries out `optimize_huffman_for_rle` for this block; if the result is
/// smaller, uses it, otherwise keeps the original. Returns the size of the
/// encoded tree and data in bits, not including the 3-bit block header.
fn try_optimize_huffman_for_rle(
    lz77: &Lz77Store,
    lstart: usize,
    lend: usize,
    ll_counts: &[usize],
    d_counts: &[usize],
    ll_lengths: Vec<u32>,
    d_lengths: Vec<u32>,
) -> (f64, Vec<u32>, Vec<u32>) {
    let mut ll_counts2 = Vec::from(ll_counts);
    let mut d_counts2 = Vec::from(d_counts);

    let treesize = calculate_tree_size(&ll_lengths, &d_lengths);
    let datasize = calculate_block_symbol_size_given_counts(
        ll_counts,
        d_counts,
        &ll_lengths,
        &d_lengths,
        lz77,
        lstart,
        lend,
    );

    optimize_huffman_for_rle(&mut ll_counts2);
    optimize_huffman_for_rle(&mut d_counts2);

    let ll_lengths2 = length_limited_code_lengths(&ll_counts2, 15);
    let mut d_lengths2 = length_limited_code_lengths(&d_counts2, 15);
    patch_distance_codes_for_buggy_decoders(&mut d_lengths2[..]);

    let treesize2 = calculate_tree_size(&ll_lengths2, &d_lengths2);
    let datasize2 = calculate_block_symbol_size_given_counts(
        ll_counts,
        d_counts,
        &ll_lengths2,
        &d_lengths2,
        lz77,
        lstart,
        lend,
    );

    if treesize2 + datasize2 < treesize + datasize {
        (((treesize2 + datasize2) as f64), ll_lengths2, d_lengths2)
    } else {
        ((treesize + datasize) as f64, ll_lengths, d_lengths)
    }
}

/// Calculates the bit lengths for the symbols for dynamic blocks. Chooses bit
/// lengths that give the smallest size of tree encoding + encoding of all the
/// symbols to have the smallest output size. These are not necessarily the
/// ideal Huffman bit lengths. Returns the size of the encoded tree and data in
/// bits, not including the 3-bit block header.
fn get_dynamic_lengths(lz77: &Lz77Store, lstart: usize, lend: usize) -> (f64, Vec<u32>, Vec<u32>) {
    let (mut ll_counts, d_counts) = lz77.get_histogram(lstart, lend);
    ll_counts[256] = 1; /* End symbol. */

    let ll_lengths = length_limited_code_lengths(&*ll_counts, 15);
    let mut d_lengths = length_limited_code_lengths(&*d_counts, 15);

    patch_distance_codes_for_buggy_decoders(&mut d_lengths[..]);

    try_optimize_huffman_for_rle(
        lz77,
        lstart,
        lend,
        &*ll_counts,
        &*d_counts,
        ll_lengths,
        d_lengths,
    )
}

/// Adds all lit/len and dist codes from the lists as huffman symbols. Does not add
/// end code 256. `expected_data_size` is the uncompressed block size, used for
/// an assertion; you can set it to `0` to skip the assertion.
#[allow(clippy::too_many_arguments)] // Not feasible to refactor in a more readable way
fn add_lz77_data<W: Write>(
    lz77: &Lz77Store,
    lstart: usize,
    lend: usize,
    expected_data_size: usize,
    ll_symbols: &[u32],
    ll_lengths: &[u32],
    d_symbols: &[u32],
    d_lengths: &[u32],
    bitwise_writer: &mut BitwiseWriter<W>,
) -> Result<(), Error> {
    let mut testlength = 0;

    for &item in &lz77.litlens[lstart..lend] {
        match item {
            LitLen::Literal(lit) => {
                let litlen = lit as usize;
                debug_assert!(litlen < 256);
                debug_assert!(ll_lengths[litlen] > 0);
                bitwise_writer.add_huffman_bits(ll_symbols[litlen], ll_lengths[litlen])?;
                testlength += 1;
            }
            LitLen::LengthDist(len, dist) => {
                let litlen = len as usize;
                assert!((3..=288).contains(&litlen)); // Eases inlining and gets rid of index bound checks below
                let lls = get_length_symbol(litlen);
                let ds = get_dist_symbol(dist) as usize;
                debug_assert!(ll_lengths[lls] > 0);
                debug_assert!(d_lengths[ds] > 0);
                bitwise_writer.add_huffman_bits(ll_symbols[lls], ll_lengths[lls])?;
                bitwise_writer.add_bits(
                    get_length_extra_bits_value(litlen),
                    get_length_extra_bits(litlen),
                )?;
                bitwise_writer.add_huffman_bits(d_symbols[ds], d_lengths[ds])?;
                bitwise_writer.add_bits(
                    u32::from(get_dist_extra_bits_value(dist)),
                    get_dist_extra_bits(dist),
                )?;
                testlength += litlen;
            }
        }
    }
    debug_assert!(expected_data_size == 0 || testlength == expected_data_size);
    Ok(())
}

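/// Emits the block with whichever block type (uncompressed, fixed or dynamic)
/// costs the fewest bits, re-running the fixed-tree LZ77 parse first when the
/// block is small or the fixed cost is already close to the dynamic cost.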
#[allow(clippy::too_many_arguments)] // Not feasible to refactor in a more readable way
fn add_lz77_block_auto_type<W: Write>(
    final_block: bool,
    in_data: &[u8],
    lz77: &Lz77Store,
    lstart: usize,
    lend: usize,
    expected_data_size: usize,
    bitwise_writer: &mut BitwiseWriter<W>,
) -> Result<(), Error> {
    let uncompressedcost = calculate_block_size(lz77, lstart, lend, BlockType::Uncompressed);
    let mut fixedcost = calculate_block_size(lz77, lstart, lend, BlockType::Fixed);
    let dyncost = calculate_block_size(lz77, lstart, lend, BlockType::Dynamic);

    /* Whether to perform the expensive calculation of creating an optimal block
    with fixed huffman tree to check if smaller. Only do this for small blocks or
    blocks which already are pretty good with fixed huffman tree. */
    let expensivefixed = (lz77.size() < 1000) || fixedcost <= dyncost * 1.1;

    let mut fixedstore = Lz77Store::new();
    if lstart == lend {
        /* Smallest empty block is represented by fixed block */
        bitwise_writer.add_bits(u32::from(final_block), 1)?;
        bitwise_writer.add_bits(1, 2)?; /* btype 01 */
        bitwise_writer.add_bits(0, 7)?; /* end symbol has code 0000000 */
        return Ok(());
    }
    if expensivefixed {
        /* Recalculate the LZ77 with lz77_optimal_fixed */
        let instart = lz77.pos[lstart];
        let inend = instart + lz77.get_byte_range(lstart, lend);

        lz77_optimal_fixed(
            &mut ZopfliLongestMatchCache::new(inend - instart),
            in_data,
            instart,
            inend,
            &mut fixedstore,
        );
        fixedcost = calculate_block_size(&fixedstore, 0, fixedstore.size(), BlockType::Fixed);
    }

    if uncompressedcost <= fixedcost && uncompressedcost <= dyncost {
        add_lz77_block(
            BlockType::Uncompressed,
            final_block,
            in_data,
            lz77,
            lstart,
            lend,
            expected_data_size,
            bitwise_writer,
        )
    } else if fixedcost <= dyncost {
        if expensivefixed {
            add_lz77_block(
                BlockType::Fixed,
                final_block,
                in_data,
                &fixedstore,
                0,
                fixedstore.size(),
                expected_data_size,
                bitwise_writer,
            )
        } else {
            add_lz77_block(
                BlockType::Fixed,
                final_block,
                in_data,
                lz77,
                lstart,
                lend,
                expected_data_size,
                bitwise_writer,
            )
        }
    } else {
        add_lz77_block(
            BlockType::Dynamic,
            final_block,
            in_data,
            lz77,
            lstart,
            lend,
            expected_data_size,
            bitwise_writer,
        )
    }
}

/// Calculates block size in bits, automatically using the best btype.
pub fn calculate_block_size_auto_type(lz77: &Lz77Store, lstart: usize, lend: usize) -> f64 {
    let uncompressedcost = calculate_block_size(lz77, lstart, lend, BlockType::Uncompressed);
    /* Don't do the expensive fixed cost calculation for larger blocks that are
    unlikely to use it. */
    let fixedcost = if lz77.size() > 1000 {
        uncompressedcost
    } else {
        calculate_block_size(lz77, lstart, lend, BlockType::Fixed)
    };
    let dyncost = calculate_block_size(lz77, lstart, lend, BlockType::Dynamic);
    uncompressedcost.min(fixedcost).min(dyncost)
}

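/// Writes all LZ77 blocks delimited by `splitpoints` to the output, marking
/// only the trailing block as final when `final_block` is set.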
fn add_all_blocks<W: Write>(
    splitpoints: &[usize],
    lz77: &Lz77Store,
    final_block: bool,
    in_data: &[u8],
    bitwise_writer: &mut BitwiseWriter<W>,
) -> Result<(), Error> {
    let mut last = 0;
    for &item in splitpoints {
        add_lz77_block_auto_type(false, in_data, lz77, last, item, 0, bitwise_writer)?;
        last = item;
    }
    add_lz77_block_auto_type(
        final_block,
        in_data,
        lz77,
        last,
        lz77.size(),
        0,
        bitwise_writer,
    )
}

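/// Splits the input into blocks, runs the optimal LZ77 parse on each part,
/// then attempts a second split on the resulting LZ77 data and keeps
/// whichever set of split points yields the smaller total cost before
/// emitting the blocks.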
fn blocksplit_attempt<W: Write>(
    options: &Options,
    final_block: bool,
    in_data: &[u8],
    instart: usize,
    inend: usize,
    bitwise_writer: &mut BitwiseWriter<W>,
) -> Result<(), Error> {
    let mut totalcost = 0.0;
    let mut lz77 = Lz77Store::new();

    /* byte coordinates rather than lz77 index */
    let mut splitpoints_uncompressed = Vec::with_capacity(options.maximum_block_splits as usize);

    blocksplit(
        in_data,
        instart,
        inend,
        options.maximum_block_splits,
        &mut splitpoints_uncompressed,
    );
    let npoints = splitpoints_uncompressed.len();
    let mut splitpoints = Vec::with_capacity(npoints);

    let mut last = instart;
    for &item in &splitpoints_uncompressed {
        let store = lz77_optimal(
            &mut ZopfliLongestMatchCache::new(item - last),
            in_data,
            last,
            item,
            options.iteration_count.get(),
            options.iterations_without_improvement.get(),
        );
        totalcost += calculate_block_size_auto_type(&store, 0, store.size());

        // ZopfliAppendLZ77Store(&store, &lz77);
        debug_assert!(instart == inend || store.size() > 0);
        for (&litlens, &pos) in store.litlens.iter().zip(store.pos.iter()) {
            lz77.append_store_item(litlens, pos);
        }

        splitpoints.push(lz77.size());

        last = item;
    }

    let store = lz77_optimal(
        &mut ZopfliLongestMatchCache::new(inend - last),
        in_data,
        last,
        inend,
        options.iteration_count.get(),
        options.iterations_without_improvement.get(),
    );
    totalcost += calculate_block_size_auto_type(&store, 0, store.size());

    // ZopfliAppendLZ77Store(&store, &lz77);
    debug_assert!(instart == inend || store.size() > 0);
    for (&litlens, &pos) in store.litlens.iter().zip(store.pos.iter()) {
        lz77.append_store_item(litlens, pos);
    }

    /* Second block splitting attempt */
    if npoints > 1 {
        let mut splitpoints2 = Vec::with_capacity(splitpoints_uncompressed.len());
        let mut totalcost2 = 0.0;

        blocksplit_lz77(&lz77, options.maximum_block_splits, &mut splitpoints2);

        let mut last = 0;
        for &item in &splitpoints2 {
            totalcost2 += calculate_block_size_auto_type(&lz77, last, item);
            last = item;
        }
        totalcost2 += calculate_block_size_auto_type(&lz77, last, lz77.size());

        if totalcost2 < totalcost {
            splitpoints = splitpoints2;
        }
    }

    add_all_blocks(&splitpoints, &lz77, final_block, in_data, bitwise_writer)
}

/// Since an uncompressed block can be at most 65535 bytes in size, this
/// actually adds multiple blocks if needed.
fn add_non_compressed_block<W: Write>(
    final_block: bool,
    in_data: &[u8],
    instart: usize,
    inend: usize,
    bitwise_writer: &mut BitwiseWriter<W>,
) -> Result<(), Error> {
    let in_data = &in_data[instart..inend];

    let in_data_chunks = in_data.chunks(65535).size_hint().0;

    for (chunk, is_final) in in_data
        .chunks(65535)
        .flag_last()
        // Make sure that we output at least one chunk if this is the final block
        .chain(iter::once((&[][..], true)))
        .take(if final_block {
            cmp::max(in_data_chunks, 1)
        } else {
            in_data_chunks
        })
    {
        let blocksize = chunk.len();
        // NLEN is the one's complement of LEN; only its low 16 bits are emitted
        let nlen = !blocksize;

        bitwise_writer.add_bit(u8::from(final_block && is_final))?;
        /* BTYPE 00 */
        bitwise_writer.add_bit(0)?;
        bitwise_writer.add_bit(0)?;

        bitwise_writer.finish_partial_bits()?;

        bitwise_writer.add_byte((blocksize % 256) as u8)?;
        bitwise_writer.add_byte(((blocksize / 256) % 256) as u8)?;
        bitwise_writer.add_byte((nlen % 256) as u8)?;
        bitwise_writer.add_byte(((nlen / 256) % 256) as u8)?;

        bitwise_writer.add_bytes(chunk)?;
    }

    Ok(())
}

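/// Accumulates bits into a byte, least significant bit first, flushing each
/// completed byte to the wrapped sink and counting the bytes written so far.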
struct BitwiseWriter<W> {
    /// The byte currently being assembled from individual bits.
    bit: u8,
    /// The number of bits already set in `bit` (0..8).
    bp: u8,
    /// The number of whole bytes written to `out` so far.
    len: usize,
    out: W,
}

impl<W: Write> BitwiseWriter<W> {
    const fn new(out: W) -> Self {
        Self {
            bit: 0,
            bp: 0,
            len: 0,
            out,
        }
    }

    const fn bytes_written(&self) -> usize {
        self.len + if self.bp > 0 { 1 } else { 0 }
    }

    /// For when you want to add a full byte.
    fn add_byte(&mut self, byte: u8) -> Result<(), Error> {
        self.add_bytes(&[byte])
    }

    /// For adding a slice of bytes.
    fn add_bytes(&mut self, bytes: &[u8]) -> Result<(), Error> {
        self.len += bytes.len();
        self.out.write_all(bytes)
    }

    fn add_bit(&mut self, bit: u8) -> Result<(), Error> {
        self.bit |= bit << self.bp;
        self.bp += 1;
        if self.bp == 8 {
            self.finish_partial_bits()
        } else {
            Ok(())
        }
    }

    fn add_bits(&mut self, symbol: u32, length: u32) -> Result<(), Error> {
        // TODO: make more efficient (add more bits at once)
        for i in 0..length {
            let bit = ((symbol >> i) & 1) as u8;
            self.add_bit(bit)?;
        }

        Ok(())
    }

    /// Adds bits, like `add_bits`, but the order is inverted. The deflate specification
    /// uses both orders in one standard.
    fn add_huffman_bits(&mut self, symbol: u32, length: u32) -> Result<(), Error> {
        // TODO: make more efficient (add more bits at once)
        for i in 0..length {
            let bit = ((symbol >> (length - i - 1)) & 1) as u8;
            self.add_bit(bit)?;
        }

        Ok(())
    }

    fn finish_partial_bits(&mut self) -> Result<(), Error> {
        if self.bp != 0 {
            let bytes = &[self.bit];
            self.add_bytes(bytes)?;
            self.bit = 0;
            self.bp = 0;
        }
        Ok(())
    }
}

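/// Overwrites the `stride` histogram entries that end at index `i` (exclusive)
/// with the collapsed `count` value.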
fn set_counts_to_count(counts: &mut [usize], count: usize, i: usize, stride: usize) {
    for c in &mut counts[(i - stride)..i] {
        *c = count;
    }
}

#[cfg(test)]
mod test {
    use miniz_oxide::inflate;

    use super::*;

    #[test]
    fn test_set_counts_to_count() {
        let mut counts = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        let count = 100;
        let i = 8;
        let stride = 5;

        set_counts_to_count(&mut counts, count, i, stride);

        assert_eq!(counts, vec![0, 1, 2, 100, 100, 100, 100, 100, 8, 9]);
    }
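
    // A couple of direct checks on the helpers above. These are sketches the
    // upstream code does not include; they only assert properties that follow
    // from the function bodies in this file.
    #[test]
    fn patch_distance_codes_adds_dummy_lengths() {
        // With no used distance codes, two dummy lengths of 1 are added.
        let mut d_lengths = [0; 30];
        patch_distance_codes_for_buggy_decoders(&mut d_lengths);
        assert_eq!(&d_lengths[..2], &[1, 1]);

        // With exactly one used code, a dummy code is added next to it.
        let mut d_lengths = [0; 30];
        d_lengths[0] = 5;
        patch_distance_codes_for_buggy_decoders(&mut d_lengths);
        assert_eq!(&d_lengths[..2], &[5, 1]);
    }

    #[test]
    fn optimize_huffman_for_rle_keeps_trailing_zeros() {
        // An all-zero histogram is returned untouched.
        let mut counts = [0; 8];
        optimize_huffman_for_rle(&mut counts);
        assert_eq!(counts, [0; 8]);

        // Trailing zeros must never be promoted to non-zero counts, which
        // would add invalid codes to the stream.
        let mut counts = [10, 10, 10, 10, 10, 0, 0, 0];
        optimize_huffman_for_rle(&mut counts);
        assert_eq!(&counts[5..], &[0, 0, 0]);
    }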

    #[test]
    fn weird_encoder_write_size_combinations_works() {
        let mut compressed_data = vec![];

        let default_options = Options::default();
        let mut encoder =
            DeflateEncoder::new(default_options, BlockType::default(), &mut compressed_data);

        encoder.write_all(&[0]).unwrap();
        encoder.write_all(&[]).unwrap();
        encoder.write_all(&[1, 2]).unwrap();
        encoder.write_all(&[]).unwrap();
        encoder.write_all(&[]).unwrap();
        encoder.write_all(&[3]).unwrap();
        encoder.write_all(&[4]).unwrap();

        encoder.finish().unwrap();

        let decompressed_data = inflate::decompress_to_vec(&compressed_data)
            .expect("Could not inflate compressed stream");

        assert_eq!(
            &[0, 1, 2, 3, 4][..],
            decompressed_data,
            "Decompressed data should match input data"
        );
    }
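
    // Sanity-check `fixed_tree` against the fixed code lengths given in
    // RFC 1951, section 3.2.6.
    #[test]
    fn fixed_tree_matches_rfc_1951() {
        let (ll, d) = fixed_tree();
        assert_eq!(ll.len(), ZOPFLI_NUM_LL);
        assert!(ll[..144].iter().all(|&l| l == 8));
        assert!(ll[144..256].iter().all(|&l| l == 9));
        assert!(ll[256..280].iter().all(|&l| l == 7));
        assert!(ll[280..].iter().all(|&l| l == 8));
        assert!(d.iter().all(|&l| l == 5));
    }

    // `add_bits` writes bits LSB first, while `add_huffman_bits` writes them
    // MSB first; check both orders against a hand-assembled byte.
    #[test]
    fn bitwise_writer_bit_order() {
        let mut writer = BitwiseWriter::new(Vec::new());
        // LSB first: 0b101 lands in bit positions 0..3 as 1, 0, 1.
        writer.add_bits(0b101, 3).unwrap();
        // MSB first: 0b10110 lands in bit positions 3..8 as 1, 0, 1, 1, 0.
        writer.add_huffman_bits(0b10110, 5).unwrap();
        assert_eq!(writer.out, [0b0110_1101u8]);
    }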
}