block_buffer/
lib.rs

1//! Fixed size buffer for block processing of data.
2#![no_std]
3#![doc(
4    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
5    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
6    html_root_url = "https://docs.rs/block-buffer/0.10.1"
7)]
8#![warn(missing_docs, rust_2018_idioms)]
9
10pub use generic_array;
11
12use core::{marker::PhantomData, slice};
13use generic_array::{
14    typenum::{IsLess, Le, NonZero, U256},
15    ArrayLength, GenericArray,
16};
17
18mod sealed;
19
/// Block on which `BlockBuffer` operates: a byte array
/// (`GenericArray<u8, BlockSize>`) of length `BlockSize`.
pub type Block<BlockSize> = GenericArray<u8, BlockSize>;
22
/// Trait for buffer kinds.
///
/// Implemented only by [`Eager`] and [`Lazy`]; the private `sealed::Sealed`
/// supertrait prevents downstream crates from adding new kinds.
pub trait BufferKind: sealed::Sealed {}
25
/// Eager block buffer kind, which guarantees that buffer position
/// always lies in the range of `0..BlockSize`.
///
/// Because the position can never equal `BlockSize`, a completed block
/// is never kept in the buffer — it is always compressed immediately.
#[derive(Copy, Clone, Debug, Default)]
pub struct Eager {}

/// Lazy block buffer kind, which guarantees that buffer position
/// always lies in the range of `0..=BlockSize`.
///
/// The position may equal `BlockSize`, i.e. a completed block can stay
/// buffered until more input arrives.
#[derive(Copy, Clone, Debug, Default)]
pub struct Lazy {}

impl BufferKind for Eager {}
impl BufferKind for Lazy {}
38
/// Eager block buffer: alias for [`BlockBuffer`] with the [`Eager`] kind.
pub type EagerBuffer<B> = BlockBuffer<B, Eager>;
/// Lazy block buffer: alias for [`BlockBuffer`] with the [`Lazy`] kind.
pub type LazyBuffer<B> = BlockBuffer<B, Lazy>;
43
/// Buffer for block processing of data.
///
/// `Kind` selects the position invariant (see [`Eager`] and [`Lazy`]).
/// The `BlockSize: IsLess<U256>` bound guarantees the cursor position
/// always fits into the `u8` field used to store it.
#[derive(Debug)]
pub struct BlockBuffer<BlockSize, Kind>
where
    BlockSize: ArrayLength<u8> + IsLess<U256>,
    Le<BlockSize, U256>: NonZero,
    Kind: BufferKind,
{
    /// Block-sized scratch buffer; bytes at `..pos` are meaningful.
    buffer: Block<BlockSize>,
    /// Cursor position; always satisfies `Kind::invariant(pos, BlockSize)`.
    pos: u8,
    /// Zero-sized marker binding the buffer to its `Kind`.
    _pd: PhantomData<Kind>,
}
56
57impl<BlockSize, Kind> Default for BlockBuffer<BlockSize, Kind>
58where
59    BlockSize: ArrayLength<u8> + IsLess<U256>,
60    Le<BlockSize, U256>: NonZero,
61    Kind: BufferKind,
62{
63    fn default() -> Self {
64        Self {
65            buffer: Default::default(),
66            pos: 0,
67            _pd: PhantomData,
68        }
69    }
70}
71
72impl<BlockSize, Kind> Clone for BlockBuffer<BlockSize, Kind>
73where
74    BlockSize: ArrayLength<u8> + IsLess<U256>,
75    Le<BlockSize, U256>: NonZero,
76    Kind: BufferKind,
77{
78    fn clone(&self) -> Self {
79        Self {
80            buffer: self.buffer.clone(),
81            pos: self.pos,
82            _pd: PhantomData,
83        }
84    }
85}
86
impl<BlockSize, Kind> BlockBuffer<BlockSize, Kind>
where
    BlockSize: ArrayLength<u8> + IsLess<U256>,
    Le<BlockSize, U256>: NonZero,
    Kind: BufferKind,
{
    /// Create new buffer from slice.
    ///
    /// # Panics
    /// If slice length is not valid for used buffer kind: it must be
    /// smaller than `BlockSize` for [`Eager`] buffers and not bigger
    /// than `BlockSize` for [`Lazy`] ones.
    #[inline(always)]
    pub fn new(buf: &[u8]) -> Self {
        let pos = buf.len();
        // `invariant` encodes the kind-specific position range
        // documented on `Eager`/`Lazy`
        assert!(Kind::invariant(pos, BlockSize::USIZE));
        let mut buffer = Block::<BlockSize>::default();
        buffer[..pos].copy_from_slice(buf);
        Self {
            buffer,
            // lossless cast: `pos <= BlockSize < 256` is guaranteed by
            // the assert above and the `IsLess<U256>` bound
            pos: pos as u8,
            _pd: PhantomData,
        }
    }

    /// Digest data in `input` in blocks of size `BlockSize` using
    /// the `compress` function, which accepts slice of blocks.
    ///
    /// Any leftover bytes that do not complete a block are stored in
    /// the internal buffer for the next call.
    #[inline]
    pub fn digest_blocks(
        &mut self,
        mut input: &[u8],
        mut compress: impl FnMut(&[Block<BlockSize>]),
    ) {
        let pos = self.get_pos();
        // `rem` is computed inline instead of calling `self.remaining()`;
        // for some reason the call form prevents panic elimination
        let rem = self.size() - pos;
        let n = input.len();
        // Note that checking condition `pos + n < BlockSize` is
        // equivalent to checking `n < rem`, where `rem` is equal
        // to `BlockSize - pos`. Using the latter allows us to work
        // around compiler accounting for possible overflow of
        // `pos + n` which results in it inserting unreachable
        // panic branches. Using `unreachable_unchecked` in `get_pos`
        // we convince compiler that `BlockSize - pos` never underflows.
        if Kind::invariant(n, rem) {
            // `input` fits into the internal buffer without completing
            // a block (per the kind invariant): just append it.
            // double slicing allows to remove panic branches
            self.buffer[pos..][..n].copy_from_slice(input);
            self.set_pos_unchecked(pos + n);
            return;
        }
        if pos != 0 {
            // top up the partially filled buffer and compress it first
            let (left, right) = input.split_at(rem);
            input = right;
            self.buffer[pos..].copy_from_slice(left);
            compress(slice::from_ref(&self.buffer));
        }

        // compress full blocks directly from `input`; `split_blocks` is
        // kind-specific (a Lazy buffer keeps an exactly-full block back
        // as leftover, per its `0..=BlockSize` invariant)
        let (blocks, leftover) = Kind::split_blocks(input);
        if !blocks.is_empty() {
            compress(blocks);
        }

        // stash the leftover bytes for the next call
        let n = leftover.len();
        self.buffer[..n].copy_from_slice(leftover);
        self.set_pos_unchecked(n);
    }

    /// Reset buffer by setting cursor position to zero.
    ///
    /// Note: buffered bytes are not zeroized, only the cursor is reset.
    #[inline(always)]
    pub fn reset(&mut self) {
        self.set_pos_unchecked(0);
    }

    /// Pad remaining data with zeros and return resulting block.
    ///
    /// The cursor is reset to zero afterwards.
    #[inline(always)]
    pub fn pad_with_zeros(&mut self) -> &mut Block<BlockSize> {
        let pos = self.get_pos();
        self.buffer[pos..].iter_mut().for_each(|b| *b = 0);
        self.set_pos_unchecked(0);
        &mut self.buffer
    }

    /// Return current cursor position.
    #[inline(always)]
    pub fn get_pos(&self) -> usize {
        let pos = self.pos as usize;
        if !Kind::invariant(pos, BlockSize::USIZE) {
            debug_assert!(false);
            // SAFETY: every code path that stores `pos` (`new`, `set`,
            // `set_pos_unchecked`) upholds the kind invariant, so this
            // branch is unreachable. Telling the compiler so lets it
            // elide bounds-check panic branches in the methods above.
            unsafe {
                core::hint::unreachable_unchecked();
            }
        }
        pos
    }

    /// Return slice of data stored inside the buffer.
    #[inline(always)]
    pub fn get_data(&self) -> &[u8] {
        &self.buffer[..self.get_pos()]
    }

    /// Set buffer content and cursor position.
    ///
    /// # Panics
    /// If `pos` is not valid for used buffer kind: it must be smaller
    /// than block size for [`Eager`] buffers and not bigger than block
    /// size for [`Lazy`] ones.
    #[inline]
    pub fn set(&mut self, buf: Block<BlockSize>, pos: usize) {
        assert!(Kind::invariant(pos, BlockSize::USIZE));
        self.buffer = buf;
        self.set_pos_unchecked(pos);
    }

    /// Return size of the internal buffer in bytes.
    #[inline(always)]
    pub fn size(&self) -> usize {
        BlockSize::USIZE
    }

    /// Return number of remaining bytes in the internal buffer.
    #[inline(always)]
    pub fn remaining(&self) -> usize {
        self.size() - self.get_pos()
    }

    /// Set cursor position without checking the kind invariant in
    /// release builds.
    ///
    /// Callers must ensure `Kind::invariant(pos, BlockSize::USIZE)`
    /// holds: `get_pos` turns a violated invariant into undefined
    /// behavior via `unreachable_unchecked`.
    #[inline(always)]
    fn set_pos_unchecked(&mut self, pos: usize) {
        debug_assert!(Kind::invariant(pos, BlockSize::USIZE));
        self.pos = pos as u8;
    }
}
217
impl<BlockSize> BlockBuffer<BlockSize, Eager>
where
    BlockSize: ArrayLength<u8> + IsLess<U256>,
    Le<BlockSize, U256>: NonZero,
{
    /// Set `data` to generated blocks.
    ///
    /// Unconsumed bytes buffered by a previous call (at `buffer[pos..]`)
    /// are served first; then whole blocks are generated directly into
    /// `data` via `process_blocks`, and any final partial block is
    /// generated into the internal buffer, with the cursor marking how
    /// many of its bytes were consumed.
    #[inline]
    pub fn set_data(
        &mut self,
        mut data: &mut [u8],
        mut process_blocks: impl FnMut(&mut [Block<BlockSize>]),
    ) {
        let pos = self.get_pos();
        let r = self.remaining();
        let n = data.len();
        if pos != 0 {
            if n < r {
                // request fits entirely in the buffered bytes;
                // double slicing allows to remove panic branches
                data.copy_from_slice(&self.buffer[pos..][..n]);
                self.set_pos_unchecked(pos + n);
                return;
            }
            // drain the rest of the buffered bytes, then fall through
            // to generate fresh blocks for the remainder of `data`
            let (left, right) = data.split_at_mut(r);
            data = right;
            left.copy_from_slice(&self.buffer[pos..]);
        }

        let (blocks, leftover) = to_blocks_mut(data);
        process_blocks(blocks);

        let n = leftover.len();
        if n != 0 {
            // generate one more block, hand out its first `n` bytes and
            // keep the whole block so the rest can be served next call
            let mut block = Default::default();
            process_blocks(slice::from_mut(&mut block));
            leftover.copy_from_slice(&block[..n]);
            self.buffer = block;
        }
        self.set_pos_unchecked(n);
    }

    /// Compress remaining data after padding it with `delim`, zeros and
    /// the `suffix` bytes. If there is not enough unused space, `compress`
    /// will be called twice.
    ///
    /// # Panics
    /// If suffix length is bigger than block size.
    #[inline(always)]
    pub fn digest_pad(
        &mut self,
        delim: u8,
        suffix: &[u8],
        mut compress: impl FnMut(&Block<BlockSize>),
    ) {
        if suffix.len() > BlockSize::USIZE {
            panic!("suffix is too long");
        }
        let pos = self.get_pos();
        // indexing cannot panic: the Eager invariant guarantees
        // `pos < BlockSize`
        self.buffer[pos] = delim;
        for b in &mut self.buffer[pos + 1..] {
            *b = 0;
        }

        // `n` is the offset at which the suffix starts in its block
        let n = self.size() - suffix.len();
        if self.size() - pos - 1 < suffix.len() {
            // delimiter and suffix do not fit in one block:
            // compress the padded block, then a zeroed block
            // carrying only the suffix
            compress(&self.buffer);
            let mut block = Block::<BlockSize>::default();
            block[n..].copy_from_slice(suffix);
            compress(&block);
        } else {
            self.buffer[n..].copy_from_slice(suffix);
            compress(&self.buffer);
        }
        self.set_pos_unchecked(0)
    }

    /// Pad message with 0x80, zeros and 64-bit message length using
    /// big-endian byte order.
    ///
    /// # Panics
    /// If block size is smaller than 8 bytes (see [`Self::digest_pad`]).
    #[inline]
    pub fn len64_padding_be(&mut self, data_len: u64, compress: impl FnMut(&Block<BlockSize>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }

    /// Pad message with 0x80, zeros and 64-bit message length using
    /// little-endian byte order.
    ///
    /// # Panics
    /// If block size is smaller than 8 bytes (see [`Self::digest_pad`]).
    #[inline]
    pub fn len64_padding_le(&mut self, data_len: u64, compress: impl FnMut(&Block<BlockSize>)) {
        self.digest_pad(0x80, &data_len.to_le_bytes(), compress);
    }

    /// Pad message with 0x80, zeros and 128-bit message length using
    /// big-endian byte order.
    ///
    /// # Panics
    /// If block size is smaller than 16 bytes (see [`Self::digest_pad`]).
    #[inline]
    pub fn len128_padding_be(&mut self, data_len: u128, compress: impl FnMut(&Block<BlockSize>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }
}
314
/// Split message into a mutable slice of whole blocks and the leftover bytes
/// (fewer than `N` of them).
#[inline(always)]
fn to_blocks_mut<N: ArrayLength<u8>>(data: &mut [u8]) -> (&mut [Block<N>], &mut [u8]) {
    let nb = data.len() / N::USIZE;
    let (left, right) = data.split_at_mut(nb * N::USIZE);
    let p = left.as_mut_ptr() as *mut Block<N>;
    // SAFETY: `left` is exactly `nb * N::USIZE` bytes long, so it holds
    // exactly `nb` blocks of `N` bytes each; `Block<N>` is
    // `GenericArray<u8, N>`, which the `generic_array` crate guarantees
    // to be layout-compatible with `[u8; N]` (size `N`, alignment 1).
    // `split_at_mut` gives us exclusive access to the region, so `p` is
    // valid for mutation and the resulting slice does not alias `right`.
    let blocks = unsafe { slice::from_raw_parts_mut(p, nb) };
    (blocks, right)
}