zlib_rs/
allocate.rs

#![allow(unpredictable_function_pointer_comparisons)]
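
//! Allocation support for the zlib API.
//!
//! zlib lets applications supply their own `zalloc`/`zfree` callbacks. This module bundles
//! such a pair (plus the user's `opaque` pointer) into an [`Allocator`] and implements
//! 64-byte-aligned allocation on top of it, fixing up alignment manually for C-style
//! allocators that give no alignment guarantees. Default allocators backed by the Rust
//! global allocator and by the C `malloc` family are provided behind the `rust-allocator`
//! and `c-allocator` feature flags.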

#[cfg(unix)]
use core::ffi::c_int;
use core::{
    alloc::Layout,
    ffi::{c_uint, c_void},
    marker::PhantomData,
    mem,
    ptr::NonNull,
};

#[allow(non_camel_case_types)]
type size_t = usize;

const ALIGN: u8 = 64;
// posix_memalign requires that the alignment be a power of two and a multiple of sizeof(void*).
const _: () = assert!(ALIGN.count_ones() == 1);
const _: () = assert!(ALIGN as usize % mem::size_of::<*mut c_void>() == 0);

/// # Safety
///
/// This function is safe, but must have this type signature to be used elsewhere in the library
#[cfg(unix)]
unsafe extern "C" fn zalloc_c(opaque: *mut c_void, items: c_uint, size: c_uint) -> *mut c_void {
    let _ = opaque;

    extern "C" {
        fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int;
    }

    let mut ptr = core::ptr::null_mut();
    let size = items as size_t * size as size_t;
    if size == 0 {
        return ptr;
    }
    // SAFETY: ALIGN is a power of 2 and multiple of sizeof(void*), as required by posix_memalign.
    // In addition, since posix_memalign is allowed to return a unique but non-null pointer when
    // called with a size of zero, we returned above if the size was zero.
    match unsafe { posix_memalign(&mut ptr, ALIGN.into(), size) } {
        0 => ptr,
        _ => core::ptr::null_mut(),
    }
}

/// # Safety
///
/// This function is safe, but must have this type signature to be used elsewhere in the library
#[cfg(not(unix))]
unsafe extern "C" fn zalloc_c(opaque: *mut c_void, items: c_uint, size: c_uint) -> *mut c_void {
    let _ = opaque;

    let size = items as size_t * size as size_t;
    if size == 0 {
        return core::ptr::null_mut();
    }

    extern "C" {
        fn malloc(size: size_t) -> *mut c_void;
    }

    // SAFETY: malloc is allowed to return a unique but non-null pointer when given a size
    // of zero. To prevent potentially undefined behavior from such a pointer reaching Rust code
    // and being dereferenced, we handled the zero-size case separately above.
    unsafe { malloc(size) }
}

/// # Safety
///
/// This function is safe, but must have this type signature to be used elsewhere in the library
unsafe extern "C" fn zalloc_c_calloc(
    opaque: *mut c_void,
    items: c_uint,
    size: c_uint,
) -> *mut c_void {
    let _ = opaque;

    extern "C" {
        fn calloc(nitems: size_t, size: size_t) -> *mut c_void;
    }

    if items as size_t * size as size_t == 0 {
        return core::ptr::null_mut();
    }

    // SAFETY: When the item count or size is zero, calloc is allowed to return either
    // null or some non-null value that is safe to free but not safe to dereference.
    // To avoid the possibility of exposing such a pointer to Rust code, we check for
    // zero above and avoid calling calloc.
    unsafe { calloc(items as size_t, size as size_t) }
}

/// # Safety
///
/// The `ptr` must be null, or must have been allocated by the C allocator family that
/// `zfree_c` wraps (`malloc`, `calloc`, or `posix_memalign`), so that it can be released
/// with `free`
unsafe extern "C" fn zfree_c(opaque: *mut c_void, ptr: *mut c_void) {
    let _ = opaque;

    extern "C" {
        fn free(p: *mut c_void);
    }

    // SAFETY: The caller ensured that ptr was obtained from the same allocator. Also,
    // free properly handles the case where ptr == NULL.
    unsafe { free(ptr) }
}

/// # Safety
///
/// This function is safe to call.
#[cfg(feature = "rust-allocator")]
unsafe extern "C" fn zalloc_rust(_opaque: *mut c_void, count: c_uint, size: c_uint) -> *mut c_void {
    let size = count as usize * size as usize;
    if size == 0 {
        return core::ptr::null_mut();
    }

    // internally, we want to align allocations to 64 bytes (in part for SIMD reasons)
    let layout = Layout::from_size_align(size, ALIGN.into()).unwrap();

    // SAFETY: alloc requires that the layout have a nonzero size, so we return null
    // above (and never reach this call) if the requested count * size is zero.
    let ptr = unsafe { std::alloc::alloc(layout) };

    ptr as *mut c_void
}

/// # Safety
///
/// This function is safe to call.
#[cfg(feature = "rust-allocator")]
unsafe extern "C" fn zalloc_rust_calloc(
    _opaque: *mut c_void,
    count: c_uint,
    size: c_uint,
) -> *mut c_void {
    let size = count as usize * size as usize;
    if size == 0 {
        return core::ptr::null_mut();
    }

    // internally, we want to align allocations to 64 bytes (in part for SIMD reasons)
    let layout = Layout::from_size_align(size, ALIGN.into()).unwrap();

    // SAFETY: alloc_zeroed requires that the layout have a nonzero size, so we return
    // null above (and never reach this call) if the requested count * size is zero.
    let ptr = unsafe { std::alloc::alloc_zeroed(layout) };

    ptr as *mut c_void
}

/// # Safety
///
/// - `ptr` must be allocated with the rust `alloc::alloc` allocator
/// - `opaque` is a `&usize` that represents the size of the allocation
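///
/// For example, `Allocator::deallocate` below calls this function with
/// `&mut size as *mut usize as *mut c_void` as the `opaque` argument, where `size` is the
/// byte size of the original allocation.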
#[cfg(feature = "rust-allocator")]
unsafe extern "C" fn zfree_rust(opaque: *mut c_void, ptr: *mut c_void) {
    if ptr.is_null() {
        return;
    }

    // we can't really do much else. Deallocating with an invalid layout is UB.
    debug_assert!(!opaque.is_null());
    if opaque.is_null() {
        return;
    }

    // SAFETY: The caller ensured that *opaque is valid to dereference.
    let size = unsafe { *(opaque as *mut usize) };

    // zalloc_rust and zalloc_rust_calloc bypass the Rust allocator and just return
    // null when asked to allocate something of zero size. So if a caller tries to
    // free something of zero size, we return here rather than trying to call the
    // Rust deallocator.
    if size == 0 {
        return;
    }

    let layout = Layout::from_size_align(size, ALIGN.into()).unwrap();

    // SAFETY: The caller ensured that ptr was allocated with the `alloc` allocator,
    // and the size check above ensures that we are not trying to use a zero-size layout
    // that would produce undefined behavior in the allocator.
    unsafe { std::alloc::dealloc(ptr.cast(), layout) };
}

#[cfg(test)]
unsafe extern "C" fn zalloc_fail(_: *mut c_void, _: c_uint, _: c_uint) -> *mut c_void {
    core::ptr::null_mut()
}

#[cfg(test)]
unsafe extern "C" fn zfree_fail(_: *mut c_void, _: *mut c_void) {
    // do nothing
}

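/// A zlib-compatible allocator: the `zalloc`/`zfree` callback pair in the form that
/// `z_stream` expects, together with the user-provided `opaque` pointer that is passed
/// through to both callbacks.
///
/// A minimal usage sketch (assuming this module is exported as `zlib_rs::allocate` and the
/// `c-allocator` feature is enabled):
///
/// ```
/// # #[cfg(feature = "c-allocator")]
/// # fn example() {
/// use zlib_rs::allocate::C;
///
/// let ptr = C.allocate_raw::<u64>().expect("out of memory");
/// // SAFETY: `ptr` was allocated by `C` and holds exactly one `u64`.
/// unsafe { C.deallocate(ptr.as_ptr(), 1) };
/// # }
/// ```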
#[derive(Clone, Copy)]
#[repr(C)]
pub struct Allocator<'a> {
    pub zalloc: crate::c_api::alloc_func,
    pub zfree: crate::c_api::free_func,
    pub opaque: crate::c_api::voidpf,
    pub _marker: PhantomData<&'a ()>,
}

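// SAFETY: an `Allocator` is only a pair of `extern "C"` function pointers and the raw
// `opaque` pointer that is handed back to them. The allocators defined in this module use a
// null `opaque` and callbacks without shared mutable state, so sharing them across threads
// is sound.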
unsafe impl Sync for Allocator<'static> {}

#[cfg(feature = "rust-allocator")]
pub static RUST: Allocator<'static> = Allocator {
    zalloc: zalloc_rust,
    zfree: zfree_rust,
    opaque: core::ptr::null_mut(),
    _marker: PhantomData,
};

#[cfg(feature = "c-allocator")]
pub static C: Allocator<'static> = Allocator {
    zalloc: zalloc_c,
    zfree: zfree_c,
    opaque: core::ptr::null_mut(),
    _marker: PhantomData,
};

#[cfg(test)]
static FAIL: Allocator<'static> = Allocator {
    zalloc: zalloc_fail,
    zfree: zfree_fail,
    opaque: core::ptr::null_mut(),
    _marker: PhantomData,
};

impl Allocator<'_> {
    fn allocate_layout(&self, layout: Layout) -> *mut c_void {
        assert!(layout.align() <= ALIGN.into());

        // Special case for the Rust `alloc` backed allocator
        #[cfg(feature = "rust-allocator")]
        if self.zalloc == RUST.zalloc {
            // SAFETY: `zalloc_rust` is safe to call with any argument values.
            let ptr = unsafe { (RUST.zalloc)(self.opaque, layout.size() as _, 1) };

            debug_assert_eq!(ptr as usize % layout.align(), 0);

            return ptr;
        }

        // General case for c-style allocation

        // We cannot rely on the allocator giving properly aligned allocations and have to fix that ourselves.
        //
        // The general approach is to allocate a bit more than the layout needs, so that we can
        // give the application a properly aligned address and also store the real allocation
        // pointer in the allocation so that `free` can free the real allocation pointer.
        //
        // Example: The layout represents `(u32, u32)`, with an alignment of 4 bytes and a
        // total size of 8 bytes.
        //
        // Assume that the allocator gives us address `0x07`. We need that to be a multiple
        // of the alignment, so that shifts the starting position to `0x08`. Then we also need
        // to store the pointer to the start of the allocation so that `free` can free that
        // pointer, bumping to `0x10`. The `0x10` pointer is then the pointer that the
        // application deals with. When freeing, the original allocation pointer can be read
        // from `0x10 - size_of::<*const c_void>()`.
        //
        // Of course there does need to be enough space in the allocation such that when we
        // shift the start forwards, the end is still within the allocation. Hence we allocate
        // `extra_space` bytes: enough for a full alignment plus a pointer.

        // we need at least
        //
        // - `align` extra bytes so that no matter what pointer we get from zalloc, we can shift
        //      the start of the allocation by at most `align - 1` so that `ptr as usize % align == 0`
        // - `size_of::<*mut _>` extra bytes so that after aligning to `align`,
        //      there is `size_of::<*mut _>` space to store the pointer to the allocation.
        //      This pointer is then read back in `deallocate`
        let extra_space = core::mem::size_of::<*mut c_void>() + layout.align();

        // SAFETY: we assume allocating works correctly in the safety assumptions on
        // `DeflateStream` and `InflateStream`.
        let ptr = unsafe { (self.zalloc)(self.opaque, (layout.size() + extra_space) as _, 1) };

        if ptr.is_null() {
            return ptr;
        }

        // Calculate the return pointer address, leaving enough space to store the original pointer
        let align_diff = (ptr as usize).next_multiple_of(layout.align()) - (ptr as usize);

        // SAFETY: `align_diff` is smaller than `layout.align()`, and we allocated at least
        // `layout.align()` extra bytes, so the shifted pointer is still within the allocation.
        let mut return_ptr = unsafe { ptr.cast::<u8>().add(align_diff) };

        // if there is not enough room before `return_ptr` to store the original pointer,
        // shift the start forward by another (aligned) step
        if align_diff < core::mem::size_of::<*mut c_void>() {
            // SAFETY:
            //
            // - `offset` is a multiple of `layout.align()`, so `return_ptr + offset` is
            //      still well-aligned
            // - we reserved `size_of::<*mut _>() + align` extra bytes in the allocation, so
            //      `ptr + align_diff + offset` is still valid for (at least) `layout.size()` bytes
            let offset = Ord::max(core::mem::size_of::<*mut c_void>(), layout.align());
            return_ptr = unsafe { return_ptr.add(offset) };
        }

        // Store the original pointer so that `deallocate` can retrieve it and pass it to `zfree`
        //
        // SAFETY: the gap in front of `return_ptr` is now at least `size_of::<*mut c_void>()`
        // bytes, so there is space for a pointer right before `return_ptr` in the allocation
        unsafe {
            let original_ptr = return_ptr.sub(core::mem::size_of::<*mut c_void>());
            core::ptr::write_unaligned(original_ptr.cast::<*mut c_void>(), ptr);
        };

        // Return the properly aligned pointer into the allocation
        let ptr = return_ptr.cast::<c_void>();

        debug_assert_eq!(ptr as usize % layout.align(), 0);

        ptr
    }

    fn allocate_layout_zeroed(&self, layout: Layout) -> *mut c_void {
        assert!(layout.align() <= ALIGN.into());

        #[cfg(feature = "rust-allocator")]
        if self.zalloc == RUST.zalloc {
            // SAFETY: `zalloc_rust_calloc` is safe to call with any argument values.
            let ptr = unsafe { zalloc_rust_calloc(self.opaque, layout.size() as _, 1) };

            debug_assert_eq!(ptr as usize % layout.align(), 0);

            return ptr;
        }

        #[cfg(feature = "c-allocator")]
        if self.zalloc == C.zalloc {
            let alloc = Allocator {
                zalloc: zalloc_c_calloc,
                zfree: zfree_c,
                opaque: core::ptr::null_mut(),
                _marker: PhantomData,
            };

            return alloc.allocate_layout(layout);
        }

        // create the allocation (contents are uninitialized)
        let ptr = self.allocate_layout(layout);

        if !ptr.is_null() {
            // zero all contents (thus initializing the buffer)
            // SAFETY: `ptr` is non-null and valid for `layout.size()` bytes.
            unsafe { core::ptr::write_bytes(ptr, 0u8, layout.size()) };
        }

        ptr
    }

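    /// Allocates (uninitialized) memory for a single `T`, returning `None` if the
    /// allocation fails.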
    pub fn allocate_raw<T>(&self) -> Option<NonNull<T>> {
        NonNull::new(self.allocate_layout(Layout::new::<T>()).cast())
    }

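    /// Allocates (uninitialized) memory for `len` values of type `T`, returning `None` if
    /// the allocation fails or if the size of the array layout would overflow `isize`.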
    pub fn allocate_slice_raw<T>(&self, len: usize) -> Option<NonNull<T>> {
        NonNull::new(self.allocate_layout(Layout::array::<T>(len).ok()?).cast())
    }

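    /// Allocates zero-initialized memory for a single `T`, returning `None` if the
    /// allocation fails.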
    pub fn allocate_zeroed_raw<T>(&self) -> Option<NonNull<T>> {
        NonNull::new(self.allocate_layout_zeroed(Layout::new::<T>()).cast())
    }

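    /// Allocates a zero-initialized buffer of `len` bytes, returning `None` if the
    /// allocation fails.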
    pub fn allocate_zeroed_buffer(&self, len: usize) -> Option<NonNull<u8>> {
        let layout = Layout::array::<u8>(len).ok()?;
        NonNull::new(self.allocate_layout_zeroed(layout).cast())
    }

    /// # Panics
    ///
    /// - when `len` is 0 (in the Rust-allocator special case)
    ///
    /// # Safety
    ///
    /// - `ptr` must be allocated with this allocator
    /// - `len` must be the number of `T`s that are in this allocation
    #[allow(unused)] // Rust needs `len` for deallocation
    pub unsafe fn deallocate<T>(&self, ptr: *mut T, len: usize) {
        if !ptr.is_null() {
            // Special case for the Rust `alloc` backed allocator
            #[cfg(feature = "rust-allocator")]
            if self.zfree == RUST.zfree {
                assert_ne!(len, 0, "invalid size for {ptr:?}");
                let mut size = core::mem::size_of::<T>() * len;
                // SAFETY: The caller ensured that ptr was allocated with this allocator, and
                // we initialized size above.
                return unsafe { (RUST.zfree)(&mut size as *mut usize as *mut c_void, ptr.cast()) };
            }

            // General case for c-style allocation
            // SAFETY: allocate_layout allocates extra space at the start so that *ptr is preceded
            // by a pointer holding the address of the actual allocation. Therefore, it is safe to
            // subtract size_of::<*const c_void>() from the pointer, read the original allocation
            // pointer from that address, and pass it to the low-level free function.
            unsafe {
                let original_ptr = (ptr as *mut u8).sub(core::mem::size_of::<*const c_void>());
                let free_ptr = core::ptr::read_unaligned(original_ptr as *mut *mut c_void);

                (self.zfree)(self.opaque, free_ptr)
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use core::sync::atomic::{AtomicPtr, Ordering};
    use std::ptr;
    use std::sync::Mutex;

    use super::*;

    static PTR: AtomicPtr<c_void> = AtomicPtr::new(core::ptr::null_mut());
    static MUTEX: Mutex<()> = Mutex::new(());

    unsafe extern "C" fn unaligned_alloc(
        _opaque: *mut c_void,
        _items: c_uint,
        _size: c_uint,
    ) -> *mut c_void {
        PTR.load(Ordering::Relaxed)
    }

    unsafe extern "C" fn unaligned_free(_opaque: *mut c_void, ptr: *mut c_void) {
        let expected = PTR.load(Ordering::Relaxed);
        assert_eq!(expected, ptr)
    }

    fn unaligned_allocator_help<T>() {
        let mut buf = [0u8; 1024];

        // we don't want anyone else messing with the PTR static
        let _guard = MUTEX.lock().unwrap();

        for i in 0..64 {
            let ptr = unsafe { buf.as_mut_ptr().add(i).cast() };
            PTR.store(ptr, Ordering::Relaxed);

            let allocator = Allocator {
                zalloc: unaligned_alloc,
                zfree: unaligned_free,
                opaque: core::ptr::null_mut(),
                _marker: PhantomData,
            };

            let ptr = allocator.allocate_raw::<T>().unwrap().as_ptr();
            assert_eq!(ptr as usize % core::mem::align_of::<T>(), 0);
            unsafe { allocator.deallocate(ptr, 1) }

            let ptr = allocator.allocate_slice_raw::<T>(10).unwrap().as_ptr();
            assert_eq!(ptr as usize % core::mem::align_of::<T>(), 0);
            unsafe { allocator.deallocate(ptr, 10) }
        }
    }

    #[test]
    fn unaligned_allocator_0() {
        unaligned_allocator_help::<()>()
    }

    #[test]
    fn unaligned_allocator_1() {
        unaligned_allocator_help::<u8>()
    }

    #[test]
    fn unaligned_allocator_2() {
        unaligned_allocator_help::<u16>()
    }

    #[test]
    fn unaligned_allocator_4() {
        unaligned_allocator_help::<u32>()
    }

    #[test]
    fn unaligned_allocator_8() {
        unaligned_allocator_help::<u64>()
    }

    #[test]
    fn unaligned_allocator_16() {
        unaligned_allocator_help::<u128>()
    }

    #[test]
    fn unaligned_allocator_32() {
        #[repr(C, align(32))]
        struct Align32(u8);

        unaligned_allocator_help::<Align32>()
    }

    #[test]
    fn unaligned_allocator_64() {
        #[repr(C, align(64))]
        struct Align64(u8);

        unaligned_allocator_help::<Align64>()
    }

    fn test_allocate_zeroed_help(allocator: Allocator) {
        #[repr(C, align(64))]
        struct Align64(u8);

        let ptr = allocator.allocate_zeroed_raw::<Align64>();
        assert!(ptr.is_some());

        let ptr = ptr.unwrap();
        // the returned memory must be zero-initialized
        assert_eq!(unsafe { ptr.as_ref().0 }, 0);
        unsafe { allocator.deallocate(ptr.as_ptr(), 1) };
    }

    #[test]
    fn test_allocate_zeroed() {
        #[cfg(feature = "rust-allocator")]
        test_allocate_zeroed_help(RUST);

        #[cfg(feature = "c-allocator")]
        test_allocate_zeroed_help(C);

        assert!(FAIL.allocate_zeroed_raw::<u128>().is_none());
    }

    fn test_allocate_zeroed_buffer_help(allocator: Allocator) {
        let len = 42;
        let Some(buf) = allocator.allocate_zeroed_buffer(len) else {
            return;
        };

        let slice = unsafe { core::slice::from_raw_parts_mut(buf.as_ptr(), len) };

        assert_eq!(slice.iter().sum::<u8>(), 0);

        unsafe { allocator.deallocate(buf.as_ptr(), len) };
    }

    #[test]
    fn test_allocate_buffer_zeroed() {
        #[cfg(feature = "rust-allocator")]
        test_allocate_zeroed_buffer_help(RUST);

        #[cfg(feature = "c-allocator")]
        test_allocate_zeroed_buffer_help(C);

        test_allocate_zeroed_buffer_help(FAIL);
    }

    #[test]
    fn test_deallocate_null() {
        unsafe {
            #[cfg(feature = "rust-allocator")]
            (RUST.zfree)(core::ptr::null_mut(), core::ptr::null_mut());

            #[cfg(feature = "c-allocator")]
            (C.zfree)(core::ptr::null_mut(), core::ptr::null_mut());

            (FAIL.zfree)(core::ptr::null_mut(), core::ptr::null_mut());
        }
    }

    #[test]
    fn test_allocate_zero_size() {
        // Verify that zero-size allocation requests return a null pointer.
        unsafe {
            assert!(zalloc_c(ptr::null_mut(), 1, 0).is_null());
            assert!(zalloc_c(ptr::null_mut(), 0, 1).is_null());
            assert!(zalloc_c_calloc(ptr::null_mut(), 1, 0).is_null());
            assert!(zalloc_c_calloc(ptr::null_mut(), 0, 1).is_null());
            #[cfg(feature = "rust-allocator")]
            {
                assert!(zalloc_rust(ptr::null_mut(), 1, 0).is_null());
                assert!(zalloc_rust(ptr::null_mut(), 0, 1).is_null());
                assert!(zalloc_rust_calloc(ptr::null_mut(), 1, 0).is_null());
                assert!(zalloc_rust_calloc(ptr::null_mut(), 0, 1).is_null());
            }
        }
    }
}