// zerocopy/util/mod.rs
1// Copyright 2023 The Fuchsia Authors
2//
3// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
4// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
5// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
6// This file may not be copied, modified, or distributed except according to
7// those terms.
8
9#[macro_use]
10mod macros;
11
12#[doc(hidden)]
13pub mod macro_util;
14
15use core::{
16 cell::UnsafeCell,
17 marker::PhantomData,
18 mem::{self, ManuallyDrop, MaybeUninit},
19 num::{NonZeroUsize, Wrapping},
20 ptr::NonNull,
21};
22
23use crate::{
24 error::AlignmentError,
25 pointer::invariant::{self, Invariants},
26 Unalign,
27};
28
/// A type which has the same layout as the type it wraps.
///
/// # Safety
///
/// `T: TransparentWrapper` implies that `T` has the same size as [`T::Inner`].
/// Further, `T: TransparentWrapper<I>` implies that:
/// - If `T::UnsafeCellVariance = Covariant`, then `T` has `UnsafeCell`s
///   covering the same byte ranges as `T::Inner`.
/// - If a `T` pointer satisfies the alignment invariant `I::Alignment`, then
///   that same pointer, cast to `T::Inner`, satisfies the alignment invariant
///   `<T::AlignmentVariance as AlignmentVariance<I::Alignment>>::Applied`.
/// - If a `T` pointer satisfies the validity invariant `I::Validity`, then that
///   same pointer, cast to `T::Inner`, satisfies the validity invariant
///   `<T::ValidityVariance as ValidityVariance<I::Validity>>::Applied`.
///
/// [`T::Inner`]: TransparentWrapper::Inner
/// [`UnsafeCell`]: core::cell::UnsafeCell
/// [`T::AlignmentVariance`]: TransparentWrapper::AlignmentVariance
/// [`T::ValidityVariance`]: TransparentWrapper::ValidityVariance
#[doc(hidden)]
pub unsafe trait TransparentWrapper<I: Invariants> {
    /// The wrapped type; per the trait's safety contract, it has the same
    /// size as `Self`.
    type Inner: ?Sized;

    /// Whether `Self`'s `UnsafeCell`s cover the same byte ranges as
    /// `Inner`'s (`Covariant`) or whether no such claim is made
    /// (`Invariant`).
    type UnsafeCellVariance;
    /// Maps an alignment invariant on a `Self` pointer to the alignment
    /// invariant satisfied by the same pointer cast to `Inner`.
    type AlignmentVariance: AlignmentVariance<I::Alignment>;
    /// Maps a validity invariant on a `Self` pointer to the validity
    /// invariant satisfied by the same pointer cast to `Inner`.
    type ValidityVariance: ValidityVariance<I::Validity>;

    /// Casts a wrapper pointer to an inner pointer.
    ///
    /// # Safety
    ///
    /// The resulting pointer has the same address and provenance as `ptr`, and
    /// addresses the same number of bytes.
    ///
    /// (Since this method is not `unsafe`, this is a guarantee which
    /// implementations must uphold, not an obligation on callers.)
    fn cast_into_inner(ptr: *mut Self) -> *mut Self::Inner;

    /// Casts an inner pointer to a wrapper pointer.
    ///
    /// # Safety
    ///
    /// The resulting pointer has the same address and provenance as `ptr`, and
    /// addresses the same number of bytes.
    ///
    /// (Since this method is not `unsafe`, this is a guarantee which
    /// implementations must uphold, not an obligation on callers.)
    fn cast_from_inner(ptr: *mut Self::Inner) -> *mut Self;
}
72
/// Maps an alignment invariant `I` on a wrapper pointer to the alignment
/// invariant which holds for the corresponding inner pointer.
#[allow(unreachable_pub)]
#[doc(hidden)]
pub trait AlignmentVariance<I: invariant::Alignment> {
    /// The alignment invariant satisfied by the inner pointer.
    type Applied: invariant::Alignment;
}
78
/// Maps a validity invariant `I` on a wrapper pointer to the validity
/// invariant which holds for the corresponding inner pointer.
#[allow(unreachable_pub)]
#[doc(hidden)]
pub trait ValidityVariance<I: invariant::Validity> {
    /// The validity invariant satisfied by the inner pointer.
    type Applied: invariant::Validity;
}
84
/// A variance marker: the wrapper preserves the invariant in question, so the
/// inner pointer satisfies the same invariant as the wrapper pointer.
#[doc(hidden)]
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub enum Covariant {}

// Covariance preserves the alignment invariant unchanged.
impl<I: invariant::Alignment> AlignmentVariance<I> for Covariant {
    type Applied = I;
}

// Covariance preserves the validity invariant unchanged.
impl<I: invariant::Validity> ValidityVariance<I> for Covariant {
    type Applied = I;
}
96
/// A variance marker: the wrapper makes no claim about the invariant in
/// question, so the inner pointer is only known to satisfy the trivial `Any`
/// invariant.
#[doc(hidden)]
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub enum Invariant {}

// Nothing is preserved, so only the trivial `Any` invariant can be applied.
impl<I: invariant::Alignment> AlignmentVariance<I> for Invariant {
    type Applied = invariant::Any;
}

// Nothing is preserved, so only the trivial `Any` invariant can be applied.
impl<I: invariant::Validity> ValidityVariance<I> for Invariant {
    type Applied = invariant::Any;
}
108
// SAFETY:
// - Per [1], `MaybeUninit<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
//
//   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
//   `T`
unsafe impl<T, I: Invariants> TransparentWrapper<I> for MaybeUninit<T> {
    type Inner = T;

    // SAFETY: `MaybeUninit<T>` has `UnsafeCell`s covering the same byte ranges
    // as `Inner = T`. This is not explicitly documented, but it can be
    // inferred. Per [1] in the preceding safety comment, `MaybeUninit<T>` has
    // the same size as `T`. Further, note the signature of
    // `MaybeUninit::assume_init_ref` [2]:
    //
    //   pub unsafe fn assume_init_ref(&self) -> &T
    //
    // If the argument `&MaybeUninit<T>` and the returned `&T` had `UnsafeCell`s
    // at different offsets, this would be unsound. Its existence is proof that
    // this is not the case.
    //
    // [2] https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#method.assume_init_ref
    type UnsafeCellVariance = Covariant;
    // SAFETY: Per [1], `MaybeUninit<T>` has the same layout as `T`, and thus
    // has the same alignment as `T`.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
    //
    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and
    //   ABI as `T`.
    type AlignmentVariance = Covariant;
    // SAFETY: `MaybeUninit` has no validity invariants. Thus, a valid
    // `MaybeUninit<T>` is not necessarily a valid `T`.
    type ValidityVariance = Invariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut MaybeUninit<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `MaybeUninit<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<T>()
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut MaybeUninit<T> {
        // SAFETY: Per [1] (from comment above), `MaybeUninit<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<MaybeUninit<T>>()
    }
}
164
// SAFETY:
// - Per [1], `ManuallyDrop<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html:
//
//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
//   `T`
unsafe impl<T: ?Sized, I: Invariants> TransparentWrapper<I> for ManuallyDrop<T> {
    type Inner = T;

    // SAFETY: Per [1], `ManuallyDrop<T>` has `UnsafeCell`s covering the same
    // byte ranges as `Inner = T`.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html:
    //
    //   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //   validity as `T`, and is subject to the same layout optimizations as
    //   `T`. As a consequence, it has no effect on the assumptions that the
    //   compiler makes about its contents.
    type UnsafeCellVariance = Covariant;
    // SAFETY: Per [1], `ManuallyDrop<T>` has the same layout as `T`, and thus
    // has the same alignment as `T`.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html:
    //
    //   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //   validity as `T`
    type AlignmentVariance = Covariant;

    // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same bit
    // validity as `T`.
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut ManuallyDrop<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same
        // layout as `T`. Thus, this cast preserves size even if `T` is unsized.
        //
        // This cast trivially preserves provenance.
        //
        // Note: we use an `as` cast rather than `.cast()` because
        // `pointer::cast` requires a `Sized` target type, and `T: ?Sized` here.
        #[allow(clippy::as_conversions)]
        return ptr as *mut T;
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut ManuallyDrop<T> {
        // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same
        // layout as `T`. Thus, this cast preserves size even if `T` is unsized.
        //
        // This cast trivially preserves provenance.
        //
        // Note: we use an `as` cast rather than `.cast()` because
        // `pointer::cast` requires a `Sized` target type, and `T: ?Sized` here.
        #[allow(clippy::as_conversions)]
        return ptr as *mut ManuallyDrop<T>;
    }
}
219
// SAFETY:
// - Per [1], `Wrapping<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/num/struct.Wrapping.html#layout-1:
//
//   `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
unsafe impl<T, I: Invariants> TransparentWrapper<I> for Wrapping<T> {
    type Inner = T;

    // SAFETY: Per [1], `Wrapping<T>` has the same layout as `T`. Since its
    // single field (of type `T`) is public, it would be a breaking change to
    // add or remove fields. Thus, we know that `Wrapping<T>` contains a `T` (as
    // opposed to just having the same size and alignment as `T`) with no pre-
    // or post-padding. Thus, `Wrapping<T>` must have `UnsafeCell`s covering the
    // same byte ranges as `Inner = T`.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/num/struct.Wrapping.html#layout-1:
    //
    //   `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
    type UnsafeCellVariance = Covariant;
    // SAFETY: Per [1], `Wrapping<T>` has the same layout as `T`, and thus has
    // the same alignment as `T`.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/core/num/struct.Wrapping.html#layout-1:
    //
    //   `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
    type AlignmentVariance = Covariant;

    // SAFETY: `Wrapping<T>` has only one field, which is `pub` [2]. We are also
    // guaranteed per [1] (from the comment above) that `Wrapping<T>` has the
    // same layout as `T`. The only way for both of these to be true
    // simultaneously is for `Wrapping<T>` to have the same bit validity as `T`.
    // In particular, in order to change the bit validity, one of the following
    // would need to happen:
    // - `Wrapping` could change its `repr`, but this would violate the layout
    //   guarantee.
    // - `Wrapping` could add or change its fields, but this would be a
    //   stability-breaking change.
    //
    // [2] https://doc.rust-lang.org/1.81.0/core/num/struct.Wrapping.html
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut Wrapping<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `Wrapping<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<T>()
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut Wrapping<T> {
        // SAFETY: Per [1] (from comment above), `Wrapping<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<Wrapping<T>>()
    }
}
281
// SAFETY:
// - Per [1], `UnsafeCell<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
//   `UnsafeCell<T>` has the same in-memory representation as its inner type
//   `T`.
unsafe impl<T: ?Sized, I: Invariants> TransparentWrapper<I> for UnsafeCell<T> {
    type Inner = T;

    // SAFETY: Since we set this to `Invariant`, we make no safety claims.
    type UnsafeCellVariance = Invariant;

    // SAFETY: Per [1] (from comment on impl), `UnsafeCell<T>` has the same
    // representation as `T`, and thus has the same alignment as `T`.
    type AlignmentVariance = Covariant;

    // SAFETY: Per [1], `UnsafeCell<T>` has the same bit validity as `T`.
    // Technically the term "representation" doesn't guarantee this, but the
    // subsequent sentence in the documentation makes it clear that this is the
    // intention.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout:
    //
    //   `UnsafeCell<T>` has the same in-memory representation as its inner type
    //   `T`. A consequence of this guarantee is that it is possible to convert
    //   between `T` and `UnsafeCell<T>`.
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut UnsafeCell<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `UnsafeCell<T>` has the same
        // representation as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        //
        // Note: we use an `as` cast rather than `.cast()` because
        // `pointer::cast` requires a `Sized` target type, and `T: ?Sized` here.
        #[allow(clippy::as_conversions)]
        return ptr as *mut T;
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut UnsafeCell<T> {
        // SAFETY: Per [1] (from comment above), `UnsafeCell<T>` has the same
        // representation as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        //
        // Note: we use an `as` cast rather than `.cast()` because
        // `pointer::cast` requires a `Sized` target type, and `T: ?Sized` here.
        #[allow(clippy::as_conversions)]
        return ptr as *mut UnsafeCell<T>;
    }
}
332
// SAFETY: `Unalign<T>` promises to have the same size as `T`.
//
// See inline comments for other safety justifications.
unsafe impl<T, I: Invariants> TransparentWrapper<I> for Unalign<T> {
    type Inner = T;

    // SAFETY: `Unalign<T>` promises to have `UnsafeCell`s covering the same
    // byte ranges as `Inner = T`.
    type UnsafeCellVariance = Covariant;

    // SAFETY: `Unalign<T>` promises to have alignment 1 regardless of `T`'s
    // alignment. Thus, an aligned pointer to `Unalign<T>` is not necessarily an
    // aligned pointer to `T`.
    type AlignmentVariance = Invariant;

    // SAFETY: `Unalign<T>` promises to have the same validity as `T`.
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut Unalign<T>) -> *mut T {
        // SAFETY: Per the safety comment on the impl block, `Unalign<T>` has
        // the same size as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<T>()
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut Unalign<T> {
        // SAFETY: Per the safety comment on the impl block, `Unalign<T>` has
        // the same size as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<Unalign<T>>()
    }
}
369
/// Implements `TransparentWrapper` for an atomic type.
///
/// # Safety
///
/// The caller promises that `$atomic` is an atomic type whose native
/// equivalent is `$native`.
#[cfg(all(
    zerocopy_target_has_atomics,
    any(
        target_has_atomic = "8",
        target_has_atomic = "16",
        target_has_atomic = "32",
        target_has_atomic = "64",
        target_has_atomic = "ptr"
    )
))]
macro_rules! unsafe_impl_transparent_wrapper_for_atomic {
    // Base case: no more `$atomic [$native]` pairs to process.
    ($(#[$attr:meta])* $(,)?) => {};
    // Recursive case: implement for the first pair, then recurse on the rest.
    ($(#[$attr:meta])* $atomic:ty [$native:ty], $($atomics:ty [$natives:ty]),* $(,)?) => {
        $(#[$attr])*
        // SAFETY: See safety comment in next match arm.
        unsafe impl<I: crate::invariant::Invariants> crate::util::TransparentWrapper<I> for $atomic {
            unsafe_impl_transparent_wrapper_for_atomic!(@inner $atomic [$native]);
        }
        unsafe_impl_transparent_wrapper_for_atomic!($(#[$attr])* $($atomics [$natives],)*);
    };
    // Generic case: implement for an atomic type which is itself generic over
    // `$tyvar` (e.g. `AtomicPtr<$tyvar>`).
    ($(#[$attr:meta])* $tyvar:ident => $atomic:ty [$native:ty]) => {
        // We implement for `$atomic` and set `Inner = $native`. The caller has
        // promised that `$atomic` and `$native` are an atomic type and its
        // native counterpart, respectively. Per [1], `$atomic` and `$native`
        // have the same size.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        $(#[$attr])*
        unsafe impl<$tyvar, I: crate::invariant::Invariants> crate::util::TransparentWrapper<I> for $atomic {
            unsafe_impl_transparent_wrapper_for_atomic!(@inner $atomic [$native]);
        }
    };
    // Internal case: emits the body shared by every generated impl.
    (@inner $atomic:ty [$native:ty]) => {
        type Inner = UnsafeCell<$native>;

        // SAFETY: It is "obvious" that each atomic type contains a single
        // `UnsafeCell` that covers all bytes of the type, but we can also prove
        // it:
        // - Since `$atomic` provides an API which permits loading and storing
        //   values of type `$native` via a `&self` (shared) reference, *some*
        //   interior mutation must be happening, and interior mutation can only
        //   happen via `UnsafeCell`. Further, there must be enough bytes in
        //   `$atomic` covered by an `UnsafeCell` to hold every possible value
        //   of `$native`.
        // - Per [1], `$atomic` has the same size as `$native`. This on its own
        //   isn't enough: it would still be possible for `$atomic` to store
        //   `$native` using a compact representation (for `$native` types for
        //   which some bit patterns are illegal). However, this is ruled out by
        //   the fact that `$atomic` has the same bit validity as `$native` [1].
        //   Thus, we can conclude that every byte of `$atomic` must be covered
        //   by an `UnsafeCell`.
        //
        // Thus, every byte of `$atomic` is covered by an `UnsafeCell`, and we
        // set `type Inner = UnsafeCell<$native>`. Thus, `Self` and
        // `Self::Inner` have `UnsafeCell`s covering the same byte ranges.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        type UnsafeCellVariance = crate::util::Covariant;

        // SAFETY: No safety justification is required for an invariant
        // variance.
        type AlignmentVariance = crate::util::Invariant;

        // SAFETY: Per [1], all atomic types have the same bit validity as their
        // native counterparts. The caller has promised that `$atomic` and
        // `$native` are an atomic type and its native counterpart,
        // respectively.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        type ValidityVariance = crate::util::Covariant;

        #[inline(always)]
        fn cast_into_inner(ptr: *mut $atomic) -> *mut UnsafeCell<$native> {
            // SAFETY: Per [1] (from comment on impl block), `$atomic` has the
            // same size as `$native`. Thus, this cast preserves size.
            //
            // This cast trivially preserves provenance.
            ptr.cast::<UnsafeCell<$native>>()
        }

        #[inline(always)]
        fn cast_from_inner(ptr: *mut UnsafeCell<$native>) -> *mut $atomic {
            // SAFETY: Per [1] (from comment on impl block), `$atomic` has the
            // same size as `$native`. Thus, this cast preserves size.
            //
            // This cast trivially preserves provenance.
            ptr.cast::<$atomic>()
        }
    };
}
475
/// Like [`PhantomData`], but [`Send`] and [`Sync`] regardless of whether the
/// wrapped `T` is.
pub(crate) struct SendSyncPhantomData<T: ?Sized>(PhantomData<T>);

// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Send for SendSyncPhantomData<T> {}
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Sync for SendSyncPhantomData<T> {}

impl<T: ?Sized> Default for SendSyncPhantomData<T> {
    fn default() -> SendSyncPhantomData<T> {
        SendSyncPhantomData(PhantomData)
    }
}

impl<T: ?Sized> PartialEq for SendSyncPhantomData<T> {
    fn eq(&self, other: &Self) -> bool {
        // All `PhantomData<T>`s of a given `T` are equal, so this always
        // returns `true`; we delegate for clarity.
        self.0.eq(&other.0)
    }
}

impl<T: ?Sized> Eq for SendSyncPhantomData<T> {}
500
/// A type which can be converted to a raw memory address.
pub(crate) trait AsAddress {
    /// Returns the address of `self` as a `usize`, discarding provenance and
    /// any wide-pointer metadata.
    fn addr(self) -> usize;
}
504
505impl<'a, T: ?Sized> AsAddress for &'a T {
506 #[inline(always)]
507 fn addr(self) -> usize {
508 let ptr: *const T = self;
509 AsAddress::addr(ptr)
510 }
511}
512
513impl<'a, T: ?Sized> AsAddress for &'a mut T {
514 #[inline(always)]
515 fn addr(self) -> usize {
516 let ptr: *const T = self;
517 AsAddress::addr(ptr)
518 }
519}
520
521impl<T: ?Sized> AsAddress for NonNull<T> {
522 #[inline(always)]
523 fn addr(self) -> usize {
524 AsAddress::addr(self.as_ptr())
525 }
526}
527
impl<T: ?Sized> AsAddress for *const T {
    #[inline(always)]
    fn addr(self) -> usize {
        // TODO(#181), TODO(https://github.com/rust-lang/rust/issues/95228): Use
        // `.addr()` instead of `as usize` once it's stable, and get rid of this
        // `allow`. Currently, `as usize` is the only way to accomplish this.
        //
        // The intermediate cast to the thin pointer type `*const ()` discards
        // any wide-pointer metadata (e.g. a slice length) so that only the
        // address itself is converted to `usize`.
        #[allow(clippy::as_conversions)]
        #[cfg_attr(
            __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
            allow(lossy_provenance_casts)
        )]
        return self.cast::<()>() as usize;
    }
}
542
543impl<T: ?Sized> AsAddress for *mut T {
544 #[inline(always)]
545 fn addr(self) -> usize {
546 let ptr: *const T = self;
547 AsAddress::addr(ptr)
548 }
549}
550
551/// Validates that `t` is aligned to `align_of::<U>()`.
552#[inline(always)]
553pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> {
554 // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in
555 // turn guarantees that this mod operation will not panic.
556 #[allow(clippy::arithmetic_side_effects)]
557 let remainder = t.addr() % mem::align_of::<U>();
558 if remainder == 0 {
559 Ok(())
560 } else {
561 // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`.
562 // That's only possible if `align_of::<U>() > 1`.
563 Err(unsafe { AlignmentError::new_unchecked(()) })
564 }
565}
566
/// Returns the bytes needed to pad `len` to the next multiple of `align`.
///
/// This function assumes that align is a power of two; there are no guarantees
/// on the answer it gives if this is not the case.
pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize {
    // Conceptually, the padding is `(align - (len % align)) % align`. Because
    // `align` is a power of two, `len % align` is simply the low bits of
    // `len`, i.e. `len & (align - 1)`.
    //
    // `align` is non-zero, so this subtraction cannot underflow.
    #[allow(clippy::arithmetic_side_effects)]
    let low_bits = align.get() - 1;

    // Rather than subtracting from `align` directly, we exploit the identity
    // that, for any `x`, `(!x) & low_bits` and `x & low_bits` sum to
    // `low_bits` (i.e., `align - 1`). Applying the complement to `len - 1`
    // therefore yields `align - (len % align)` reduced modulo `align`:
    // - If `len % align == r > 0`, then `(len - 1) & low_bits == r - 1`, so
    //   the complement's low bits are `(align - 1) - (r - 1) == align - r` —
    //   the correct padding.
    // - If `len % align == 0`, then `(len - 1) & low_bits == align - 1`, so
    //   the complement's low bits are `0` — also correct.
    // - If `len == 0`, the subtraction wraps to `usize::MAX`, whose complement
    //   is `0`, giving `0` padding as desired.
    //
    // The equivalence of this expression to the naive
    // `(align - (len % align)) % align` implementation for all power-of-two
    // `align` values can be verified mechanically with an SMT solver using the
    // theory of bitvectors.
    !(len.wrapping_sub(1)) & low_bits
}
624
/// Rounds `n` down to the largest value `m` such that `m <= n` and `m % align
/// == 0`.
///
/// # Panics
///
/// May panic if `align` is not a power of two. Even if it doesn't panic in this
/// case, it will produce nonsense results.
#[inline(always)]
pub(crate) const fn round_down_to_next_multiple_of_alignment(
    n: usize,
    align: NonZeroUsize,
) -> usize {
    let align = align.get();
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
    debug_assert!(align.is_power_of_two());

    // For a power-of-two `align`, clearing the low `log2(align)` bits of `n`
    // rounds it down to the nearest multiple of `align`.
    //
    // `align >= 1`, so this subtraction cannot underflow.
    #[allow(clippy::arithmetic_side_effects)]
    let low_bits = align - 1;
    n & !low_bits
}
646
/// Returns the larger of `a` and `b`.
///
/// `core::cmp::max` is not a `const fn`, so the comparison is written out by
/// hand.
pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if b.get() > a.get() {
        b
    } else {
        a
    }
}
654
/// Returns the smaller of `a` and `b`.
///
/// `core::cmp::min` is not a `const fn`, so the comparison is written out by
/// hand.
pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if b.get() < a.get() {
        b
    } else {
        a
    }
}
662
/// Copies `src` into the prefix of `dst`.
///
/// # Safety
///
/// The caller guarantees that `src.len() <= dst.len()`.
#[inline(always)]
pub(crate) unsafe fn copy_unchecked(src: &[u8], dst: &mut [u8]) {
    debug_assert!(src.len() <= dst.len());
    // SAFETY: This invocation satisfies the safety contract of
    // copy_nonoverlapping [1]:
    // - `src.as_ptr()` is trivially valid for reads of `src.len()` bytes
    // - `dst.as_ptr()` is valid for writes of `src.len()` bytes, because the
    //   caller has promised that `src.len() <= dst.len()`
    // - `src` and `dst` are, trivially, properly aligned
    // - the region of memory beginning at `src` with a size of `src.len()`
    //   bytes does not overlap with the region of memory beginning at `dst`
    //   with the same size, because `dst` is derived from an exclusive
    //   reference.
    //
    // [1] https://doc.rust-lang.org/1.81.0/core/ptr/fn.copy_nonoverlapping.html#safety
    unsafe {
        core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    };
}
685
/// Since we support multiple versions of Rust, there are often features which
/// have been stabilized in the most recent stable release which do not yet
/// exist (stably) on our MSRV. This module provides polyfills for those
/// features so that we can write more "modern" code, and just remove the
/// polyfill once our MSRV supports the corresponding feature. Without this,
/// we'd have to write worse/more verbose code and leave TODO comments sprinkled
/// throughout the codebase to update to the new pattern once it's stabilized.
///
/// Each trait is imported as `_` at the crate root; each polyfill should "just
/// work" at usage sites.
pub(crate) mod polyfills {
    use core::ptr::{self, NonNull};

    // A polyfill for `NonNull::slice_from_raw_parts` that we can use before our
    // MSRV is 1.70, when that function was stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `ptr.slice_from_raw_parts()` resolves to the inherent
    // method rather than to this trait, and so this trait is considered unused.
    //
    // TODO(#67): Once our MSRV is 1.70, remove this.
    #[allow(unused)]
    pub(crate) trait NonNullExt<T> {
        /// Forms a non-null raw slice from a thin data pointer and a length.
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>;
    }

    impl<T> NonNullExt<T> for NonNull<T> {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr(coverage_nightly, coverage(off))]
        #[inline(always)]
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> {
            let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len);
            // SAFETY: `ptr` is converted from `data`, which is non-null.
            unsafe { NonNull::new_unchecked(ptr) }
        }
    }

    // A polyfill for `Self::unchecked_sub` that we can use until methods like
    // `usize::unchecked_sub` is stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `x.unchecked_sub()` resolves to the inherent method
    // rather than to this trait, and so this trait is considered unused.
    //
    // TODO(#67): Once our MSRV is high enough, remove this.
    #[allow(unused)]
    pub(crate) trait NumExt {
        /// Subtract without checking for underflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the subtraction will not underflow.
        unsafe fn unchecked_sub(self, rhs: Self) -> Self;
    }

    impl NumExt for usize {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr(coverage_nightly, coverage(off))]
        #[inline(always)]
        unsafe fn unchecked_sub(self, rhs: usize) -> usize {
            match self.checked_sub(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the subtraction will not
                    // underflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }
    }
}
761
#[cfg(test)]
pub(crate) mod testutil {
    use crate::*;

    /// A `T` which is aligned to at least `align_of::<A>()`.
    #[derive(Default)]
    pub(crate) struct Align<T, A> {
        pub(crate) t: T,
        // Zero-length array: contributes `A`'s alignment but no size.
        _a: [A; 0],
    }

    impl<T: Default, A> Align<T, A> {
        /// Resets `self.t` to `T::default()`.
        pub(crate) fn set_default(&mut self) {
            self.t = T::default();
        }
    }

    impl<T, A> Align<T, A> {
        pub(crate) const fn new(t: T) -> Align<T, A> {
            Align { t, _a: [] }
        }
    }

    /// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
    ///
    /// It must be the case that `align_of::<T>() < align_of::<A>()` in order
    /// for this type to work properly.
    #[repr(C)]
    pub(crate) struct ForceUnalign<T: Unaligned, A> {
        // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
        // placed at the minimum offset that guarantees its alignment. If
        // `align_of::<T>() < align_of::<A>()`, then that offset will be
        // guaranteed *not* to satisfy `align_of::<A>()`.
        //
        // Note that we need `T: Unaligned` in order to guarantee that there is
        // no padding between `_u` and `t`.
        _u: u8,
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Unaligned, A> ForceUnalign<T, A> {
        pub(crate) fn new(t: T) -> ForceUnalign<T, A> {
            ForceUnalign { _u: 0, t, _a: [] }
        }
    }
    // A `u64` with alignment 8.
    //
    // Though `u64` has alignment 8 on some platforms, it's not guaranteed. By
    // contrast, `AU64` is guaranteed to have alignment 8 on all platforms.
    #[derive(
        KnownLayout,
        Immutable,
        FromBytes,
        IntoBytes,
        Eq,
        PartialEq,
        Ord,
        PartialOrd,
        Default,
        Debug,
        Copy,
        Clone,
    )]
    #[repr(C, align(8))]
    pub(crate) struct AU64(pub(crate) u64);

    impl AU64 {
        // Converts this `AU64` to bytes using this platform's endianness.
        pub(crate) fn to_bytes(self) -> [u8; 8] {
            crate::transmute!(self)
        }
    }

    impl Display for AU64 {
        #[cfg_attr(coverage_nightly, coverage(off))]
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            Display::fmt(&self.0, f)
        }
    }

    /// A `repr(C)` pair of fields used by tests; `U` may be unsized.
    #[derive(Immutable, FromBytes, Eq, PartialEq, Ord, PartialOrd, Default, Debug, Copy, Clone)]
    #[repr(C)]
    pub(crate) struct Nested<T, U: ?Sized> {
        _t: T,
        _u: U,
    }
}
850
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_down_to_next_multiple_of_alignment() {
        // Model implementation: divide and re-multiply rounds down to a
        // multiple of `align`.
        fn alt_impl(n: usize, align: NonZeroUsize) -> usize {
            let mul = n / align.get();
            mul * align.get()
        }

        for align in [1, 2, 4, 8, 16] {
            for n in 0..256 {
                let align = NonZeroUsize::new(align).unwrap();
                let want = alt_impl(n, align);
                let got = round_down_to_next_multiple_of_alignment(n, align);
                assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
            }
        }
    }

    #[rustversion::since(1.57.0)]
    #[test]
    #[should_panic]
    fn test_round_down_to_next_multiple_of_alignment_zerocopy_panic_in_const_and_vec_try_reserve() {
        // NOTE(review): This test relies on the
        // `zerocopy_panic_in_const_and_vec_try_reserve` cfg being set in test
        // builds so that the cfg-gated `debug_assert!` in
        // `round_down_to_next_multiple_of_alignment` fires for the
        // non-power-of-two alignment `3` — confirm the test configuration
        // enables that cfg.
        round_down_to_next_multiple_of_alignment(0, NonZeroUsize::new(3).unwrap());
    }
}
879
#[cfg(kani)]
mod proofs {
    use super::*;

    #[kani::proof]
    fn prove_round_down_to_next_multiple_of_alignment() {
        // Model implementation: divide and re-multiply rounds down to a
        // multiple of `align`.
        fn model_impl(n: usize, align: NonZeroUsize) -> usize {
            assert!(align.get().is_power_of_two());
            let mul = n / align.get();
            mul * align.get()
        }

        let align: NonZeroUsize = kani::any();
        kani::assume(align.get().is_power_of_two());
        let n: usize = kani::any();

        let expected = model_impl(n, align);
        let actual = round_down_to_next_multiple_of_alignment(n, align);
        assert_eq!(expected, actual, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
    }

    // Restricted to nightly since we use the unstable `usize::next_multiple_of`
    // in our model implementation.
    #[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)]
    #[kani::proof]
    fn prove_padding_needed_for() {
        fn model_impl(len: usize, align: NonZeroUsize) -> usize {
            let padded = len.next_multiple_of(align.get());
            let padding = padded - len;
            padding
        }

        let align: NonZeroUsize = kani::any();
        kani::assume(align.get().is_power_of_two());
        let len: usize = kani::any();
        // Constrain `len` to valid Rust lengths, since our model implementation
        // isn't robust to overflow.
        kani::assume(len <= isize::MAX as usize);
        kani::assume(align.get() < 1 << 29);

        let expected = model_impl(len, align);
        let actual = padding_needed_for(len, align);
        assert_eq!(expected, actual, "padding_needed_for({}, {})", len, align);

        // Sanity checks: the padded length is a multiple of `align`, and
        // padding never decreases the number of whole `align`-sized chunks.
        let padded_len = actual + len;
        assert_eq!(padded_len % align, 0);
        assert!(padded_len / align >= len / align);
    }
}