zerocopy/
lib.rs

1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13//   cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! *<span style="font-size: 100%; color:grey;">Need more out of zerocopy?
16//! Submit a [customer request issue][customer-request-issue]!</span>*
17//!
18//! ***<span style="font-size: 140%">Fast, safe, <span
19//! style="color:red;">compile error</span>. Pick two.</span>***
20//!
21//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
22//! so you don't have to.
23//!
24//! *Thanks for using zerocopy 0.8! For an overview of what changes from 0.7,
25//! check out our [release notes][release-notes], which include a step-by-step
26//! guide for upgrading from 0.7.*
27//!
28//! *Have questions? Need help? Ask the maintainers on [GitHub][github-q-a] or
29//! on [Discord][discord]!*
30//!
31//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
32//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
33//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
34//! [discord]: https://discord.gg/MAvWH2R6zk
35//!
36//! # Overview
37//!
38//! ##### Conversion Traits
39//!
40//! Zerocopy provides four derivable traits for zero-cost conversions:
41//! - [`TryFromBytes`] indicates that a type may safely be converted from
42//!   certain byte sequences (conditional on runtime checks)
43//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
44//!   instance of a type
45//! - [`FromBytes`] indicates that a type may safely be converted from an
46//!   arbitrary byte sequence
47//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
48//!   sequence
49//!
50//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
51//!
52//! [slice-dsts]: KnownLayout#dynamically-sized-types
53//!
54//! ##### Marker Traits
55//!
56//! Zerocopy provides three derivable marker traits that do not provide any
57//! functionality themselves, but are required to call certain methods provided
58//! by the conversion traits:
59//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
60//!   qualities of a type
61//! - [`Immutable`] indicates that a type is free from interior mutability,
62//!   except by ownership or an exclusive (`&mut`) borrow
63//! - [`Unaligned`] indicates that a type's alignment requirement is 1
64//!
65//! You should generally derive these marker traits whenever possible.
66//!
67//! ##### Conversion Macros
68//!
69//! Zerocopy provides six macros for safe casting between types:
70//!
71//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
72//!   one type to a value of another type of the same size
73//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
74//!   mutable reference of one type to a mutable reference of another type of
75//!   the same size
76//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
77//!   mutable or immutable reference of one type to an immutable reference of
78//!   another type of the same size
79//!
80//! These macros perform *compile-time* size and alignment checks, meaning that
81//! unconditional casts have zero cost at runtime. Conditional casts do not need
//! to validate size or alignment at runtime, but do need to validate contents.
83//!
84//! These macros cannot be used in generic contexts. For generic conversions,
85//! use the methods defined by the [conversion traits](#conversion-traits).
86//!
87//! ##### Byteorder-Aware Numerics
88//!
89//! Zerocopy provides byte-order aware integer types that support these
90//! conversions; see the [`byteorder`] module. These types are especially useful
91//! for network parsing.
92//!
93//! # Cargo Features
94//!
95//! - **`alloc`**
96//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
97//!   the `alloc` crate is added as a dependency, and some allocation-related
98//!   functionality is added.
99//!
100//! - **`std`**
101//!   By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
//!   `std` crate is added as a dependency (i.e., `no_std` is disabled), and
103//!   support for some `std` types is added. `std` implies `alloc`.
104//!
105//! - **`derive`**
106//!   Provides derives for the core marker traits via the `zerocopy-derive`
107//!   crate. These derives are re-exported from `zerocopy`, so it is not
108//!   necessary to depend on `zerocopy-derive` directly.
109//!
110//!   However, you may experience better compile times if you instead directly
111//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
112//!   since doing so will allow Rust to compile these crates in parallel. To do
113//!   so, do *not* enable the `derive` feature, and list both dependencies in
114//!   your `Cargo.toml` with the same leading non-zero version number; e.g:
115//!
116//!   ```toml
117//!   [dependencies]
118//!   zerocopy = "0.X"
119//!   zerocopy-derive = "0.X"
120//!   ```
121//!
122//!   To avoid the risk of [duplicate import errors][duplicate-import-errors] if
123//!   one of your dependencies enables zerocopy's `derive` feature, import
124//!   derives as `use zerocopy_derive::*` rather than by name (e.g., `use
125//!   zerocopy_derive::FromBytes`).
126//!
127//! - **`simd`**
128//!   When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
129//!   `IntoBytes` impls are emitted for all stable SIMD types which exist on the
130//!   target platform. Note that the layout of SIMD types is not yet stabilized,
131//!   so these impls may be removed in the future if layout changes make them
132//!   invalid. For more information, see the Unsafe Code Guidelines Reference
133//!   page on the [layout of packed SIMD vectors][simd-layout].
134//!
135//! - **`simd-nightly`**
136//!   Enables the `simd` feature and adds support for SIMD types which are only
137//!   available on nightly. Since these types are unstable, support for any type
138//!   may be removed at any point in the future.
139//!
140//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
141//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
142//!
143//! # Security Ethos
144//!
145//! Zerocopy is expressly designed for use in security-critical contexts. We
//! strive to ensure that zerocopy code is sound under Rust's current
147//! memory model, and *any future memory model*. We ensure this by:
148//! - **...not 'guessing' about Rust's semantics.**
149//!   We annotate `unsafe` code with a precise rationale for its soundness that
150//!   cites a relevant section of Rust's official documentation. When Rust's
151//!   documented semantics are unclear, we work with the Rust Operational
152//!   Semantics Team to clarify Rust's documentation.
153//! - **...rigorously testing our implementation.**
154//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
155//!   array of supported target platforms of varying endianness and pointer
156//!   width, and across both current and experimental memory models of Rust.
157//! - **...formally proving the correctness of our implementation.**
158//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
159//!   correctness.
160//!
161//! For more information, see our full [soundness policy].
162//!
163//! [Miri]: https://github.com/rust-lang/miri
164//! [Kani]: https://github.com/model-checking/kani
165//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
166//!
167//! # Relationship to Project Safe Transmute
168//!
169//! [Project Safe Transmute] is an official initiative of the Rust Project to
170//! develop language-level support for safer transmutation. The Project consults
171//! with crates like zerocopy to identify aspects of safer transmutation that
172//! would benefit from compiler support, and has developed an [experimental,
173//! compiler-supported analysis][mcp-transmutability] which determines whether,
174//! for a given type, any value of that type may be soundly transmuted into
175//! another type. Once this functionality is sufficiently mature, zerocopy
176//! intends to replace its internal transmutability analysis (implemented by our
177//! custom derives) with the compiler-supported one. This change will likely be
178//! an implementation detail that is invisible to zerocopy's users.
179//!
180//! Project Safe Transmute will not replace the need for most of zerocopy's
181//! higher-level abstractions. The experimental compiler analysis is a tool for
182//! checking the soundness of `unsafe` code, not a tool to avoid writing
183//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
184//! will still be required in order to provide higher-level abstractions on top
185//! of the building block provided by Project Safe Transmute.
186//!
187//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
188//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
189//!
190//! # MSRV
191//!
192//! See our [MSRV policy].
193//!
194//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
195//!
196//! # Changelog
197//!
198//! Zerocopy uses [GitHub Releases].
199//!
200//! [GitHub Releases]: https://github.com/google/zerocopy/releases
201
202// Sometimes we want to use lints which were added after our MSRV.
203// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
204// this attribute, any unknown lint would cause a CI failure when testing with
205// our MSRV.
206#![allow(unknown_lints, unreachable_patterns)]
207#![deny(renamed_and_removed_lints)]
208#![deny(
209    anonymous_parameters,
210    deprecated_in_future,
211    late_bound_lifetime_arguments,
212    missing_copy_implementations,
213    missing_debug_implementations,
214    missing_docs,
215    path_statements,
216    patterns_in_fns_without_body,
217    rust_2018_idioms,
218    trivial_numeric_casts,
219    unreachable_pub,
220    unsafe_op_in_unsafe_fn,
221    unused_extern_crates,
222    // We intentionally choose not to deny `unused_qualifications`. When items
223    // are added to the prelude (e.g., `core::mem::size_of`), this has the
224    // consequence of making some uses trigger this lint on the latest toolchain
225    // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
226    // does not work on older toolchains.
227    //
228    // We tested a more complicated fix in #1413, but ultimately decided that,
229    // since this lint is just a minor style lint, the complexity isn't worth it
230    // - it's fine to occasionally have unused qualifications slip through,
231    // especially since these do not affect our user-facing API in any way.
232    variant_size_differences
233)]
234#![cfg_attr(
235    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
236    deny(fuzzy_provenance_casts, lossy_provenance_casts)
237)]
238#![deny(
239    clippy::all,
240    clippy::alloc_instead_of_core,
241    clippy::arithmetic_side_effects,
242    clippy::as_underscore,
243    clippy::assertions_on_result_states,
244    clippy::as_conversions,
245    clippy::correctness,
246    clippy::dbg_macro,
247    clippy::decimal_literal_representation,
248    clippy::double_must_use,
249    clippy::get_unwrap,
250    clippy::indexing_slicing,
251    clippy::missing_inline_in_public_items,
252    clippy::missing_safety_doc,
253    clippy::must_use_candidate,
254    clippy::must_use_unit,
255    clippy::obfuscated_if_else,
256    clippy::perf,
257    clippy::print_stdout,
258    clippy::return_self_not_must_use,
259    clippy::std_instead_of_core,
260    clippy::style,
261    clippy::suspicious,
262    clippy::todo,
263    clippy::undocumented_unsafe_blocks,
264    clippy::unimplemented,
265    clippy::unnested_or_patterns,
266    clippy::unwrap_used,
267    clippy::use_debug
268)]
269#![allow(clippy::type_complexity)]
270#![deny(
271    rustdoc::bare_urls,
272    rustdoc::broken_intra_doc_links,
273    rustdoc::invalid_codeblock_attributes,
274    rustdoc::invalid_html_tags,
275    rustdoc::invalid_rust_codeblocks,
276    rustdoc::missing_crate_level_docs,
277    rustdoc::private_intra_doc_links
278)]
279// In test code, it makes sense to weight more heavily towards concise, readable
280// code over correct or debuggable code.
281#![cfg_attr(any(test, kani), allow(
282    // In tests, you get line numbers and have access to source code, so panic
283    // messages are less important. You also often unwrap a lot, which would
284    // make expect'ing instead very verbose.
285    clippy::unwrap_used,
286    // In tests, there's no harm to "panic risks" - the worst that can happen is
287    // that your test will fail, and you'll fix it. By contrast, panic risks in
    // production code introduce the possibility of code panicking unexpectedly "in
289    // the field".
290    clippy::arithmetic_side_effects,
291    clippy::indexing_slicing,
292))]
293#![cfg_attr(not(any(test, feature = "std")), no_std)]
294#![cfg_attr(
295    all(feature = "simd-nightly", any(target_arch = "x86", target_arch = "x86_64")),
296    feature(stdarch_x86_avx512)
297)]
298#![cfg_attr(
299    all(feature = "simd-nightly", target_arch = "arm"),
300    feature(stdarch_arm_dsp, stdarch_arm_neon_intrinsics)
301)]
302#![cfg_attr(
303    all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
304    feature(stdarch_powerpc)
305)]
306#![cfg_attr(doc_cfg, feature(doc_cfg))]
307#![cfg_attr(
308    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
309    feature(layout_for_ptr, strict_provenance, coverage_attribute)
310)]
311
312// This is a hack to allow zerocopy-derive derives to work in this crate. They
313// assume that zerocopy is linked as an extern crate, so they access items from
314// it as `zerocopy::Xxx`. This makes that still work.
315#[cfg(any(feature = "derive", test))]
316extern crate self as zerocopy;
317
318#[doc(hidden)]
319#[macro_use]
320pub mod util;
321
322pub mod byte_slice;
323pub mod byteorder;
324mod deprecated;
325// This module is `pub` so that zerocopy's error types and error handling
326// documentation is grouped together in a cohesive module. In practice, we
327// expect most users to use the re-export of `error`'s items to avoid identifier
328// stuttering.
329pub mod error;
330mod impls;
331#[doc(hidden)]
332pub mod layout;
333mod macros;
334#[doc(hidden)]
335pub mod pointer;
336mod r#ref;
337// TODO(#252): If we make this pub, come up with a better name.
338mod wrappers;
339
340pub use crate::byte_slice::*;
341pub use crate::byteorder::*;
342pub use crate::error::*;
343pub use crate::r#ref::*;
344pub use crate::wrappers::*;
345
346use core::{
347    cell::UnsafeCell,
348    cmp::Ordering,
349    fmt::{self, Debug, Display, Formatter},
350    hash::Hasher,
351    marker::PhantomData,
352    mem::{self, ManuallyDrop, MaybeUninit},
353    num::{
354        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
355        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
356    },
357    ops::{Deref, DerefMut},
358    ptr::{self, NonNull},
359    slice,
360};
361
362use crate::pointer::{invariant, BecauseExclusive, BecauseImmutable};
363
364#[cfg(any(feature = "alloc", test))]
365extern crate alloc;
366#[cfg(any(feature = "alloc", test))]
367use alloc::{boxed::Box, vec::Vec};
368
369#[cfg(any(feature = "alloc", test, kani))]
370use core::alloc::Layout;
371
372// Used by `TryFromBytes::is_bit_valid`.
373#[doc(hidden)]
374pub use crate::pointer::{Maybe, MaybeAligned, Ptr};
375// Used by `KnownLayout`.
376#[doc(hidden)]
377pub use crate::layout::*;
378
379// For each trait polyfill, as soon as the corresponding feature is stable, the
380// polyfill import will be unused because method/function resolution will prefer
381// the inherent method/function over a trait method/function. Thus, we suppress
382// the `unused_imports` warning.
383//
384// See the documentation on `util::polyfills` for more information.
385#[allow(unused_imports)]
386use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
387
// When testing on a nightly toolchain *without* the internal nightly-features
// cfg set, emit a compile-time warning so developers notice that some tests
// will be silently skipped. Referencing a `#[deprecated]` const from a
// `#[warn(deprecated)]` context produces a warning whose message is the
// deprecation note, without otherwise affecting compilation.
#[rustversion::nightly]
#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)))]
const _: () = {
    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS\""]
    const _WARNING: () = ();
    #[warn(deprecated)]
    _WARNING
};
396
397// These exist so that code which was written against the old names will get
398// less confusing error messages when they upgrade to a more recent version of
399// zerocopy. On our MSRV toolchain, the error messages read, for example:
400//
401//   error[E0603]: trait `FromZeroes` is private
402//       --> examples/deprecated.rs:1:15
403//        |
404//   1    | use zerocopy::FromZeroes;
405//        |               ^^^^^^^^^^ private trait
406//        |
407//   note: the trait `FromZeroes` is defined here
408//       --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
409//        |
410//   1845 | use FromZeros as FromZeroes;
411//        |     ^^^^^^^^^^^^^^^^^^^^^^^
412//
413// The "note" provides enough context to make it easy to figure out how to fix
414// the error.
415#[allow(unused)]
416use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
417
418/// Implements [`KnownLayout`].
419///
420/// This derive analyzes various aspects of a type's layout that are needed for
421/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
422/// e.g.:
423///
424/// ```
425/// # use zerocopy_derive::KnownLayout;
426/// #[derive(KnownLayout)]
427/// struct MyStruct {
428/// # /*
429///     ...
430/// # */
431/// }
432///
433/// #[derive(KnownLayout)]
434/// enum MyEnum {
435/// #   V00,
436/// # /*
437///     ...
438/// # */
439/// }
440///
441/// #[derive(KnownLayout)]
442/// union MyUnion {
443/// #   variant: u8,
444/// # /*
445///     ...
446/// # */
447/// }
448/// ```
449///
450/// # Limitations
451///
452/// This derive cannot currently be applied to unsized structs without an
453/// explicit `repr` attribute.
454///
455/// Some invocations of this derive run afoul of a [known bug] in Rust's type
456/// privacy checker. For example, this code:
457///
458/// ```compile_fail,E0446
459/// use zerocopy::*;
460/// # use zerocopy_derive::*;
461///
462/// #[derive(KnownLayout)]
463/// #[repr(C)]
464/// pub struct PublicType {
465///     leading: Foo,
466///     trailing: Bar,
467/// }
468///
469/// #[derive(KnownLayout)]
470/// struct Foo;
471///
472/// #[derive(KnownLayout)]
473/// struct Bar;
474/// ```
475///
476/// ...results in a compilation error:
477///
478/// ```text
479/// error[E0446]: private type `Bar` in public interface
480///  --> examples/bug.rs:3:10
481///    |
482/// 3  | #[derive(KnownLayout)]
483///    |          ^^^^^^^^^^^ can't leak private type
484/// ...
485/// 14 | struct Bar;
486///    | ---------- `Bar` declared as private
487///    |
488///    = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
489/// ```
490///
491/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
492/// structs whose trailing field type is less public than the enclosing struct.
493///
494/// To work around this, mark the trailing field type `pub` and annotate it with
495/// `#[doc(hidden)]`; e.g.:
496///
497/// ```no_run
498/// use zerocopy::*;
499/// # use zerocopy_derive::*;
500///
501/// #[derive(KnownLayout)]
502/// #[repr(C)]
503/// pub struct PublicType {
504///     leading: Foo,
505///     trailing: Bar,
506/// }
507///
508/// #[derive(KnownLayout)]
509/// struct Foo;
510///
511/// #[doc(hidden)]
512/// #[derive(KnownLayout)]
513/// pub struct Bar; // <- `Bar` is now also `pub`
514/// ```
515///
516/// [known bug]: https://github.com/rust-lang/rust/issues/45713
517#[cfg(any(feature = "derive", test))]
518#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
519pub use zerocopy_derive::KnownLayout;
520
521/// Indicates that zerocopy can reason about certain aspects of a type's layout.
522///
523/// This trait is required by many of zerocopy's APIs. It supports sized types,
524/// slices, and [slice DSTs](#dynamically-sized-types).
525///
526/// # Implementation
527///
528/// **Do not implement this trait yourself!** Instead, use
529/// [`#[derive(KnownLayout)]`][derive]; e.g.:
530///
531/// ```
532/// # use zerocopy_derive::KnownLayout;
533/// #[derive(KnownLayout)]
534/// struct MyStruct {
535/// # /*
536///     ...
537/// # */
538/// }
539///
540/// #[derive(KnownLayout)]
541/// enum MyEnum {
542/// # /*
543///     ...
544/// # */
545/// }
546///
547/// #[derive(KnownLayout)]
548/// union MyUnion {
549/// #   variant: u8,
550/// # /*
551///     ...
552/// # */
553/// }
554/// ```
555///
556/// This derive performs a sophisticated analysis to deduce the layout
557/// characteristics of types. You **must** implement this trait via the derive.
558///
559/// # Dynamically-sized types
560///
561/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
562///
563/// A slice DST is a type whose trailing field is either a slice or another
564/// slice DST, rather than a type with fixed size. For example:
565///
566/// ```
567/// #[repr(C)]
568/// struct PacketHeader {
569/// # /*
570///     ...
571/// # */
572/// }
573///
574/// #[repr(C)]
575/// struct Packet {
576///     header: PacketHeader,
577///     body: [u8],
578/// }
579/// ```
580///
581/// It can be useful to think of slice DSTs as a generalization of slices - in
582/// other words, a normal slice is just the special case of a slice DST with
583/// zero leading fields. In particular:
584/// - Like slices, slice DSTs can have different lengths at runtime
585/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
586///   or via other indirection such as `Box`
587/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
588///   encodes the number of elements in the trailing slice field
589///
590/// ## Slice DST layout
591///
592/// Just like other composite Rust types, the layout of a slice DST is not
593/// well-defined unless it is specified using an explicit `#[repr(...)]`
594/// attribute such as `#[repr(C)]`. [Other representations are
595/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
596/// example.
597///
598/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
/// types][repr-c-structs], but the presence of a variable-length field
600/// introduces the possibility of *dynamic padding*. In particular, it may be
601/// necessary to add trailing padding *after* the trailing slice field in order
602/// to satisfy the outer type's alignment, and the amount of padding required
603/// may be a function of the length of the trailing slice field. This is just a
604/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
605/// but it can result in surprising behavior. For example, consider the
606/// following type:
607///
608/// ```
609/// #[repr(C)]
610/// struct Foo {
611///     a: u32,
612///     b: u8,
613///     z: [u16],
614/// }
615/// ```
616///
617/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
618/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
619/// `Foo`:
620///
621/// ```text
622/// byte offset | 01234567
623///       field | aaaab---
624///                    ><
625/// ```
626///
627/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
628/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
629/// round up to offset 6. This means that there is one byte of padding between
630/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
631/// then two bytes of padding after `z` in order to satisfy the overall
632/// alignment of `Foo`. The size of this instance is 8 bytes.
633///
634/// What about if `z` has length 1?
635///
636/// ```text
637/// byte offset | 01234567
638///       field | aaaab-zz
639/// ```
640///
641/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
642/// that we no longer need padding after `z` in order to satisfy `Foo`'s
643/// alignment. We've now seen two different values of `Foo` with two different
644/// lengths of `z`, but they both have the same size - 8 bytes.
645///
646/// What about if `z` has length 2?
647///
648/// ```text
649/// byte offset | 012345678901
650///       field | aaaab-zzzz--
651/// ```
652///
653/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
654/// size to 10, and so we now need another 2 bytes of padding after `z` to
655/// satisfy `Foo`'s alignment.
656///
657/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
658/// applied to slice DSTs, but it can be surprising that the amount of trailing
659/// padding becomes a function of the trailing slice field's length, and thus
660/// can only be computed at runtime.
661///
662/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
663/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
664///
665/// ## What is a valid size?
666///
667/// There are two places in zerocopy's API that we refer to "a valid size" of a
668/// type. In normal casts or conversions, where the source is a byte slice, we
669/// need to know whether the source byte slice is a valid size of the
670/// destination type. In prefix or suffix casts, we need to know whether *there
671/// exists* a valid size of the destination type which fits in the source byte
672/// slice and, if so, what the largest such size is.
673///
674/// As outlined above, a slice DST's size is defined by the number of elements
675/// in its trailing slice field. However, there is not necessarily a 1-to-1
676/// mapping between trailing slice field length and overall size. As we saw in
677/// the previous section with the type `Foo`, instances with both 0 and 1
678/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
679///
680/// When we say "x is a valid size of `T`", we mean one of two things:
681/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
682/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
683///   `T` with `len` trailing slice elements has size `x`
684///
685/// When we say "largest possible size of `T` that fits in a byte slice", we
686/// mean one of two things:
687/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
688///   `size_of::<T>()` bytes long
689/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
690///   that the instance of `T` with `len` trailing slice elements fits in the
691///   byte slice, and to choose the largest such `len`, if any
692///
693///
694/// # Safety
695///
696/// This trait does not convey any safety guarantees to code outside this crate.
697///
698/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
699/// releases of zerocopy may make backwards-breaking changes to these items,
700/// including changes that only affect soundness, which may cause code which
701/// uses those items to silently become unsound.
702///
703#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
704#[cfg_attr(
705    not(feature = "derive"),
706    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
707)]
708#[cfg_attr(
709    zerocopy_diagnostic_on_unimplemented,
710    diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
711)]
pub unsafe trait KnownLayout {
    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
    // it likely won't be in the future, but there's no reason not to be
    // forwards-compatible with object safety.
    //
    // This method exists solely to prevent hand-written impls: only the derive
    // can (and does) provide an implementation of it.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type of metadata stored in a pointer to `Self`.
    ///
    /// This is `()` for sized types and `usize` for slice DSTs.
    type PointerMetadata: PointerMetadata;

    /// The layout of `Self`.
    ///
    /// # Safety
    ///
    /// Callers may assume that `LAYOUT` accurately reflects the layout of
    /// `Self`. In particular:
    /// - `LAYOUT.align` is equal to `Self`'s alignment
    /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
    ///   where `size == size_of::<Self>()`
    /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
    ///   SizeInfo::SliceDst(slice_layout)` where:
    ///   - The size, `size`, of an instance of `Self` with `elems` trailing
    ///     slice elements is equal to `slice_layout.offset +
    ///     slice_layout.elem_size * elems` rounded up to the nearest multiple
    ///     of `LAYOUT.align`
    ///   - For such an instance, any bytes in the range `[slice_layout.offset +
    ///     slice_layout.elem_size * elems, size)` are padding and must not be
    ///     assumed to be initialized
    #[doc(hidden)]
    const LAYOUT: DstLayout;

    /// Constructs a pointer to `Self` from a pointer to its leading byte and
    /// its pointer metadata.
    ///
    /// # Safety
    ///
    /// Callers may assume that the returned pointer has the same address and
    /// provenance as `bytes`, and that, if `Self` is a DST, the returned
    /// pointer's referent has the number of trailing slice elements encoded by
    /// `meta`.
    #[doc(hidden)]
    fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;

    /// Extracts the metadata from a pointer to `Self`.
    ///
    /// # Safety
    ///
    /// `pointer_to_metadata` always returns the correct metadata stored in
    /// `ptr`.
    #[doc(hidden)]
    fn pointer_to_metadata(ptr: NonNull<Self>) -> Self::PointerMetadata;

    /// Computes the length of the byte range addressed by `ptr`.
    ///
    /// Returns `None` if the resulting length would not fit in a `usize`.
    ///
    /// # Safety
    ///
    /// Callers may assume that `size_of_val_raw` always returns the correct
    /// size.
    ///
    /// Callers may assume that, if `ptr` addresses a byte range whose length
    /// fits in a `usize`, this will return `Some`.
    #[doc(hidden)]
    #[must_use]
    #[inline(always)]
    fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
        let meta = Self::pointer_to_metadata(ptr);
        // SAFETY: `size_for_metadata` promises to only return `None` if the
        // resulting size would not fit in a `usize`.
        meta.size_for_metadata(Self::LAYOUT)
    }
}
784
/// The metadata associated with a [`KnownLayout`] type.
///
/// Per [`KnownLayout::PointerMetadata`], this is `()` for sized types and
/// `usize` (the trailing slice element count) for slice DSTs.
#[doc(hidden)]
pub trait PointerMetadata: Copy + Eq + Debug {
    /// Constructs a `Self` from an element count.
    ///
    /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns
    /// `elems`. No other types are currently supported.
    fn from_elem_count(elems: usize) -> Self;

    /// Computes the size of the object with the given layout and pointer
    /// metadata.
    ///
    /// # Panics
    ///
    /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
    /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
    /// panic.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to only return `None` if the resulting size
    /// would not fit in a `usize`.
    fn size_for_metadata(&self, layout: DstLayout) -> Option<usize>;
}
809
810impl PointerMetadata for () {
811    #[inline]
812    #[allow(clippy::unused_unit)]
813    fn from_elem_count(_elems: usize) -> () {}
814
815    #[inline]
816    fn size_for_metadata(&self, layout: DstLayout) -> Option<usize> {
817        match layout.size_info {
818            SizeInfo::Sized { size } => Some(size),
819            // NOTE: This branch is unreachable, but we return `None` rather
820            // than `unreachable!()` to avoid generating panic paths.
821            SizeInfo::SliceDst(_) => None,
822        }
823    }
824}
825
826impl PointerMetadata for usize {
827    #[inline]
828    fn from_elem_count(elems: usize) -> usize {
829        elems
830    }
831
832    #[inline]
833    fn size_for_metadata(&self, layout: DstLayout) -> Option<usize> {
834        match layout.size_info {
835            SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
836                let slice_len = elem_size.checked_mul(*self)?;
837                let without_padding = offset.checked_add(slice_len)?;
838                without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
839            }
840            // NOTE: This branch is unreachable, but we return `None` rather
841            // than `unreachable!()` to avoid generating panic paths.
842            SizeInfo::Sized { .. } => None,
843        }
844    }
845}
846
847// SAFETY: Delegates safety to `DstLayout::for_slice`.
// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T> KnownLayout for [T] {
    // Intentionally empty: this method exists only so that, per its name,
    // `KnownLayout` cannot be usefully implemented by hand outside of the
    // derive machinery.
    #[allow(clippy::missing_inline_in_public_items)]
    #[cfg_attr(coverage_nightly, coverage(off))]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }

    // A slice's pointer metadata is its element count.
    type PointerMetadata = usize;

    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // TODO(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }

    #[inline(always)]
    fn pointer_to_metadata(ptr: NonNull<[T]>) -> usize {
        // Cast to a slice of ZSTs so that the length can be read via a
        // reference without requiring `ptr` to be aligned or dereferenceable
        // as a `[T]`.
        #[allow(clippy::as_conversions)]
        let slc = ptr.as_ptr() as *const [()];

        // SAFETY:
        // - `()` has alignment 1, so `slc` is trivially aligned.
        // - `slc` was derived from a non-null pointer.
        // - The size is 0 regardless of the length, so it is sound to
        //   materialize a reference regardless of location.
        // - By invariant, `self.ptr` has valid provenance.
        let slc = unsafe { &*slc };

        // This is correct because the preceding `as` cast preserves the number
        // of slice elements. [1]
        //
        // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
        //
        //   For slice types like `[T]` and `[U]`, the raw pointer types `*const
        //   [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
        //   elements in this slice. Casts between these raw pointer types
        //   preserve the number of elements. ... The same holds for `str` and
        //   any compound type whose unsized tail is a slice type, such as
        //   struct `Foo(i32, [u8])` or `(u64, Foo)`.
        slc.len()
    }
}
897
// Implement `KnownLayout` for the primitive types, the `NonZero*` types, and
// simple wrapper, pointer, and array types.
#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
#[rustfmt::skip]
impl_known_layout!(
    T         => Option<T>,
    T: ?Sized => PhantomData<T>,
    T         => Wrapping<T>,
    T         => MaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T
);
impl_known_layout!(const N: usize, T => [T; N]);
916
safety_comment! {
    /// SAFETY:
    /// `str`, `ManuallyDrop<[T]>` [1], and `UnsafeCell<T>` [2] have the same
    /// representations as `[u8]`, `[T]`, and `T` respectively. `str` has
    /// different bit validity than `[u8]`, but that doesn't affect the
    /// soundness of this impl.
    ///
    /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
    ///
    ///   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    ///   validity as `T`
    ///
    /// [2] Per https://doc.rust-lang.org/core/cell/struct.UnsafeCell.html#memory-layout:
    ///
    ///   `UnsafeCell<T>` has the same in-memory representation as its inner
    ///   type `T`.
    ///
    /// TODO(#429):
    /// -  Add quotes from docs.
    /// -  Once [1] (added in
    ///    https://github.com/rust-lang/rust/pull/115522) is available on
    ///    stable, quote the stable docs instead of the nightly docs.
    unsafe_impl_known_layout!(#[repr([u8])] str);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
}
943
/// Analyzes whether a type is [`FromZeros`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `FromZeros` and implements `FromZeros` if it is
/// sound to do so. This derive can be applied to structs, enums, and unions;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::{FromZeros, Immutable};
/// #[derive(FromZeros)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeros)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeros, Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// [safety conditions]: trait@FromZeros#safety
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `FromZeros` for a given type.
/// Unless you are modifying the implementation of this derive, or attempting to
/// manually implement `FromZeros` for a type yourself, you don't need to read
/// this section.*
///
/// If a type has the following properties, then this derive can implement
/// `FromZeros` for that type:
///
/// - If the type is a struct, all of its fields must be `FromZeros`.
/// - If the type is an enum:
///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
///   - It must have a variant with a discriminant/tag of `0`. See [the
///     reference] for a description of how discriminant values are specified.
///   - The fields of that variant must be `FromZeros`.
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
/// implementation details of this derive.
///
/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
///
/// ## Why isn't an explicit representation required for structs?
///
/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
/// that structs are marked with `#[repr(C)]`.
///
/// Per the [Rust reference](reference),
///
/// > The representation of a type can change the padding between fields, but
/// > does not change the layout of the fields themselves.
///
/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
///
/// Since the layout of structs only consists of padding bytes and field bytes,
/// a struct is soundly `FromZeros` if:
/// 1. its padding is soundly `FromZeros`, and
/// 2. its fields are soundly `FromZeros`.
///
/// The answer to the first question is always yes: padding bytes do not have
/// any validity constraints. A [discussion] of this question in the Unsafe Code
/// Guidelines Working Group concluded that it would be virtually unimaginable
/// for future versions of rustc to add validity constraints to padding bytes.
///
/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
///
/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
/// its fields are `FromZeros`.
// TODO(#146): Document why we don't require an enum to have an explicit `repr`
// attribute.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::FromZeros;
1037
/// Analyzes whether a type is [`Immutable`].
///
/// *(Available with the `derive` Cargo feature.)*
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
/// sound to do so. This derive can be applied to structs, enums, and unions;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::Immutable;
/// #[derive(Immutable)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Immutable)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `Immutable` for a given type.
/// Unless you are modifying the implementation of this derive, you don't need
/// to read this section.*
///
/// If a type has the following properties, then this derive can implement
/// `Immutable` for that type:
///
/// - All fields must be `Immutable`.
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `Immutable`, and must *not* rely on the
/// implementation details of this derive.
///
/// [safety conditions]: trait@Immutable#safety
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::Immutable;
1091
/// Types which are free from interior mutability.
///
/// `T: Immutable` indicates that `T` does not permit interior mutation, except
/// by ownership or an exclusive (`&mut`) borrow.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
/// e.g.:
///
/// ```
/// # use zerocopy_derive::Immutable;
/// #[derive(Immutable)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Immutable)]
/// enum MyEnum {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `Immutable`.
///
/// # Safety
///
/// Unsafe code outside of this crate must not make any assumptions about `T`
/// based on `T: Immutable`. We reserve the right to relax the requirements for
/// `Immutable` in the future, and if unsafe code outside of this crate makes
/// assumptions based on `T: Immutable`, future relaxations may cause that code
/// to become unsound.
///
// # Safety (Internal)
//
// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
// `t: &T`, `t` does not contain any [`UnsafeCell`]s at any byte location
// within the byte range addressed by `t`. This includes ranges of length 0
// (e.g., `UnsafeCell<()>` and `[UnsafeCell<u8>; 0]`). If a type implements
// `Immutable` in a way that violates these assumptions, it may cause this
// crate to exhibit [undefined behavior].
//
// [`UnsafeCell`]: core::cell::UnsafeCell
// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::Immutable",
    doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
)]
#[cfg_attr(
    zerocopy_diagnostic_on_unimplemented,
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
)]
pub unsafe trait Immutable {
    // The `Self: Sized` bound makes it so that `Immutable` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
1172
/// Implements [`TryFromBytes`].
///
/// *(Available with the `derive` Cargo feature.)*
///
/// This derive synthesizes the runtime checks required to check whether a
/// sequence of initialized bytes corresponds to a valid instance of a type.
/// This derive can be applied to structs, enums, and unions; e.g.:
///
/// ```
/// # use zerocopy_derive::{TryFromBytes, Immutable};
/// #[derive(TryFromBytes)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(TryFromBytes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   V00,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(TryFromBytes, Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// [safety conditions]: trait@TryFromBytes#safety
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::TryFromBytes;
1210
1211/// Types for which some bit patterns are valid.
1212///
1213/// A memory region of the appropriate length which contains initialized bytes
1214/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1215/// bytes corresponds to a [*valid instance*] of that type. For example,
1216/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1217/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1218/// `1`.
1219///
1220/// # Implementation
1221///
1222/// **Do not implement this trait yourself!** Instead, use
1223/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1224///
1225/// ```
1226/// # use zerocopy_derive::{TryFromBytes, Immutable};
1227/// #[derive(TryFromBytes)]
1228/// struct MyStruct {
1229/// # /*
1230///     ...
1231/// # */
1232/// }
1233///
1234/// #[derive(TryFromBytes)]
1235/// #[repr(u8)]
1236/// enum MyEnum {
1237/// #   V00,
1238/// # /*
1239///     ...
1240/// # */
1241/// }
1242///
1243/// #[derive(TryFromBytes, Immutable)]
1244/// union MyUnion {
1245/// #   variant: u8,
1246/// # /*
1247///     ...
1248/// # */
1249/// }
1250/// ```
1251///
1252/// This derive ensures that the runtime check of whether bytes correspond to a
1253/// valid instance is sound. You **must** implement this trait via the derive.
1254///
1255/// # What is a "valid instance"?
1256///
1257/// In Rust, each type has *bit validity*, which refers to the set of bit
1258/// patterns which may appear in an instance of that type. It is impossible for
1259/// safe Rust code to produce values which violate bit validity (ie, values
1260/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1261/// invalid value, this is considered [undefined behavior].
1262///
1263/// Rust's bit validity rules are currently being decided, which means that some
1264/// types have three classes of bit patterns: those which are definitely valid,
1265/// and whose validity is documented in the language; those which may or may not
1266/// be considered valid at some point in the future; and those which are
1267/// definitely invalid.
1268///
/// Zerocopy takes a conservative approach, and only considers a bit pattern to
/// be valid if its validity is a documented guarantee provided by the
/// language.
1272///
1273/// For most use cases, Rust's current guarantees align with programmers'
1274/// intuitions about what ought to be valid. As a result, zerocopy's
1275/// conservatism should not affect most users.
1276///
1277/// If you are negatively affected by lack of support for a particular type,
1278/// we encourage you to let us know by [filing an issue][github-repo].
1279///
1280/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1281///
1282/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1283/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1284/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1285/// IntoBytes`, there exist values of `t: T` such that
1286/// `TryFromBytes::try_ref_from_bytes(t.as_bytes()) == None`. Code should not
1287/// generally assume that values produced by `IntoBytes` will necessarily be
1288/// accepted as valid by `TryFromBytes`.
1289///
1290/// # Safety
1291///
1292/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1293/// or representation of `T`. It merely provides the ability to perform a
1294/// validity check at runtime via methods like [`try_ref_from_bytes`].
1295///
1296/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1297/// Future releases of zerocopy may make backwards-breaking changes to these
1298/// items, including changes that only affect soundness, which may cause code
1299/// which uses those items to silently become unsound.
1300///
1301/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1302/// [github-repo]: https://github.com/google/zerocopy
1303/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1304/// [*valid instance*]: #what-is-a-valid-instance
1305#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1306#[cfg_attr(
1307    not(feature = "derive"),
1308    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1309)]
1310#[cfg_attr(
1311    zerocopy_diagnostic_on_unimplemented,
1312    diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1313)]
1314pub unsafe trait TryFromBytes {
    // Per its name, this unimplementable-by-hand method ensures that
    // `TryFromBytes` is only implemented via `#[derive(TryFromBytes)]`.
    //
    // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
1321
    /// Does a given memory range contain a valid instance of `Self`?
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns
    /// `true`, `*candidate` contains a valid `Self`.
    ///
    /// # Panics
    ///
    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
    /// `unsafe` code remains sound even in the face of `is_bit_valid`
    /// panicking. (We support user-defined validation routines; so long as
    /// these routines are not required to be `unsafe`, there is no way to
    /// ensure that these do not generate panics.)
    ///
    /// Besides user-defined validation routines panicking, `is_bit_valid` will
    /// either panic or fail to compile if called on a pointer with [`Shared`]
    /// aliasing when `Self: !Immutable`.
    ///
    /// [`UnsafeCell`]: core::cell::UnsafeCell
    /// [`Shared`]: invariant::Shared
    #[doc(hidden)]
    fn is_bit_valid<A: invariant::Aliasing + invariant::AtLeast<invariant::Shared>>(
        candidate: Maybe<'_, Self, A>,
    ) -> bool;
1347
    /// Attempts to interpret the given `source` as a `&Self`.
    ///
    /// If the bytes of `source` are a valid instance of `Self`, this method
    /// returns a reference to those bytes interpreted as a `Self`. If the
    /// length of `source` is not a [valid size of `Self`][valid-size], or if
    /// `source` is not appropriately aligned, or if `source` is not a valid
    /// instance of `Self`, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the byte sequence `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
    ///
    /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
    /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                //
                // Note that one panic or post-monomorphization error condition is
                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
                // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
                // condition will not happen.
                match source.try_into_valid() {
                    Ok(valid) => Ok(valid.as_ref()),
                    Err(e) => {
                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
                    }
                }
            }
            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
        }
    }
1448
    /// Attempts to interpret the prefix of the given `source` as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`. If that prefix is a valid
    /// instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the remaining bytes. If there
    /// are insufficient bytes, or if `source` is not appropriately aligned, or
    /// if those bytes are not a valid instance of `Self`, this returns `Err`.
    /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    ///
    /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    /// assert_eq!(suffix, &[6u8][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        try_ref_from_prefix_suffix(source, CastType::Prefix, None)
    }
1534
1535    /// Attempts to interpret the suffix of the given `source` as a `&Self`.
1536    ///
1537    /// This method computes the [largest possible size of `Self`][valid-size]
1538    /// that can fit in the trailing bytes of `source`. If that suffix is a
1539    /// valid instance of `Self`, this method returns a reference to those bytes
1540    /// interpreted as `Self`, and a reference to the preceding bytes. If there
1541    /// are insufficient bytes, or if the suffix of `source` would not be
1542    /// appropriately aligned, or if the suffix is not a valid instance of
1543    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1544    /// can [infallibly discard the alignment error][ConvertError::from].
1545    ///
1546    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1547    ///
1548    /// [valid-size]: crate#what-is-a-valid-size
1549    /// [self-unaligned]: Unaligned
1550    /// [slice-dst]: KnownLayout#dynamically-sized-types
1551    ///
1552    /// # Compile-Time Assertions
1553    ///
1554    /// This method cannot yet be used on unsized types whose dynamically-sized
1555    /// component is zero-sized. Attempting to use this method on such types
1556    /// results in a compile-time assertion error; e.g.:
1557    ///
1558    /// ```compile_fail,E0080
1559    /// use zerocopy::*;
1560    /// # use zerocopy_derive::*;
1561    ///
1562    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1563    /// #[repr(C)]
1564    /// struct ZSTy {
1565    ///     leading_sized: u16,
1566    ///     trailing_dst: [()],
1567    /// }
1568    ///
1569    /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // âš  Compile Error!
1570    /// ```
1571    ///
1572    /// # Examples
1573    ///
1574    /// ```
1575    /// use zerocopy::TryFromBytes;
1576    /// # use zerocopy_derive::*;
1577    ///
1578    /// // The only valid value of this type is the byte `0xC0`
1579    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1580    /// #[repr(u8)]
1581    /// enum C0 { xC0 = 0xC0 }
1582    ///
1583    /// // The only valid value of this type is the bytes `0xC0C0`.
1584    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1585    /// #[repr(C)]
1586    /// struct C0C0(C0, C0);
1587    ///
1588    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1589    /// #[repr(C)]
1590    /// struct Packet {
1591    ///     magic_number: C0C0,
1592    ///     mug_size: u8,
1593    ///     temperature: u8,
1594    ///     marshmallows: [[u8; 2]],
1595    /// }
1596    ///
1597    /// // These are more bytes than are needed to encode a `Packet`.
1598    /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
1599    ///
1600    /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
1601    ///
1602    /// assert_eq!(packet.mug_size, 240);
1603    /// assert_eq!(packet.temperature, 77);
1604    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
1605    /// assert_eq!(prefix, &[0u8][..]);
1606    ///
    /// // These bytes are not a valid instance of `Packet`.
1608    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
1609    /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
1610    /// ```
1611    #[must_use = "has no side effects"]
1612    #[inline]
1613    fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
1614    where
1615        Self: KnownLayout + Immutable,
1616    {
1617        static_assert_dst_is_not_zst!(Self);
1618        try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
1619    }
1620
1621    /// Attempts to interpret the given `source` as a `&mut Self` without
1622    /// copying.
1623    ///
1624    /// If the bytes of `source` are a valid instance of `Self`, this method
1625    /// returns a reference to those bytes interpreted as a `Self`. If the
1626    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1627    /// `source` is not appropriately aligned, or if `source` is not a valid
1628    /// instance of `Self`, this returns `Err`. If [`Self:
1629    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1630    /// error][ConvertError::from].
1631    ///
1632    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1633    ///
1634    /// [valid-size]: crate#what-is-a-valid-size
1635    /// [self-unaligned]: Unaligned
1636    /// [slice-dst]: KnownLayout#dynamically-sized-types
1637    ///
1638    /// # Compile-Time Assertions
1639    ///
1640    /// This method cannot yet be used on unsized types whose dynamically-sized
1641    /// component is zero-sized. Attempting to use this method on such types
1642    /// results in a compile-time assertion error; e.g.:
1643    ///
1644    /// ```compile_fail,E0080
1645    /// use zerocopy::*;
1646    /// # use zerocopy_derive::*;
1647    ///
1648    /// #[derive(TryFromBytes, KnownLayout)]
1649    /// #[repr(C)]
1650    /// struct ZSTy {
1651    ///     leading_sized: [u8; 2],
1652    ///     trailing_dst: [()],
1653    /// }
1654    ///
1655    /// let mut source = [85, 85];
1656    /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // âš  Compile Error!
1657    /// ```
1658    ///
1659    /// # Examples
1660    ///
1661    /// ```
1662    /// use zerocopy::TryFromBytes;
1663    /// # use zerocopy_derive::*;
1664    ///
1665    /// // The only valid value of this type is the byte `0xC0`
1666    /// #[derive(TryFromBytes, KnownLayout)]
1667    /// #[repr(u8)]
1668    /// enum C0 { xC0 = 0xC0 }
1669    ///
1670    /// // The only valid value of this type is the bytes `0xC0C0`.
1671    /// #[derive(TryFromBytes, KnownLayout)]
1672    /// #[repr(C)]
1673    /// struct C0C0(C0, C0);
1674    ///
1675    /// #[derive(TryFromBytes, KnownLayout)]
1676    /// #[repr(C)]
1677    /// struct Packet {
1678    ///     magic_number: C0C0,
1679    ///     mug_size: u8,
1680    ///     temperature: u8,
1681    ///     marshmallows: [[u8; 2]],
1682    /// }
1683    ///
1684    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1685    ///
1686    /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
1687    ///
1688    /// assert_eq!(packet.mug_size, 240);
1689    /// assert_eq!(packet.temperature, 77);
1690    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1691    ///
1692    /// packet.temperature = 111;
1693    ///
1694    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
1695    ///
    /// // These bytes are not a valid instance of `Packet`.
1697    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1698    /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
1699    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // Cast `bytes` to `Self`, requiring that `bytes` be exactly the right
        // length (no leftover bytes) and appropriately aligned. Size and
        // alignment errors surface here; bit validity is checked below.
        match Ptr::from_mut(bytes).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                //
                // Note that one panic or post-monomorphization error condition is
                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
                // pointer when `Self: !Immutable`. Since the pointer here is derived
                // from `&mut [u8]` via `Ptr::from_mut` and cast with
                // `BecauseExclusive`, it is exclusive, not shared, so this panic
                // condition will not happen. (`Self: Immutable` is not a bound on
                // this method, so exclusivity is what rules the panic out.)
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_mut()),
                    Err(e) => {
                        // Bit-validity failure: map the pointer back to the
                        // original byte slice for the returned error.
                        Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into())
                    }
                }
            }
            // Size or alignment failure: convert the `Ptr` back to `&mut [u8]`.
            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
        }
    }
1727
1728    /// Attempts to interpret the prefix of the given `source` as a `&mut
1729    /// Self`.
1730    ///
1731    /// This method computes the [largest possible size of `Self`][valid-size]
1732    /// that can fit in the leading bytes of `source`. If that prefix is a valid
1733    /// instance of `Self`, this method returns a reference to those bytes
1734    /// interpreted as `Self`, and a reference to the remaining bytes. If there
1735    /// are insufficient bytes, or if `source` is not appropriately aligned, or
1736    /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
1737    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
1738    /// alignment error][ConvertError::from].
1739    ///
1740    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1741    ///
1742    /// [valid-size]: crate#what-is-a-valid-size
1743    /// [self-unaligned]: Unaligned
1744    /// [slice-dst]: KnownLayout#dynamically-sized-types
1745    ///
1746    /// # Compile-Time Assertions
1747    ///
1748    /// This method cannot yet be used on unsized types whose dynamically-sized
1749    /// component is zero-sized. Attempting to use this method on such types
1750    /// results in a compile-time assertion error; e.g.:
1751    ///
1752    /// ```compile_fail,E0080
1753    /// use zerocopy::*;
1754    /// # use zerocopy_derive::*;
1755    ///
1756    /// #[derive(TryFromBytes, KnownLayout)]
1757    /// #[repr(C)]
1758    /// struct ZSTy {
1759    ///     leading_sized: [u8; 2],
1760    ///     trailing_dst: [()],
1761    /// }
1762    ///
1763    /// let mut source = [85, 85];
1764    /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // âš  Compile Error!
1765    /// ```
1766    ///
1767    /// # Examples
1768    ///
1769    /// ```
1770    /// use zerocopy::TryFromBytes;
1771    /// # use zerocopy_derive::*;
1772    ///
1773    /// // The only valid value of this type is the byte `0xC0`
1774    /// #[derive(TryFromBytes, KnownLayout)]
1775    /// #[repr(u8)]
1776    /// enum C0 { xC0 = 0xC0 }
1777    ///
1778    /// // The only valid value of this type is the bytes `0xC0C0`.
1779    /// #[derive(TryFromBytes, KnownLayout)]
1780    /// #[repr(C)]
1781    /// struct C0C0(C0, C0);
1782    ///
1783    /// #[derive(TryFromBytes, KnownLayout)]
1784    /// #[repr(C)]
1785    /// struct Packet {
1786    ///     magic_number: C0C0,
1787    ///     mug_size: u8,
1788    ///     temperature: u8,
1789    ///     marshmallows: [[u8; 2]],
1790    /// }
1791    ///
1792    /// // These are more bytes than are needed to encode a `Packet`.
1793    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1794    ///
1795    /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
1796    ///
1797    /// assert_eq!(packet.mug_size, 240);
1798    /// assert_eq!(packet.temperature, 77);
1799    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1800    /// assert_eq!(suffix, &[6u8][..]);
1801    ///
1802    /// packet.temperature = 111;
1803    /// suffix[0] = 222;
1804    ///
1805    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
1806    ///
    /// // These bytes are not a valid instance of `Packet`.
1808    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1809    /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
1810    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared helper. A prefix cast already yields
        // `(Self, remaining bytes)` in this method's return order, so no
        // reordering is needed (cf. the `.map(swap)` in the suffix methods).
        try_mut_from_prefix_suffix(source, CastType::Prefix, None)
    }
1822
1823    /// Attempts to interpret the suffix of the given `source` as a `&mut
1824    /// Self`.
1825    ///
1826    /// This method computes the [largest possible size of `Self`][valid-size]
1827    /// that can fit in the trailing bytes of `source`. If that suffix is a
1828    /// valid instance of `Self`, this method returns a reference to those bytes
1829    /// interpreted as `Self`, and a reference to the preceding bytes. If there
1830    /// are insufficient bytes, or if the suffix of `source` would not be
1831    /// appropriately aligned, or if the suffix is not a valid instance of
1832    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1833    /// can [infallibly discard the alignment error][ConvertError::from].
1834    ///
1835    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1836    ///
1837    /// [valid-size]: crate#what-is-a-valid-size
1838    /// [self-unaligned]: Unaligned
1839    /// [slice-dst]: KnownLayout#dynamically-sized-types
1840    ///
1841    /// # Compile-Time Assertions
1842    ///
1843    /// This method cannot yet be used on unsized types whose dynamically-sized
1844    /// component is zero-sized. Attempting to use this method on such types
1845    /// results in a compile-time assertion error; e.g.:
1846    ///
1847    /// ```compile_fail,E0080
1848    /// use zerocopy::*;
1849    /// # use zerocopy_derive::*;
1850    ///
1851    /// #[derive(TryFromBytes, KnownLayout)]
1852    /// #[repr(C)]
1853    /// struct ZSTy {
1854    ///     leading_sized: u16,
1855    ///     trailing_dst: [()],
1856    /// }
1857    ///
1858    /// let mut source = [85, 85];
1859    /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // âš  Compile Error!
1860    /// ```
1861    ///
1862    /// # Examples
1863    ///
1864    /// ```
1865    /// use zerocopy::TryFromBytes;
1866    /// # use zerocopy_derive::*;
1867    ///
1868    /// // The only valid value of this type is the byte `0xC0`
1869    /// #[derive(TryFromBytes, KnownLayout)]
1870    /// #[repr(u8)]
1871    /// enum C0 { xC0 = 0xC0 }
1872    ///
1873    /// // The only valid value of this type is the bytes `0xC0C0`.
1874    /// #[derive(TryFromBytes, KnownLayout)]
1875    /// #[repr(C)]
1876    /// struct C0C0(C0, C0);
1877    ///
1878    /// #[derive(TryFromBytes, KnownLayout)]
1879    /// #[repr(C)]
1880    /// struct Packet {
1881    ///     magic_number: C0C0,
1882    ///     mug_size: u8,
1883    ///     temperature: u8,
1884    ///     marshmallows: [[u8; 2]],
1885    /// }
1886    ///
1887    /// // These are more bytes than are needed to encode a `Packet`.
1888    /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
1889    ///
1890    /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
1891    ///
1892    /// assert_eq!(packet.mug_size, 240);
1893    /// assert_eq!(packet.temperature, 77);
1894    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
1895    /// assert_eq!(prefix, &[0u8][..]);
1896    ///
1897    /// prefix[0] = 111;
1898    /// packet.temperature = 222;
1899    ///
1900    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
1901    ///
    /// // These bytes are not a valid instance of `Packet`.
1903    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
1904    /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
1905    /// ```
1906    #[must_use = "has no side effects"]
1907    #[inline]
1908    fn try_mut_from_suffix(
1909        source: &mut [u8],
1910    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
1911    where
1912        Self: KnownLayout,
1913    {
1914        static_assert_dst_is_not_zst!(Self);
1915        try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
1916    }
1917
1918    /// Attempts to interpret the given `source` as a `&Self` with a DST length
1919    /// equal to `count`.
1920    ///
1921    /// This method attempts to return a reference to `source` interpreted as a
1922    /// `Self` with `count` trailing elements. If the length of `source` is not
1923    /// equal to the size of `Self` with `count` elements, if `source` is not
1924    /// appropriately aligned, or if `source` does not contain a valid instance
1925    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
1926    /// you can [infallibly discard the alignment error][ConvertError::from].
1927    ///
1928    /// [self-unaligned]: Unaligned
1929    /// [slice-dst]: KnownLayout#dynamically-sized-types
1930    ///
1931    /// # Examples
1932    ///
1933    /// ```
1934    /// # #![allow(non_camel_case_types)] // For C0::xC0
1935    /// use zerocopy::TryFromBytes;
1936    /// # use zerocopy_derive::*;
1937    ///
1938    /// // The only valid value of this type is the byte `0xC0`
1939    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1940    /// #[repr(u8)]
1941    /// enum C0 { xC0 = 0xC0 }
1942    ///
1943    /// // The only valid value of this type is the bytes `0xC0C0`.
1944    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1945    /// #[repr(C)]
1946    /// struct C0C0(C0, C0);
1947    ///
1948    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1949    /// #[repr(C)]
1950    /// struct Packet {
1951    ///     magic_number: C0C0,
1952    ///     mug_size: u8,
1953    ///     temperature: u8,
1954    ///     marshmallows: [[u8; 2]],
1955    /// }
1956    ///
1957    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
1958    ///
1959    /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
1960    ///
1961    /// assert_eq!(packet.mug_size, 240);
1962    /// assert_eq!(packet.temperature, 77);
1963    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
1964    ///
    /// // These bytes are not a valid instance of `Packet`.
1966    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
1967    /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
1968    /// ```
1969    ///
1970    /// Since an explicit `count` is provided, this method supports types with
1971    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
1972    /// which do not take an explicit count do not support such types.
1973    ///
1974    /// ```
1975    /// use core::num::NonZeroU16;
1976    /// use zerocopy::*;
1977    /// # use zerocopy_derive::*;
1978    ///
1979    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1980    /// #[repr(C)]
1981    /// struct ZSTy {
1982    ///     leading_sized: NonZeroU16,
1983    ///     trailing_dst: [()],
1984    /// }
1985    ///
1986    /// let src = &[85, 85][..];
1987    /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
1988    /// assert_eq!(zsty.trailing_dst.len(), 42);
1989    /// ```
1990    ///
1991    /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Cast `source` to a `Self` with exactly `count` trailing elements,
        // requiring that `source` be exactly the right length (no leftover
        // bytes) and appropriately aligned. Bit validity is checked below.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
        {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                //
                // Note that one panic or post-monomorphization error condition is
                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
                // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
                // condition will not happen.
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_ref()),
                    Err(e) => {
                        // Bit-validity failure: map the pointer back to the
                        // original byte slice for the returned error.
                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
                    }
                }
            }
            // Size or alignment failure: convert the `Ptr` back to `&[u8]`.
            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
        }
    }
2022
2023    /// Attempts to interpret the prefix of the given `source` as a `&Self` with
2024    /// a DST length equal to `count`.
2025    ///
2026    /// This method attempts to return a reference to the prefix of `source`
2027    /// interpreted as a `Self` with `count` trailing elements, and a reference
2028    /// to the remaining bytes. If the length of `source` is less than the size
2029    /// of `Self` with `count` elements, if `source` is not appropriately
2030    /// aligned, or if the prefix of `source` does not contain a valid instance
2031    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2032    /// you can [infallibly discard the alignment error][ConvertError::from].
2033    ///
2034    /// [self-unaligned]: Unaligned
2035    /// [slice-dst]: KnownLayout#dynamically-sized-types
2036    ///
2037    /// # Examples
2038    ///
2039    /// ```
2040    /// # #![allow(non_camel_case_types)] // For C0::xC0
2041    /// use zerocopy::TryFromBytes;
2042    /// # use zerocopy_derive::*;
2043    ///
2044    /// // The only valid value of this type is the byte `0xC0`
2045    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2046    /// #[repr(u8)]
2047    /// enum C0 { xC0 = 0xC0 }
2048    ///
2049    /// // The only valid value of this type is the bytes `0xC0C0`.
2050    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2051    /// #[repr(C)]
2052    /// struct C0C0(C0, C0);
2053    ///
2054    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2055    /// #[repr(C)]
2056    /// struct Packet {
2057    ///     magic_number: C0C0,
2058    ///     mug_size: u8,
2059    ///     temperature: u8,
2060    ///     marshmallows: [[u8; 2]],
2061    /// }
2062    ///
2063    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2064    ///
2065    /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
2066    ///
2067    /// assert_eq!(packet.mug_size, 240);
2068    /// assert_eq!(packet.temperature, 77);
2069    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2070    /// assert_eq!(suffix, &[8u8][..]);
2071    ///
    /// // These bytes are not a valid instance of `Packet`.
2073    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2074    /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
2075    /// ```
2076    ///
2077    /// Since an explicit `count` is provided, this method supports types with
2078    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2079    /// which do not take an explicit count do not support such types.
2080    ///
2081    /// ```
2082    /// use core::num::NonZeroU16;
2083    /// use zerocopy::*;
2084    /// # use zerocopy_derive::*;
2085    ///
2086    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2087    /// #[repr(C)]
2088    /// struct ZSTy {
2089    ///     leading_sized: NonZeroU16,
2090    ///     trailing_dst: [()],
2091    /// }
2092    ///
2093    /// let src = &[85, 85][..];
2094    /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
2095    /// assert_eq!(zsty.trailing_dst.len(), 42);
2096    /// ```
2097    ///
2098    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_prefix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Delegate to the shared helper with an explicit element count. A
        // prefix cast already yields `(Self, remaining bytes)` in this
        // method's return order, so no reordering is needed. No
        // `static_assert_dst_is_not_zst!` here: the explicit `count` makes
        // zero-sized trailing elements unambiguous.
        try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
    }
2110
2111    /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2112    /// a DST length equal to `count`.
2113    ///
2114    /// This method attempts to return a reference to the suffix of `source`
2115    /// interpreted as a `Self` with `count` trailing elements, and a reference
2116    /// to the preceding bytes. If the length of `source` is less than the size
2117    /// of `Self` with `count` elements, if the suffix of `source` is not
2118    /// appropriately aligned, or if the suffix of `source` does not contain a
2119    /// valid instance of `Self`, this returns `Err`. If [`Self:
2120    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2121    /// error][ConvertError::from].
2122    ///
2123    /// [self-unaligned]: Unaligned
2124    /// [slice-dst]: KnownLayout#dynamically-sized-types
2125    ///
2126    /// # Examples
2127    ///
2128    /// ```
2129    /// # #![allow(non_camel_case_types)] // For C0::xC0
2130    /// use zerocopy::TryFromBytes;
2131    /// # use zerocopy_derive::*;
2132    ///
2133    /// // The only valid value of this type is the byte `0xC0`
2134    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2135    /// #[repr(u8)]
2136    /// enum C0 { xC0 = 0xC0 }
2137    ///
2138    /// // The only valid value of this type is the bytes `0xC0C0`.
2139    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2140    /// #[repr(C)]
2141    /// struct C0C0(C0, C0);
2142    ///
2143    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2144    /// #[repr(C)]
2145    /// struct Packet {
2146    ///     magic_number: C0C0,
2147    ///     mug_size: u8,
2148    ///     temperature: u8,
2149    ///     marshmallows: [[u8; 2]],
2150    /// }
2151    ///
2152    /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2153    ///
2154    /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2155    ///
2156    /// assert_eq!(packet.mug_size, 240);
2157    /// assert_eq!(packet.temperature, 77);
2158    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2159    /// assert_eq!(prefix, &[123u8][..]);
2160    ///
    /// // These bytes are not a valid instance of `Packet`.
2162    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2163    /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2164    /// ```
2165    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use core::num::NonZeroU16;
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: NonZeroU16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`try_ref_from_suffix`]: TryFromBytes::try_ref_from_suffix
2188    #[must_use = "has no side effects"]
2189    #[inline]
2190    fn try_ref_from_suffix_with_elems(
2191        source: &[u8],
2192        count: usize,
2193    ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
2194    where
2195        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2196    {
2197        try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2198    }
2199
2200    /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2201    /// length equal to `count`.
2202    ///
2203    /// This method attempts to return a reference to `source` interpreted as a
2204    /// `Self` with `count` trailing elements. If the length of `source` is not
2205    /// equal to the size of `Self` with `count` elements, if `source` is not
2206    /// appropriately aligned, or if `source` does not contain a valid instance
2207    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2208    /// you can [infallibly discard the alignment error][ConvertError::from].
2209    ///
2210    /// [self-unaligned]: Unaligned
2211    /// [slice-dst]: KnownLayout#dynamically-sized-types
2212    ///
2213    /// # Examples
2214    ///
2215    /// ```
2216    /// # #![allow(non_camel_case_types)] // For C0::xC0
2217    /// use zerocopy::TryFromBytes;
2218    /// # use zerocopy_derive::*;
2219    ///
2220    /// // The only valid value of this type is the byte `0xC0`
2221    /// #[derive(TryFromBytes, KnownLayout)]
2222    /// #[repr(u8)]
2223    /// enum C0 { xC0 = 0xC0 }
2224    ///
2225    /// // The only valid value of this type is the bytes `0xC0C0`.
2226    /// #[derive(TryFromBytes, KnownLayout)]
2227    /// #[repr(C)]
2228    /// struct C0C0(C0, C0);
2229    ///
2230    /// #[derive(TryFromBytes, KnownLayout)]
2231    /// #[repr(C)]
2232    /// struct Packet {
2233    ///     magic_number: C0C0,
2234    ///     mug_size: u8,
2235    ///     temperature: u8,
2236    ///     marshmallows: [[u8; 2]],
2237    /// }
2238    ///
2239    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2240    ///
2241    /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2242    ///
2243    /// assert_eq!(packet.mug_size, 240);
2244    /// assert_eq!(packet.temperature, 77);
2245    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2246    ///
2247    /// packet.temperature = 111;
2248    ///
2249    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2250    ///
    /// // These bytes are not a valid instance of `Packet`.
2252    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2253    /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2254    /// ```
2255    ///
2256    /// Since an explicit `count` is provided, this method supports types with
2257    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2258    /// which do not take an explicit count do not support such types.
2259    ///
2260    /// ```
2261    /// use core::num::NonZeroU16;
2262    /// use zerocopy::*;
2263    /// # use zerocopy_derive::*;
2264    ///
2265    /// #[derive(TryFromBytes, KnownLayout)]
2266    /// #[repr(C)]
2267    /// struct ZSTy {
2268    ///     leading_sized: NonZeroU16,
2269    ///     trailing_dst: [()],
2270    /// }
2271    ///
2272    /// let src = &mut [85, 85][..];
2273    /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2274    /// assert_eq!(zsty.trailing_dst.len(), 42);
2275    /// ```
2276    ///
2277    /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize>,
    {
        // Cast `source` to a `Self` with exactly `count` trailing elements,
        // requiring that `source` be exactly the right length (no leftover
        // bytes) and appropriately aligned. Bit validity is checked below.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
        {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                //
                // Note that one panic or post-monomorphization error condition is
                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
                // pointer when `Self: !Immutable`. Since the pointer here is derived
                // from `&mut [u8]` via `Ptr::from_mut` and cast with
                // `BecauseExclusive`, it is exclusive, not shared, so this panic
                // condition will not happen. (`Self: Immutable` is not a bound on
                // this method, so exclusivity is what rules the panic out.)
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_mut()),
                    Err(e) => {
                        // Bit-validity failure: map the pointer back to the
                        // original byte slice for the returned error.
                        Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into())
                    }
                }
            }
            // Size or alignment failure: convert the `Ptr` back to `&mut [u8]`.
            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
        }
    }
2308
2309    /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2310    /// with a DST length equal to `count`.
2311    ///
2312    /// This method attempts to return a reference to the prefix of `source`
2313    /// interpreted as a `Self` with `count` trailing elements, and a reference
2314    /// to the remaining bytes. If the length of `source` is less than the size
2315    /// of `Self` with `count` elements, if `source` is not appropriately
2316    /// aligned, or if the prefix of `source` does not contain a valid instance
2317    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2318    /// you can [infallibly discard the alignment error][ConvertError::from].
2319    ///
2320    /// [self-unaligned]: Unaligned
2321    /// [slice-dst]: KnownLayout#dynamically-sized-types
2322    ///
2323    /// # Examples
2324    ///
2325    /// ```
2326    /// # #![allow(non_camel_case_types)] // For C0::xC0
2327    /// use zerocopy::TryFromBytes;
2328    /// # use zerocopy_derive::*;
2329    ///
2330    /// // The only valid value of this type is the byte `0xC0`
2331    /// #[derive(TryFromBytes, KnownLayout)]
2332    /// #[repr(u8)]
2333    /// enum C0 { xC0 = 0xC0 }
2334    ///
2335    /// // The only valid value of this type is the bytes `0xC0C0`.
2336    /// #[derive(TryFromBytes, KnownLayout)]
2337    /// #[repr(C)]
2338    /// struct C0C0(C0, C0);
2339    ///
2340    /// #[derive(TryFromBytes, KnownLayout)]
2341    /// #[repr(C)]
2342    /// struct Packet {
2343    ///     magic_number: C0C0,
2344    ///     mug_size: u8,
2345    ///     temperature: u8,
2346    ///     marshmallows: [[u8; 2]],
2347    /// }
2348    ///
2349    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2350    ///
2351    /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2352    ///
2353    /// assert_eq!(packet.mug_size, 240);
2354    /// assert_eq!(packet.temperature, 77);
2355    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2356    /// assert_eq!(suffix, &[8u8][..]);
2357    ///
2358    /// packet.temperature = 111;
2359    /// suffix[0] = 222;
2360    ///
2361    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2362    ///
    /// // These bytes are not a valid instance of `Packet`.
2364    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2365    /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2366    /// ```
2367    ///
2368    /// Since an explicit `count` is provided, this method supports types with
2369    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2370    /// which do not take an explicit count do not support such types.
2371    ///
2372    /// ```
2373    /// use core::num::NonZeroU16;
2374    /// use zerocopy::*;
2375    /// # use zerocopy_derive::*;
2376    ///
2377    /// #[derive(TryFromBytes, KnownLayout)]
2378    /// #[repr(C)]
2379    /// struct ZSTy {
2380    ///     leading_sized: NonZeroU16,
2381    ///     trailing_dst: [()],
2382    /// }
2383    ///
2384    /// let src = &mut [85, 85][..];
2385    /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2386    /// assert_eq!(zsty.trailing_dst.len(), 42);
2387    /// ```
2388    ///
2389    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2390    #[must_use = "has no side effects"]
2391    #[inline]
2392    fn try_mut_from_prefix_with_elems(
2393        source: &mut [u8],
2394        count: usize,
2395    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
2396    where
2397        Self: KnownLayout<PointerMetadata = usize>,
2398    {
2399        try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
2400    }
2401
    /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
    /// with a DST length equal to `count`.
    ///
    /// This method attempts to return a reference to the suffix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the preceding bytes. If the length of `source` is less than the size
    /// of `Self` with `count` elements, if the suffix of `source` is not
    /// appropriately aligned, or if the suffix of `source` does not contain a
    /// valid instance of `Self`, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][ConvertError::from].
    ///
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(non_camel_case_types)] // For C0::xC0
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
    /// assert_eq!(prefix, &[123u8][..]);
    ///
    /// prefix[0] = 111;
    /// packet.temperature = 222;
    ///
    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
    /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use core::num::NonZeroU16;
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: NonZeroU16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`try_mut_from_suffix`]: TryFromBytes::try_mut_from_suffix
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_suffix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize>,
    {
        try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
    }
2495
    /// Attempts to read the given `source` as a `Self`.
    ///
    /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
    /// instance of `Self`, this returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    /// }
    ///
    /// let bytes = &[0xC0, 0xC0, 240, 77][..];
    ///
    /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &mut [0x10, 0xC0, 240, 77][..];
    /// assert!(Packet::try_read_from_bytes(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Copy `source` into an uninitialized, `Self`-sized buffer; a length
        // mismatch surfaces here as a size error.
        let candidate = match MaybeUninit::<Self>::read_from_bytes(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of its
        // bytes are initialized.
        unsafe { try_read_from(source, candidate) }
    }
2552
    /// Attempts to read a `Self` from the prefix of the given `source`.
    ///
    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any remaining bytes. If
    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
    /// of `Self`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    ///
    /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    /// assert!(Packet::try_read_from_prefix(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Copy the leading `size_of::<Self>()` bytes of `source` into an
        // uninitialized buffer; a too-short `source` is reported as a size
        // error.
        let (candidate, suffix) = match MaybeUninit::<Self>::read_from_prefix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of its
        // bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
    }
2613
    /// Attempts to read a `Self` from the suffix of the given `source`.
    ///
    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any preceding bytes. If
    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
    /// of `Self`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(non_camel_case_types)] // For C0::xC0
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
    ///
    /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
    /// assert!(Packet::try_read_from_suffix(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Copy the trailing `size_of::<Self>()` bytes of `source` into an
        // uninitialized buffer; a too-short `source` is reported as a size
        // error.
        let (prefix, candidate) = match MaybeUninit::<Self>::read_from_suffix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of its
        // bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
    }
2675}
2676
/// Attempts to cast the prefix or suffix (per `cast_type`) of `source` to a
/// bit-validated `&T`, returning it alongside the remaining bytes.
#[inline(always)]
fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
    source: &[u8],
    cast_type: CastType,
    meta: Option<T::PointerMetadata>,
) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
    // The cast checks size and alignment only; bit validity is checked below.
    match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
        Ok((source, prefix_suffix)) => {
            // This call may panic. If that happens, it doesn't cause any soundness
            // issues, as we have not generated any invalid state which we need to
            // fix before returning.
            //
            // Note that one panic or post-monomorphization error condition is
            // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
            // pointer when `T: !Immutable`. Since this function bounds
            // `T: Immutable`, this panic condition will not happen.
            match source.try_into_valid() {
                Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
            }
        }
        Err(e) => Err(e.map_src(Ptr::as_ref).into()),
    }
}
2701
/// Attempts to cast the prefix or suffix (per `cast_type`) of `candidate` to a
/// bit-validated `&mut T`, returning it alongside the remaining bytes.
#[inline(always)]
fn try_mut_from_prefix_suffix<T: TryFromBytes + KnownLayout + ?Sized>(
    candidate: &mut [u8],
    cast_type: CastType,
    meta: Option<T::PointerMetadata>,
) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
    // The cast checks size and alignment only; bit validity is checked below.
    match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
        Ok((candidate, prefix_suffix)) => {
            // This call may panic. If that happens, it doesn't cause any soundness
            // issues, as we have not generated any invalid state which we need to
            // fix before returning.
            //
            // Note that one panic or post-monomorphization error condition is
            // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
            // pointer when `T: !Immutable`. Since `candidate` is an exclusive
            // pointer (it was derived via `Ptr::from_mut`), this panic condition
            // will not happen.
            match candidate.try_into_valid() {
                Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into()),
            }
        }
        Err(e) => Err(e.map_src(Ptr::as_mut).into()),
    }
}
2726
/// Returns a 2-tuple with its elements in reverse order.
#[inline(always)]
fn swap<T, U>(pair: (T, U)) -> (U, T) {
    let (first, second) = pair;
    (second, first)
}
2731
/// Checks that `candidate` holds a bit-valid `T` and, if so, returns it by
/// value; otherwise returns a validity error wrapping `source`.
///
/// # Safety
///
/// All bytes of `candidate` must be initialized.
#[inline(always)]
unsafe fn try_read_from<S, T: TryFromBytes>(
    source: S,
    mut candidate: MaybeUninit<T>,
) -> Result<T, TryReadError<S, T>> {
    // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
    // to add a `T: Immutable` bound.
    let c_ptr = Ptr::from_mut(&mut candidate);
    let c_ptr = c_ptr.transparent_wrapper_into_inner();
    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it derived from
    // `candidate`, which the caller promises is entirely initialized.
    let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };

    // This call may panic. If that happens, it doesn't cause any soundness
    // issues, as we have not generated any invalid state which we need to
    // fix before returning.
    //
    // Note that one panic or post-monomorphization error condition is
    // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
    // pointer when `T: !Immutable`. Since `c_ptr` is an exclusive pointer
    // (it was derived via `Ptr::from_mut`), this panic condition will not
    // happen.
    if !T::is_bit_valid(c_ptr.forget_aligned()) {
        return Err(ValidityError::new(source).into());
    }

    // SAFETY: We just validated that `candidate` contains a valid `T`.
    Ok(unsafe { candidate.assume_init() })
}
2763
2764/// Types for which a sequence of bytes all set to zero represents a valid
2765/// instance of the type.
2766///
2767/// Any memory region of the appropriate length which is guaranteed to contain
2768/// only zero bytes can be viewed as any `FromZeros` type with no runtime
2769/// overhead. This is useful whenever memory is known to be in a zeroed state,
/// such as memory returned from some allocation routines.
2771///
2772/// # Warning: Padding bytes
2773///
2774/// Note that, when a value is moved or copied, only the non-padding bytes of
2775/// that value are guaranteed to be preserved. It is unsound to assume that
2776/// values written to padding bytes are preserved after a move or copy. For more
2777/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
2778///
2779/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
2780///
2781/// # Implementation
2782///
2783/// **Do not implement this trait yourself!** Instead, use
2784/// [`#[derive(FromZeros)]`][derive]; e.g.:
2785///
2786/// ```
2787/// # use zerocopy_derive::{FromZeros, Immutable};
2788/// #[derive(FromZeros)]
2789/// struct MyStruct {
2790/// # /*
2791///     ...
2792/// # */
2793/// }
2794///
2795/// #[derive(FromZeros)]
2796/// #[repr(u8)]
2797/// enum MyEnum {
2798/// #   Variant0,
2799/// # /*
2800///     ...
2801/// # */
2802/// }
2803///
2804/// #[derive(FromZeros, Immutable)]
2805/// union MyUnion {
2806/// #   variant: u8,
2807/// # /*
2808///     ...
2809/// # */
2810/// }
2811/// ```
2812///
2813/// This derive performs a sophisticated, compile-time safety analysis to
2814/// determine whether a type is `FromZeros`.
2815///
2816/// # Safety
2817///
2818/// *This section describes what is required in order for `T: FromZeros`, and
2819/// what unsafe code may assume of such types. If you don't plan on implementing
2820/// `FromZeros` manually, and you don't plan on writing unsafe code that
2821/// operates on `FromZeros` types, then you don't need to read this section.*
2822///
2823/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
2824/// `T` whose bytes are all initialized to zero. If a type is marked as
2825/// `FromZeros` which violates this contract, it may cause undefined behavior.
2826///
2827/// `#[derive(FromZeros)]` only permits [types which satisfy these
2828/// requirements][derive-analysis].
2829///
2830#[cfg_attr(
2831    feature = "derive",
2832    doc = "[derive]: zerocopy_derive::FromZeros",
2833    doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
2834)]
2835#[cfg_attr(
2836    not(feature = "derive"),
2837    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
2838    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
2839)]
2840#[cfg_attr(
2841    zerocopy_diagnostic_on_unimplemented,
2842    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
2843)]
2844pub unsafe trait FromZeros: TryFromBytes {
    // The `Self: Sized` bound makes it so that `FromZeros` is still object
    // safe.
    //
    // As the name indicates, this hidden, required method exists to
    // discourage manual implementations of this trait; it is provided by
    // `#[derive(FromZeros)]`.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
2851
2852    /// Overwrites `self` with zeros.
2853    ///
2854    /// Sets every byte in `self` to 0. While this is similar to doing `*self =
2855    /// Self::new_zeroed()`, it differs in that `zero` does not semantically
2856    /// drop the current value and replace it with a new one — it simply
2857    /// modifies the bytes of the existing value.
2858    ///
2859    /// # Examples
2860    ///
2861    /// ```
2862    /// # use zerocopy::FromZeros;
2863    /// # use zerocopy_derive::*;
2864    /// #
2865    /// #[derive(FromZeros)]
2866    /// #[repr(C)]
2867    /// struct PacketHeader {
2868    ///     src_port: [u8; 2],
2869    ///     dst_port: [u8; 2],
2870    ///     length: [u8; 2],
2871    ///     checksum: [u8; 2],
2872    /// }
2873    ///
2874    /// let mut header = PacketHeader {
2875    ///     src_port: 100u16.to_be_bytes(),
2876    ///     dst_port: 200u16.to_be_bytes(),
2877    ///     length: 300u16.to_be_bytes(),
2878    ///     checksum: 400u16.to_be_bytes(),
2879    /// };
2880    ///
2881    /// header.zero();
2882    ///
2883    /// assert_eq!(header.src_port, [0, 0]);
2884    /// assert_eq!(header.dst_port, [0, 0]);
2885    /// assert_eq!(header.length, [0, 0]);
2886    /// assert_eq!(header.checksum, [0, 0]);
2887    /// ```
2888    #[inline(always)]
2889    fn zero(&mut self) {
2890        let slf: *mut Self = self;
2891        let len = mem::size_of_val(self);
2892        // SAFETY:
2893        // - `self` is guaranteed by the type system to be valid for writes of
2894        //   size `size_of_val(self)`.
2895        // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
2896        //   as required by `u8`.
2897        // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
2898        //   of `Self.`
2899        //
2900        // TODO(#429): Add references to docs and quotes.
2901        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
2902    }
2903
    /// Creates an instance of `Self` from zeroed bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// # use zerocopy_derive::*;
    /// #
    /// #[derive(FromZeros)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header: PacketHeader = FromZeros::new_zeroed();
    ///
    /// assert_eq!(header.src_port, [0, 0]);
    /// assert_eq!(header.dst_port, [0, 0]);
    /// assert_eq!(header.length, [0, 0]);
    /// assert_eq!(header.checksum, [0, 0]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn new_zeroed() -> Self
    where
        Self: Sized,
    {
        // SAFETY: `Self: FromZeros` guarantees that the all-zeros bit pattern
        // is a valid instance of `Self`, so `mem::zeroed` cannot produce an
        // invalid value.
        unsafe { mem::zeroed() }
    }
2937
    /// Creates a `Box<Self>` from zeroed bytes.
    ///
    /// This function is useful for allocating large values on the heap and
    /// zero-initializing them, without ever creating a temporary instance of
    /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
    /// will allocate `[u8; 1048576]` directly on the heap; it does not require
    /// storing `[u8; 1048576]` in a temporary variable on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
    /// have performance benefits.
    ///
    /// Note that `Box<Self>` can be converted to `Arc<Self>` and other
    /// container types without reallocation.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is guaranteed
    /// never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(any(feature = "alloc", test))]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed() -> Result<Box<Self>, AllocError>
    where
        Self: Sized,
    {
        // If `T` is a ZST, then return a proper boxed instance of it. There is
        // no allocation, but `Box` does require a correct dangling pointer.
        let layout = Layout::new::<Self>();
        if layout.size() == 0 {
            // Construct the `Box` from a dangling pointer to avoid calling
            // `Self::new_zeroed`. This ensures that stack space is never
            // allocated for `Self` even on lower opt-levels where this branch
            // might not get optimized out.

            // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
            // requirements are that the pointer is non-null and sufficiently
            // aligned. Per [2], `NonNull::dangling` produces a pointer which
            // is sufficiently aligned. Since the produced pointer is a
            // `NonNull`, it is non-null.
            //
            // [1] Per https://doc.rust-lang.org/nightly/std/boxed/index.html#memory-layout:
            //
            //   For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
            //
            // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
            //
            //   Creates a new `NonNull` that is dangling, but well-aligned.
            return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
        }

        // SAFETY: `layout.size()` is non-zero (the zero-size case returned
        // early above), satisfying `alloc_zeroed`'s layout requirement.
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
        if ptr.is_null() {
            return Err(AllocError);
        }
        // SAFETY: `ptr` is non-null (checked above) and was allocated by the
        // global allocator with `Layout::new::<Self>()`, so it is properly
        // sized and aligned for `Self`. Since `Self: FromZeros`, the zeroed
        // allocation holds a valid `Self`, so `Box` may take ownership of it.
        #[allow(clippy::undocumented_unsafe_blocks)]
        Ok(unsafe { Box::from_raw(ptr) })
    }
3000
    /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
    ///
    /// This function is useful for allocating large values of `[Self]` on the
    /// heap and zero-initializing them, without ever creating a temporary
    /// instance of `[Self; _]` on the stack. For example,
    /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
    /// the heap; it does not require storing the slice on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
    /// benefits.
    ///
    /// If `Self` is a zero-sized type, then this function will return a
    /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
    /// actual information, but its `len()` property will report the correct
    /// value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
    where
        Self: KnownLayout<PointerMetadata = usize>,
    {
        // Compute the total size of `Self` with `count` trailing elements; an
        // overflowing size is reported as an allocation error.
        let size = match count.size_for_metadata(Self::LAYOUT) {
            Some(size) => size,
            None => return Err(AllocError),
        };

        let align = Self::LAYOUT.align.get();
        // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a
        // bug in which sufficiently-large allocations (those which, when
        // rounded up to the alignment, overflow `isize`) are not rejected,
        // which can cause undefined behavior. See #64 for details.
        //
        // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
        #[allow(clippy::as_conversions)]
        let max_alloc = (isize::MAX as usize).saturating_sub(align);
        if size > max_alloc {
            return Err(AllocError);
        }

        // TODO(https://github.com/rust-lang/rust/issues/55724): Use
        // `Layout::repeat` once it's stabilized.
        let layout = Layout::from_size_align(size, align).or(Err(AllocError))?;

        let ptr = if layout.size() != 0 {
            // SAFETY: `layout.size()` is non-zero in this branch, satisfying
            // `alloc_zeroed`'s layout requirement.
            #[allow(clippy::undocumented_unsafe_blocks)]
            let ptr = unsafe { alloc::alloc::alloc_zeroed(layout) };
            match NonNull::new(ptr) {
                Some(ptr) => ptr,
                None => return Err(AllocError),
            }
        } else {
            let align = Self::LAYOUT.align.get();
            // We use `transmute` instead of an `as` cast since Miri (with
            // strict provenance enabled) notices and complains that an `as`
            // cast creates a pointer with no provenance. Miri isn't smart
            // enough to realize that we're only executing this branch when
            // we're constructing a zero-sized `Box`, which doesn't require
            // provenance.
            //
            // SAFETY: any initialized bit sequence is a bit-valid `*mut u8`.
            // All bits of a `usize` are initialized.
            #[allow(clippy::useless_transmute)]
            let dangling = unsafe { mem::transmute::<usize, *mut u8>(align) };
            // SAFETY: `dangling` is constructed from `Self::LAYOUT.align`,
            // which is a `NonZeroUsize`, which is guaranteed to be non-zero.
            //
            // `Box<[T]>` does not allocate when `T` is zero-sized or when `len`
            // is zero, but it does require a non-null dangling pointer for its
            // allocation.
            //
            // TODO(https://github.com/rust-lang/rust/issues/95228): Use
            // `std::ptr::without_provenance` once it's stable. That may
            // optimize better. As written, Rust may assume that this consumes
            // "exposed" provenance, and thus Rust may have to assume that this
            // may consume provenance from any pointer whose provenance has been
            // exposed.
            #[allow(fuzzy_provenance_casts)]
            unsafe {
                NonNull::new_unchecked(dangling)
            }
        };

        let ptr = Self::raw_from_ptr_len(ptr, count);

        // SAFETY: `ptr` is non-null: in the non-zero-size branch it came from
        // the global allocator and was checked via `NonNull::new`; in the
        // zero-size branch it was constructed from `Self::LAYOUT.align`
        // (non-zero), and a pointer whose address equals `Self::LAYOUT.align`
        // is validly-aligned for `Self`. In the non-zero-size case, the
        // allocation was made with `layout` (`size` bytes for `count`
        // elements, `Self::LAYOUT.align` alignment), and `Self: FromZeros`
        // makes the zeroed bytes a valid `Self`, so `Box` may take ownership.
        #[allow(clippy::undocumented_unsafe_blocks)]
        Ok(unsafe { Box::from_raw(ptr.as_ptr()) })
    }
3100
    // Deprecated, hidden shim: forwards to `<[Self]>::new_box_zeroed_with_elems`
    // and is retained only for backwards compatibility with 0.7-era callers.
    #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
    #[doc(hidden)]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[must_use = "has no side effects (other than allocation)"]
    #[inline(always)]
    fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
    where
        Self: Sized,
    {
        <[Self]>::new_box_zeroed_with_elems(len)
    }
3113
3114    /// Creates a `Vec<Self>` from zeroed bytes.
3115    ///
3116    /// This function is useful for allocating large values of `Vec`s and
3117    /// zero-initializing them, without ever creating a temporary instance of
3118    /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
3119    /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
3120    /// heap; it does not require storing intermediate values on the stack.
3121    ///
3122    /// On systems that use a heap implementation that supports allocating from
3123    /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
3124    ///
3125    /// If `Self` is a zero-sized type, then this function will return a
3126    /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
3127    /// actual information, but its `len()` property will report the correct
3128    /// value.
3129    ///
3130    /// # Errors
3131    ///
3132    /// Returns an error on allocation failure. Allocation failure is
3133    /// guaranteed never to cause a panic or an abort.
3134    #[must_use = "has no side effects (other than allocation)"]
3135    #[cfg(feature = "alloc")]
3136    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3137    #[inline(always)]
3138    fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
3139    where
3140        Self: Sized,
3141    {
3142        <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
3143    }
3144
3145    /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
3146    /// the vector. The new items are initialized with zeros.
3147    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
3148    #[cfg(feature = "alloc")]
3149    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3150    #[inline(always)]
3151    fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
3152    where
3153        Self: Sized,
3154    {
3155        // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
3156        // panic condition is not satisfied.
3157        <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
3158    }
3159
    /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
    /// items are initialized with zeros.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn insert_vec_zeroed(
        v: &mut Vec<Self>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        // Enforce the documented panic condition up front; the safety argument
        // below relies on `position <= v.len()`.
        assert!(position <= v.len());
        // We only conditionally compile on versions on which `try_reserve` is
        // stable; the Clippy lint is a false positive.
        #[allow(clippy::incompatible_msrv)]
        v.try_reserve(additional).map_err(|_| AllocError)?;
        // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
        // * `ptr.add(position)`
        // * `position + additional`
        // * `v.len() + additional`
        //
        // `v.len() - position` cannot overflow because we asserted that
        // `position <= v.len()`.
        unsafe {
            // This is a potentially overlapping copy.
            let ptr = v.as_mut_ptr();
            // Shift the tail (`v.len() - position` elements) up by `additional`
            // slots, opening a gap at `position`. `copy_to` permits overlap.
            #[allow(clippy::arithmetic_side_effects)]
            ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
            // Zero-fill the gap. `Self: FromZeros`, so all-zeros bytes are a
            // valid `Self`.
            ptr.add(position).write_bytes(0, additional);
            // Only now publish the new length: at this point every one of the
            // `v.len() + additional` elements is initialized. Ordering here is
            // load-bearing — `set_len` must come last.
            #[allow(clippy::arithmetic_side_effects)]
            v.set_len(v.len() + additional);
        }

        Ok(())
    }
3202}
3203
3204/// Analyzes whether a type is [`FromBytes`].
3205///
3206/// This derive analyzes, at compile time, whether the annotated type satisfies
3207/// the [safety conditions] of `FromBytes` and implements `FromBytes` if it is
3208/// sound to do so. This derive can be applied to structs, enums, and unions;
3209/// e.g.:
3210///
3211/// ```
3212/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
3213/// #[derive(FromBytes)]
3214/// struct MyStruct {
3215/// # /*
3216///     ...
3217/// # */
3218/// }
3219///
3220/// #[derive(FromBytes)]
3221/// #[repr(u8)]
3222/// enum MyEnum {
3223/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3224/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3225/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3226/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3227/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3228/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3229/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3230/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3231/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3232/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3233/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3234/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3235/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3236/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3237/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3238/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3239/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3240/// #   VFF,
3241/// # /*
3242///     ...
3243/// # */
3244/// }
3245///
3246/// #[derive(FromBytes, Immutable)]
3247/// union MyUnion {
3248/// #   variant: u8,
3249/// # /*
3250///     ...
3251/// # */
3252/// }
3253/// ```
3254///
3255/// [safety conditions]: trait@FromBytes#safety
3256///
3257/// # Analysis
3258///
3259/// *This section describes, roughly, the analysis performed by this derive to
3260/// determine whether it is sound to implement `FromBytes` for a given type.
3261/// Unless you are modifying the implementation of this derive, or attempting to
3262/// manually implement `FromBytes` for a type yourself, you don't need to read
3263/// this section.*
3264///
3265/// If a type has the following properties, then this derive can implement
3266/// `FromBytes` for that type:
3267///
3268/// - If the type is a struct, all of its fields must be `FromBytes`.
3269/// - If the type is an enum:
3270///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
3271///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
3272///   - The maximum number of discriminants must be used (so that every possible
3273///     bit pattern is a valid one). Be very careful when using the `C`,
3274///     `usize`, or `isize` representations, as their size is
3275///     platform-dependent.
3276///   - Its fields must be `FromBytes`.
3277///
3278/// This analysis is subject to change. Unsafe code may *only* rely on the
3279/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
3280/// implementation details of this derive.
3281///
3282/// ## Why isn't an explicit representation required for structs?
3283///
3284/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
3285/// that structs are marked with `#[repr(C)]`.
3286///
/// Per the [Rust reference][reference],
3288///
3289/// > The representation of a type can change the padding between fields, but
3290/// > does not change the layout of the fields themselves.
3291///
3292/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
3293///
3294/// Since the layout of structs only consists of padding bytes and field bytes,
3295/// a struct is soundly `FromBytes` if:
3296/// 1. its padding is soundly `FromBytes`, and
3297/// 2. its fields are soundly `FromBytes`.
3298///
3299/// The answer to the first question is always yes: padding bytes do not have
3300/// any validity constraints. A [discussion] of this question in the Unsafe Code
3301/// Guidelines Working Group concluded that it would be virtually unimaginable
3302/// for future versions of rustc to add validity constraints to padding bytes.
3303///
3304/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
3305///
3306/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
3307/// its fields are `FromBytes`.
3308// TODO(#146): Document why we don't require an enum to have an explicit `repr`
3309// attribute.
3310#[cfg(any(feature = "derive", test))]
3311#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
3312pub use zerocopy_derive::FromBytes;
3313
3314/// Types for which any bit pattern is valid.
3315///
3316/// Any memory region of the appropriate length which contains initialized bytes
3317/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3318/// useful for efficiently parsing bytes as structured data.
3319///
3320/// # Warning: Padding bytes
3321///
3322/// Note that, when a value is moved or copied, only the non-padding bytes of
3323/// that value are guaranteed to be preserved. It is unsound to assume that
3324/// values written to padding bytes are preserved after a move or copy. For
3325/// example, the following is unsound:
3326///
3327/// ```rust,no_run
3328/// use core::mem::{size_of, transmute};
3329/// use zerocopy::FromZeros;
3330/// # use zerocopy_derive::*;
3331///
3332/// // Assume `Foo` is a type with padding bytes.
3333/// #[derive(FromZeros, Default)]
3334/// struct Foo {
3335/// # /*
3336///     ...
3337/// # */
3338/// }
3339///
3340/// let mut foo: Foo = Foo::default();
3341/// FromZeros::zero(&mut foo);
3342/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3343/// // those writes are not guaranteed to be preserved in padding bytes when
3344/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3345/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3346/// ```
3347///
3348/// # Implementation
3349///
3350/// **Do not implement this trait yourself!** Instead, use
3351/// [`#[derive(FromBytes)]`][derive]; e.g.:
3352///
3353/// ```
3354/// # use zerocopy_derive::{FromBytes, Immutable};
3355/// #[derive(FromBytes)]
3356/// struct MyStruct {
3357/// # /*
3358///     ...
3359/// # */
3360/// }
3361///
3362/// #[derive(FromBytes)]
3363/// #[repr(u8)]
3364/// enum MyEnum {
3365/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3366/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3367/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3368/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3369/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3370/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3371/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3372/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3373/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3374/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3375/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3376/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3377/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3378/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3379/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3380/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3381/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3382/// #   VFF,
3383/// # /*
3384///     ...
3385/// # */
3386/// }
3387///
3388/// #[derive(FromBytes, Immutable)]
3389/// union MyUnion {
3390/// #   variant: u8,
3391/// # /*
3392///     ...
3393/// # */
3394/// }
3395/// ```
3396///
3397/// This derive performs a sophisticated, compile-time safety analysis to
3398/// determine whether a type is `FromBytes`.
3399///
3400/// # Safety
3401///
3402/// *This section describes what is required in order for `T: FromBytes`, and
3403/// what unsafe code may assume of such types. If you don't plan on implementing
3404/// `FromBytes` manually, and you don't plan on writing unsafe code that
3405/// operates on `FromBytes` types, then you don't need to read this section.*
3406///
3407/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3408/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3409/// words, any byte value which is not uninitialized). If a type is marked as
3410/// `FromBytes` which violates this contract, it may cause undefined behavior.
3411///
3412/// `#[derive(FromBytes)]` only permits [types which satisfy these
3413/// requirements][derive-analysis].
3414///
3415#[cfg_attr(
3416    feature = "derive",
3417    doc = "[derive]: zerocopy_derive::FromBytes",
3418    doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3419)]
3420#[cfg_attr(
3421    not(feature = "derive"),
3422    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3423    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3424)]
3425#[cfg_attr(
3426    zerocopy_diagnostic_on_unimplemented,
3427    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
3428)]
3429pub unsafe trait FromBytes: FromZeros {
    // This hidden required method discourages manual impls: per the trait
    // docs, only `#[derive(FromBytes)]` should implement `FromBytes`. The
    // `Self: Sized` bound makes it so that `FromBytes` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
3436
3437    /// Interprets the given `source` as a `&Self`.
3438    ///
3439    /// This method attempts to return a reference to `source` interpreted as a
3440    /// `Self`. If the length of `source` is not a [valid size of
3441    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3442    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3443    /// [infallibly discard the alignment error][size-error-from].
3444    ///
3445    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3446    ///
3447    /// [valid-size]: crate#what-is-a-valid-size
3448    /// [self-unaligned]: Unaligned
3449    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3450    /// [slice-dst]: KnownLayout#dynamically-sized-types
3451    ///
3452    /// # Compile-Time Assertions
3453    ///
3454    /// This method cannot yet be used on unsized types whose dynamically-sized
3455    /// component is zero-sized. Attempting to use this method on such types
3456    /// results in a compile-time assertion error; e.g.:
3457    ///
3458    /// ```compile_fail,E0080
3459    /// use zerocopy::*;
3460    /// # use zerocopy_derive::*;
3461    ///
3462    /// #[derive(FromBytes, Immutable, KnownLayout)]
3463    /// #[repr(C)]
3464    /// struct ZSTy {
3465    ///     leading_sized: u16,
3466    ///     trailing_dst: [()],
3467    /// }
3468    ///
3469    /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // âš  Compile Error!
3470    /// ```
3471    ///
3472    /// # Examples
3473    ///
3474    /// ```
3475    /// use zerocopy::FromBytes;
3476    /// # use zerocopy_derive::*;
3477    ///
3478    /// #[derive(FromBytes, KnownLayout, Immutable)]
3479    /// #[repr(C)]
3480    /// struct PacketHeader {
3481    ///     src_port: [u8; 2],
3482    ///     dst_port: [u8; 2],
3483    ///     length: [u8; 2],
3484    ///     checksum: [u8; 2],
3485    /// }
3486    ///
3487    /// #[derive(FromBytes, KnownLayout, Immutable)]
3488    /// #[repr(C)]
3489    /// struct Packet {
3490    ///     header: PacketHeader,
3491    ///     body: [u8],
3492    /// }
3493    ///
3494    /// // These bytes encode a `Packet`.
3495    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
3496    ///
3497    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
3498    ///
3499    /// assert_eq!(packet.header.src_port, [0, 1]);
3500    /// assert_eq!(packet.header.dst_port, [2, 3]);
3501    /// assert_eq!(packet.header.length, [4, 5]);
3502    /// assert_eq!(packet.header.checksum, [6, 7]);
3503    /// assert_eq!(packet.body, [8, 9, 10, 11]);
3504    /// ```
3505    #[must_use = "has no side effects"]
3506    #[inline]
3507    fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
3508    where
3509        Self: KnownLayout + Immutable,
3510    {
3511        static_assert_dst_is_not_zst!(Self);
3512        match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
3513            Ok(ptr) => Ok(ptr.bikeshed_recall_valid().as_ref()),
3514            Err(err) => Err(err.map_src(|src| src.as_ref())),
3515        }
3516    }
3517
3518    /// Interprets the prefix of the given `source` as a `&Self` without
3519    /// copying.
3520    ///
3521    /// This method computes the [largest possible size of `Self`][valid-size]
3522    /// that can fit in the leading bytes of `source`, then attempts to return
3523    /// both a reference to those bytes interpreted as a `Self`, and a reference
3524    /// to the remaining bytes. If there are insufficient bytes, or if `source`
3525    /// is not appropriately aligned, this returns `Err`. If [`Self:
3526    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3527    /// error][size-error-from].
3528    ///
3529    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3530    ///
3531    /// [valid-size]: crate#what-is-a-valid-size
3532    /// [self-unaligned]: Unaligned
3533    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3534    /// [slice-dst]: KnownLayout#dynamically-sized-types
3535    ///
3536    /// # Compile-Time Assertions
3537    ///
3538    /// This method cannot yet be used on unsized types whose dynamically-sized
3539    /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
3540    /// support such types. Attempting to use this method on such types results
3541    /// in a compile-time assertion error; e.g.:
3542    ///
3543    /// ```compile_fail,E0080
3544    /// use zerocopy::*;
3545    /// # use zerocopy_derive::*;
3546    ///
3547    /// #[derive(FromBytes, Immutable, KnownLayout)]
3548    /// #[repr(C)]
3549    /// struct ZSTy {
3550    ///     leading_sized: u16,
3551    ///     trailing_dst: [()],
3552    /// }
3553    ///
3554    /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // âš  Compile Error!
3555    /// ```
3556    ///
3557    /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
3558    ///
3559    /// # Examples
3560    ///
3561    /// ```
3562    /// use zerocopy::FromBytes;
3563    /// # use zerocopy_derive::*;
3564    ///
3565    /// #[derive(FromBytes, KnownLayout, Immutable)]
3566    /// #[repr(C)]
3567    /// struct PacketHeader {
3568    ///     src_port: [u8; 2],
3569    ///     dst_port: [u8; 2],
3570    ///     length: [u8; 2],
3571    ///     checksum: [u8; 2],
3572    /// }
3573    ///
3574    /// #[derive(FromBytes, KnownLayout, Immutable)]
3575    /// #[repr(C)]
3576    /// struct Packet {
3577    ///     header: PacketHeader,
3578    ///     body: [[u8; 2]],
3579    /// }
3580    ///
3581    /// // These are more bytes than are needed to encode a `Packet`.
3582    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
3583    ///
3584    /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
3585    ///
3586    /// assert_eq!(packet.header.src_port, [0, 1]);
3587    /// assert_eq!(packet.header.dst_port, [2, 3]);
3588    /// assert_eq!(packet.header.length, [4, 5]);
3589    /// assert_eq!(packet.header.checksum, [6, 7]);
3590    /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
3591    /// assert_eq!(suffix, &[14u8][..]);
3592    /// ```
3593    #[must_use = "has no side effects"]
3594    #[inline]
3595    fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
3596    where
3597        Self: KnownLayout + Immutable,
3598    {
3599        static_assert_dst_is_not_zst!(Self);
3600        ref_from_prefix_suffix(source, None, CastType::Prefix)
3601    }
3602
3603    /// Interprets the suffix of the given bytes as a `&Self`.
3604    ///
3605    /// This method computes the [largest possible size of `Self`][valid-size]
3606    /// that can fit in the trailing bytes of `source`, then attempts to return
3607    /// both a reference to those bytes interpreted as a `Self`, and a reference
3608    /// to the preceding bytes. If there are insufficient bytes, or if that
3609    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
3610    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
3611    /// alignment error][size-error-from].
3612    ///
3613    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3614    ///
3615    /// [valid-size]: crate#what-is-a-valid-size
3616    /// [self-unaligned]: Unaligned
3617    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3618    /// [slice-dst]: KnownLayout#dynamically-sized-types
3619    ///
3620    /// # Compile-Time Assertions
3621    ///
3622    /// This method cannot yet be used on unsized types whose dynamically-sized
3623    /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
3624    /// support such types. Attempting to use this method on such types results
3625    /// in a compile-time assertion error; e.g.:
3626    ///
3627    /// ```compile_fail,E0080
3628    /// use zerocopy::*;
3629    /// # use zerocopy_derive::*;
3630    ///
3631    /// #[derive(FromBytes, Immutable, KnownLayout)]
3632    /// #[repr(C)]
3633    /// struct ZSTy {
3634    ///     leading_sized: u16,
3635    ///     trailing_dst: [()],
3636    /// }
3637    ///
3638    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // âš  Compile Error!
3639    /// ```
3640    ///
3641    /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
3642    ///
3643    /// # Examples
3644    ///
3645    /// ```
3646    /// use zerocopy::FromBytes;
3647    /// # use zerocopy_derive::*;
3648    ///
3649    /// #[derive(FromBytes, Immutable, KnownLayout)]
3650    /// #[repr(C)]
3651    /// struct PacketTrailer {
3652    ///     frame_check_sequence: [u8; 4],
3653    /// }
3654    ///
3655    /// // These are more bytes than are needed to encode a `PacketTrailer`.
3656    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3657    ///
3658    /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
3659    ///
3660    /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
3661    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
3662    /// ```
3663    #[must_use = "has no side effects"]
3664    #[inline]
3665    fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
3666    where
3667        Self: Immutable + KnownLayout,
3668    {
3669        static_assert_dst_is_not_zst!(Self);
3670        ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
3671    }
3672
3673    /// Interprets the given `source` as a `&mut Self`.
3674    ///
3675    /// This method attempts to return a reference to `source` interpreted as a
3676    /// `Self`. If the length of `source` is not a [valid size of
3677    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3678    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3679    /// [infallibly discard the alignment error][size-error-from].
3680    ///
3681    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3682    ///
3683    /// [valid-size]: crate#what-is-a-valid-size
3684    /// [self-unaligned]: Unaligned
3685    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3686    /// [slice-dst]: KnownLayout#dynamically-sized-types
3687    ///
3688    /// # Compile-Time Assertions
3689    ///
3690    /// This method cannot yet be used on unsized types whose dynamically-sized
3691    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
3692    /// support such types. Attempting to use this method on such types results
3693    /// in a compile-time assertion error; e.g.:
3694    ///
3695    /// ```compile_fail,E0080
3696    /// use zerocopy::*;
3697    /// # use zerocopy_derive::*;
3698    ///
3699    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3700    /// #[repr(C, packed)]
3701    /// struct ZSTy {
3702    ///     leading_sized: [u8; 2],
3703    ///     trailing_dst: [()],
3704    /// }
3705    ///
3706    /// let mut source = [85, 85];
3707    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // âš  Compile Error!
3708    /// ```
3709    ///
3710    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
3711    ///
3712    /// # Examples
3713    ///
3714    /// ```
3715    /// use zerocopy::FromBytes;
3716    /// # use zerocopy_derive::*;
3717    ///
3718    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3719    /// #[repr(C)]
3720    /// struct PacketHeader {
3721    ///     src_port: [u8; 2],
3722    ///     dst_port: [u8; 2],
3723    ///     length: [u8; 2],
3724    ///     checksum: [u8; 2],
3725    /// }
3726    ///
3727    /// // These bytes encode a `PacketHeader`.
3728    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
3729    ///
3730    /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
3731    ///
3732    /// assert_eq!(header.src_port, [0, 1]);
3733    /// assert_eq!(header.dst_port, [2, 3]);
3734    /// assert_eq!(header.length, [4, 5]);
3735    /// assert_eq!(header.checksum, [6, 7]);
3736    ///
3737    /// header.checksum = [0, 0];
3738    ///
3739    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
3740    /// ```
3741    #[must_use = "has no side effects"]
3742    #[inline]
3743    fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
3744    where
3745        Self: IntoBytes + KnownLayout,
3746    {
3747        static_assert_dst_is_not_zst!(Self);
3748        match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
3749            Ok(ptr) => Ok(ptr.bikeshed_recall_valid().as_mut()),
3750            Err(err) => Err(err.map_src(|src| src.as_mut())),
3751        }
3752    }
3753
3754    /// Interprets the prefix of the given `source` as a `&mut Self` without
3755    /// copying.
3756    ///
3757    /// This method computes the [largest possible size of `Self`][valid-size]
3758    /// that can fit in the leading bytes of `source`, then attempts to return
3759    /// both a reference to those bytes interpreted as a `Self`, and a reference
3760    /// to the remaining bytes. If there are insufficient bytes, or if `source`
3761    /// is not appropriately aligned, this returns `Err`. If [`Self:
3762    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3763    /// error][size-error-from].
3764    ///
3765    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3766    ///
3767    /// [valid-size]: crate#what-is-a-valid-size
3768    /// [self-unaligned]: Unaligned
3769    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3770    /// [slice-dst]: KnownLayout#dynamically-sized-types
3771    ///
3772    /// # Compile-Time Assertions
3773    ///
3774    /// This method cannot yet be used on unsized types whose dynamically-sized
3775    /// component is zero-sized. See [`mut_from_suffix_with_elems`], which does
3776    /// support such types. Attempting to use this method on such types results
3777    /// in a compile-time assertion error; e.g.:
3778    ///
3779    /// ```compile_fail,E0080
3780    /// use zerocopy::*;
3781    /// # use zerocopy_derive::*;
3782    ///
3783    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3784    /// #[repr(C, packed)]
3785    /// struct ZSTy {
3786    ///     leading_sized: [u8; 2],
3787    ///     trailing_dst: [()],
3788    /// }
3789    ///
3790    /// let mut source = [85, 85];
3791    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // âš  Compile Error!
3792    /// ```
3793    ///
3794    /// [`mut_from_suffix_with_elems`]: FromBytes::mut_from_suffix_with_elems
3795    ///
3796    /// # Examples
3797    ///
3798    /// ```
3799    /// use zerocopy::FromBytes;
3800    /// # use zerocopy_derive::*;
3801    ///
3802    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3803    /// #[repr(C)]
3804    /// struct PacketHeader {
3805    ///     src_port: [u8; 2],
3806    ///     dst_port: [u8; 2],
3807    ///     length: [u8; 2],
3808    ///     checksum: [u8; 2],
3809    /// }
3810    ///
3811    /// // These are more bytes than are needed to encode a `PacketHeader`.
3812    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3813    ///
3814    /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
3815    ///
3816    /// assert_eq!(header.src_port, [0, 1]);
3817    /// assert_eq!(header.dst_port, [2, 3]);
3818    /// assert_eq!(header.length, [4, 5]);
3819    /// assert_eq!(header.checksum, [6, 7]);
3820    /// assert_eq!(body, &[8, 9][..]);
3821    ///
3822    /// header.checksum = [0, 0];
3823    /// body.fill(1);
3824    ///
3825    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
3826    /// ```
3827    #[must_use = "has no side effects"]
3828    #[inline]
3829    fn mut_from_prefix(
3830        source: &mut [u8],
3831    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
3832    where
3833        Self: IntoBytes + KnownLayout,
3834    {
3835        static_assert_dst_is_not_zst!(Self);
3836        mut_from_prefix_suffix(source, None, CastType::Prefix)
3837    }
3838
    /// Interprets the suffix of the given `source` as a `&mut Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    ///
    /// prefix.fill(0);
    /// trailer.frame_check_sequence.fill(1);
    ///
    /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // The shared helper always yields `(Self, remainder)`; `swap` reorders
        // the pair into this method's documented `(prefix, Self)` order.
        mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
    }
3914
    /// Interprets the given `source` as a `&Self` with a DST length equal to
    /// `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, or if `source` is not
    /// appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        let source = Ptr::from_ref(source);
        // "No leftover" demands that `source` be exactly the size of `Self`
        // with `count` trailing elements; surplus bytes are an error here.
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            // `Self: FromBytes`, so once size and alignment have been
            // validated, the bytes can be recalled as a bit-valid `Self`.
            Ok(slf) => Ok(slf.bikeshed_recall_valid().as_ref()),
            // Convert the error's `Ptr` source back into a plain `&[u8]`.
            Err(err) => Err(err.map_src(|s| s.as_ref())),
        }
    }
3992
    /// Interprets the prefix of the given `source` as a DST `&Self` with length
    /// equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// assert_eq!(suffix, &[8, 9]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_prefix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // An explicit element count is supplied, so pass `Some(count)` rather
        // than letting the cast compute the largest size that fits.
        ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
4067
    /// Interprets the suffix of the given `source` as a DST `&Self` with length
    /// equal to `count`.
    ///
    /// This method attempts to return a reference to the suffix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(prefix, &[0, 1]);
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
    /// ]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_suffix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // The helper yields `(Self, remainder)`; `swap` reorders the pair
        // into this method's documented `(prefix, Self)` order.
        ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
    }
4142
    /// Interprets the given `source` as a `&mut Self` with a DST length equal
    /// to `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, or if `source` is not
    /// appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from`] which
    /// do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from`]: FromBytes::mut_from
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        // NOTE(review): the cast below is justified via `BecauseImmutable`
        // even though access is exclusive — hence the `Immutable` bound here,
        // which the `mut_from_{prefix,suffix}_with_elems` siblings do not
        // require.
        Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
    {
        let source = Ptr::from_mut(source);
        // "No leftover" demands that `source` be exactly the size of `Self`
        // with `count` trailing elements; surplus bytes are an error here.
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            // `Self: FromBytes`, so once size and alignment have been
            // validated, the bytes can be recalled as a bit-valid `Self`.
            Ok(slf) => Ok(slf.bikeshed_recall_valid().as_mut()),
            // Convert the error's `Ptr` source back into a plain `&mut [u8]`.
            Err(err) => Err(err.map_src(|s| s.as_mut())),
        }
    }
4223
    /// Interprets the prefix of the given `source` as a `&mut Self` with DST
    /// length equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// assert_eq!(suffix, &[8, 9]);
    ///
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    /// suffix.fill(1);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_prefix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
    {
        // An explicit element count is supplied, so pass `Some(count)` rather
        // than letting the cast compute the largest size that fits.
        mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
4303
    /// Interprets the suffix of the given `source` as a `&mut Self` with DST
    /// length equal to `count`.
    ///
    /// This method attempts to return a reference to the suffix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(prefix, &[0, 1]);
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
    /// ]);
    ///
    /// prefix.fill(9);
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    ///
    /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_suffix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
    {
        // The helper yields `(Self, remainder)`; `swap` reorders the pair
        // into this method's documented `(prefix, Self)` order.
        mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
    }
4383
    /// Reads a copy of `Self` from the given `source`.
    ///
    /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These bytes encode a `PacketHeader`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting to `Unalign<Self>` sidesteps `Self`'s alignment requirement,
        // so only the size check below can actually fail.
        match Ref::<_, Unalign<Self>>::sized_from(source) {
            Ok(r) => Ok(Ref::read(&r).into_inner()),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type of this cast is uninhabited, so the
            // empty match is vacuously exhaustive.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4431
    /// Reads a copy of `Self` from the prefix of the given `source`.
    ///
    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any remaining bytes. If
    /// `source.len() < size_of::<Self>()`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketHeader`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// assert_eq!(body, [8, 9]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting to `Unalign<Self>` sidesteps `Self`'s alignment requirement,
        // so only the size check below can actually fail.
        match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
            Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_prefix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type of this cast is uninhabited, so the
            // empty match is vacuously exhaustive.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4482
    /// Reads a copy of `Self` from the suffix of the given `source`.
    ///
    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any preceding bytes. If
    /// `source.len() < size_of::<Self>()`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting to `Unalign<Self>` sidesteps `Self`'s alignment requirement,
        // so only the size check below can actually fail.
        match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
            Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_suffix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type of this cast is uninhabited, so the
            // empty match is vacuously exhaustive.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4527
4528    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
4529    #[doc(hidden)]
4530    #[must_use = "has no side effects"]
4531    #[inline(always)]
4532    fn ref_from(source: &[u8]) -> Option<&Self>
4533    where
4534        Self: KnownLayout + Immutable,
4535    {
4536        Self::ref_from_bytes(source).ok()
4537    }
4538
4539    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
4540    #[doc(hidden)]
4541    #[must_use = "has no side effects"]
4542    #[inline(always)]
4543    fn mut_from(source: &mut [u8]) -> Option<&mut Self>
4544    where
4545        Self: KnownLayout + IntoBytes,
4546    {
4547        Self::mut_from_bytes(source).ok()
4548    }
4549
4550    #[deprecated(since = "0.8.0", note = "`FromBytes::ref_from_bytes` now supports slices")]
4551    #[doc(hidden)]
4552    #[must_use = "has no side effects"]
4553    #[inline(always)]
4554    fn slice_from(source: &[u8]) -> Option<&[Self]>
4555    where
4556        Self: Sized + Immutable,
4557    {
4558        <[Self]>::ref_from_bytes(source).ok()
4559    }
4560
4561    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
4562    #[doc(hidden)]
4563    #[must_use = "has no side effects"]
4564    #[inline(always)]
4565    fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
4566    where
4567        Self: Sized + Immutable,
4568    {
4569        <[Self]>::ref_from_prefix_with_elems(source, count).ok()
4570    }
4571
4572    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
4573    #[doc(hidden)]
4574    #[must_use = "has no side effects"]
4575    #[inline(always)]
4576    fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
4577    where
4578        Self: Sized + Immutable,
4579    {
4580        <[Self]>::ref_from_suffix_with_elems(source, count).ok()
4581    }
4582
4583    #[deprecated(since = "0.8.0", note = "`FromBytes::mut_from_bytes` now supports slices")]
4584    #[must_use = "has no side effects"]
4585    #[doc(hidden)]
4586    #[inline(always)]
4587    fn mut_slice_from(source: &mut [u8]) -> Option<&mut [Self]>
4588    where
4589        Self: Sized + IntoBytes,
4590    {
4591        <[Self]>::mut_from_bytes(source).ok()
4592    }
4593
4594    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
4595    #[doc(hidden)]
4596    #[must_use = "has no side effects"]
4597    #[inline(always)]
4598    fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
4599    where
4600        Self: Sized + IntoBytes,
4601    {
4602        <[Self]>::mut_from_prefix_with_elems(source, count).ok()
4603    }
4604
4605    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
4606    #[doc(hidden)]
4607    #[must_use = "has no side effects"]
4608    #[inline(always)]
4609    fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
4610    where
4611        Self: Sized + IntoBytes,
4612    {
4613        <[Self]>::mut_from_suffix_with_elems(source, count).ok()
4614    }
4615
4616    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
4617    #[doc(hidden)]
4618    #[must_use = "has no side effects"]
4619    #[inline(always)]
4620    fn read_from(source: &[u8]) -> Option<Self>
4621    where
4622        Self: Sized,
4623    {
4624        Self::read_from_bytes(source).ok()
4625    }
4626}
4627
/// Interprets the given affix of the given bytes as a `&Self`.
///
/// This method computes the largest possible size of `Self` that can fit in the
/// prefix or suffix bytes of `source`, then attempts to return both a reference
/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
/// If there are insufficient bytes, or if that affix of `source` is not
/// appropriately aligned, this returns `Err`.
#[inline(always)]
fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
    source: &[u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
    // `meta` of `None` lets the cast compute the largest `T` that fits;
    // `Some(count)` (used by the `*_with_elems` callers) pins the
    // trailing-slice length exactly.
    let (slf, prefix_suffix) = Ptr::from_ref(source)
        .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
        // On failure, convert the error's `Ptr` source back into `&[u8]`.
        .map_err(|err| err.map_src(|s| s.as_ref()))?;
    // `T: FromBytes`, so the successfully-cast bytes may be recalled as a
    // bit-valid `T` before converting back to plain references.
    Ok((slf.bikeshed_recall_valid().as_ref(), prefix_suffix.as_ref()))
}
4646
/// Interprets the given affix of the given bytes as a `&mut Self` without
/// copying.
///
/// This method computes the largest possible size of `Self` that can fit in the
/// prefix or suffix bytes of `source`, then attempts to return both a reference
/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
/// If there are insufficient bytes, or if that affix of `source` is not
/// appropriately aligned, this returns `Err`.
#[inline(always)]
fn mut_from_prefix_suffix<T: FromBytes + KnownLayout + ?Sized>(
    source: &mut [u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
    // `meta` of `None` lets the cast compute the largest `T` that fits;
    // `Some(count)` (used by the `*_with_elems` callers) pins the
    // trailing-slice length exactly. Exclusive access is justified via
    // `BecauseExclusive`, so no `Immutable` bound is needed here.
    let (slf, prefix_suffix) = Ptr::from_mut(source)
        .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
        // On failure, convert the error's `Ptr` source back into `&mut [u8]`.
        .map_err(|err| err.map_src(|s| s.as_mut()))?;
    // `T: FromBytes`, so the successfully-cast bytes may be recalled as a
    // bit-valid `T` before converting back to plain references.
    Ok((slf.bikeshed_recall_valid().as_mut(), prefix_suffix.as_mut()))
}
4666
/// Analyzes whether a type is [`IntoBytes`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
/// sound to do so. This derive can be applied to structs and enums (see below
/// for union support); e.g.:
///
/// ```
/// # use zerocopy_derive::{IntoBytes};
/// #[derive(IntoBytes)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(IntoBytes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// [safety conditions]: trait@IntoBytes#safety
///
/// # Error Messages
///
/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
/// for `IntoBytes` is implemented, you may get an error like this:
///
/// ```text
/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
///   --> lib.rs:23:10
///    |
///  1 | #[derive(IntoBytes)]
///    |          ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
///    |
///    = help: the following implementations were found:
///                   <() as PaddingFree<T, false>>
/// ```
///
/// This error indicates that the type being annotated has padding bytes, which
/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
/// fields by using types in the [`byteorder`] module, wrapping field types in
/// [`Unalign`], adding explicit struct fields where those padding bytes would
/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
/// layout] for more information about type layout and padding.
///
/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
///
/// # Unions
///
/// Currently, union bit validity is [up in the air][union-validity], and so
/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
/// However, implementing `IntoBytes` on a union type is likely sound on all
/// existing Rust toolchains - it's just that it may become unsound in the
/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by
/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
///
/// ```shell
/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
/// ```
///
/// However, it is your responsibility to ensure that this derive is sound on
/// the specific versions of the Rust toolchain you are using! We make no
/// stability or soundness guarantees regarding this cfg, and may remove it at
/// any point.
///
/// We are actively working with Rust to stabilize the necessary language
/// guarantees to support this in a forwards-compatible way, which will enable
/// us to remove the cfg gate. As part of this effort, we need to know how much
/// demand there is for this feature. If you would like to use `IntoBytes` on
/// unions, [please let us know][discussion].
///
/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
/// [discussion]: https://github.com/google/zerocopy/discussions/1802
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `IntoBytes` for a given type.
/// Unless you are modifying the implementation of this derive, or attempting to
/// manually implement `IntoBytes` for a type yourself, you don't need to read
/// this section.*
///
/// If a type has the following properties, then this derive can implement
/// `IntoBytes` for that type:
///
/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
///     - if the type is `repr(transparent)` or `repr(packed)`, it is
///       [`IntoBytes`] if its fields are [`IntoBytes`]; else,
///     - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
///       if its field is [`IntoBytes`]; else,
///     - if the type has no generic parameters, it is [`IntoBytes`] if the type
///       is sized and has no padding bytes; else,
///     - if the type is `repr(C)`, its fields must be [`Unaligned`].
/// - If the type is an enum:
///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
///   - It must have no padding bytes.
///   - Its fields must be [`IntoBytes`].
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
/// implementation details of this derive.
///
/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::IntoBytes;
4781
/// Types that can be converted to an immutable slice of initialized bytes.
///
/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
/// same size. This is useful for efficiently serializing structured data as raw
/// bytes.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(IntoBytes)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::IntoBytes;
/// #[derive(IntoBytes)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(IntoBytes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `IntoBytes`. See the [derive
/// documentation][derive] for guidance on how to interpret error messages
/// produced by the derive's analysis.
///
/// # Safety
///
/// *This section describes what is required in order for `T: IntoBytes`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `IntoBytes` manually, and you don't plan on writing unsafe code that
/// operates on `IntoBytes` types, then you don't need to read this section.*
///
/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
/// marked as `IntoBytes` which violates this contract, it may cause undefined
/// behavior.
///
/// `#[derive(IntoBytes)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::IntoBytes",
    doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
)]
#[cfg_attr(
    zerocopy_diagnostic_on_unimplemented,
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
)]
pub unsafe trait IntoBytes {
    // The `Self: Sized` bound makes it so that this function doesn't prevent
    // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
    // prevent object safety, but those provide a benefit in exchange for object
    // safety. If at some point we remove those methods, change their type
    // signatures, or move them out of this trait so that `IntoBytes` is object
    // safe again, it's important that this function not prevent object safety.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// Gets the bytes of this value.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let bytes = header.as_bytes();
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn as_bytes(&self) -> &[u8]
    where
        Self: Immutable,
    {
        // Note that this method does not have a `Self: Sized` bound;
        // `size_of_val` works for unsized values too.
        let len = mem::size_of_val(self);
        let slf: *const Self = self;

        // SAFETY:
        // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
        //   many bytes because...
        //   - `slf` is the same pointer as `self`, and `self` is a reference
        //     which points to an object whose size is `len`. Thus...
        //     - The entire region of `len` bytes starting at `slf` is contained
        //       within a single allocation.
        //     - `slf` is non-null.
        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
        //   initialized.
        // - Since `slf` is derived from `self`, and `self` is an immutable
        //   reference, the only other references to this memory region that
        //   could exist are other immutable references, and those don't allow
        //   mutation. `Self: Immutable` prohibits types which contain
        //   `UnsafeCell`s, which are the only types for which this rule
        //   wouldn't be sufficient.
        // - The total size of the resulting slice is no larger than
        //   `isize::MAX` because no allocation produced by safe code can be
        //   larger than `isize::MAX`.
        //
        // TODO(#429): Add references to docs and quotes.
        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
    }

    /// Gets the bytes of this value mutably.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Eq, PartialEq, Debug)]
    /// #[derive(FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let mut header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let bytes = header.as_mut_bytes();
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    ///
    /// bytes.reverse();
    ///
    /// assert_eq!(header, PacketHeader {
    ///     src_port: [7, 6],
    ///     dst_port: [5, 4],
    ///     length: [3, 2],
    ///     checksum: [1, 0],
    /// });
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn as_mut_bytes(&mut self) -> &mut [u8]
    where
        Self: FromBytes,
    {
        // Note that this method does not have a `Self: Sized` bound;
        // `size_of_val` works for unsized values too.
        let len = mem::size_of_val(self);
        let slf: *mut Self = self;

        // SAFETY:
        // - `slf.cast::<u8>()` is valid for reads and writes for `len *
        //   size_of::<u8>()` many bytes because...
        //   - `slf` is the same pointer as `self`, and `self` is a reference
        //     which points to an object whose size is `len`. Thus...
        //     - The entire region of `len` bytes starting at `slf` is contained
        //       within a single allocation.
        //     - `slf` is non-null.
        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
        //   initialized.
        // - `Self: FromBytes` ensures that no write to this memory region
        //   could result in it containing an invalid `Self`.
        // - Since `slf` is derived from `self`, and `self` is a mutable
        //   reference, no other references to this memory region can exist.
        // - The total size of the resulting slice is no larger than
        //   `isize::MAX` because no allocation produced by safe code can be
        //   larger than `isize::MAX`.
        //
        // TODO(#429): Add references to docs and quotes.
        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
    }

    /// Writes a copy of `self` to `dst`.
    ///
    /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    /// ```
    ///
    /// If too many or too few target bytes are provided, `write_to` returns
    /// `Err` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    /// # let header = u128::MAX;
    /// let mut excessive_bytes = &mut [0u8; 128][..];
    ///
    /// let write_result = header.write_to(excessive_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(excessive_bytes, [0u8; 128]);
    /// ```
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[inline]
    fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        if dst.len() == src.len() {
            // SAFETY: Within this branch of the conditional, we have ensured
            // that `dst.len()` is equal to `src.len()`. Neither the size of the
            // source nor the size of the destination change between the above
            // size check and the invocation of `copy_unchecked`.
            unsafe { util::copy_unchecked(src, dst) }
            Ok(())
        } else {
            Err(SizeError::new(self))
        }
    }

    /// Writes a copy of `self` to the prefix of `dst`.
    ///
    /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
    /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to_prefix(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
    /// ```
    ///
    /// If insufficient target bytes are provided, `write_to_prefix` returns
    /// `Err` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_prefix(insufficient_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[inline]
    fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        match dst.get_mut(..src.len()) {
            Some(dst) => {
                // SAFETY: Within this branch of the `match`, we have ensured
                // through fallible subslicing that `dst.len()` is equal to
                // `src.len()`. Neither the size of the source nor the size of
                // the destination change between the above subslicing operation
                // and the invocation of `copy_unchecked`.
                unsafe { util::copy_unchecked(src, dst) }
                Ok(())
            }
            None => Err(SizeError::new(self)),
        }
    }

    /// Writes a copy of `self` to the suffix of `dst`.
    ///
    /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
    /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to_suffix(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
    ///
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_suffix(insufficient_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    ///
    /// If insufficient target bytes are provided, `write_to_suffix` returns
    /// `Err` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_suffix(insufficient_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[inline]
    fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        let start = if let Some(start) = dst.len().checked_sub(src.len()) {
            start
        } else {
            return Err(SizeError::new(self));
        };
        let dst = if let Some(dst) = dst.get_mut(start..) {
            dst
        } else {
            // get_mut() should never return None here. We return a `SizeError`
            // rather than .unwrap() because in the event the branch is not
            // optimized away, returning a value is generally lighter-weight
            // than panicking.
            return Err(SizeError::new(self));
        };
        // SAFETY: Through fallible subslicing of `dst`, we have ensured that
        // `dst.len()` is equal to `src.len()`. Neither the size of the source
        // nor the size of the destination change between the above subslicing
        // operation and the invocation of `copy_unchecked`.
        unsafe {
            util::copy_unchecked(src, dst);
        }
        Ok(())
    }

    #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
    #[doc(hidden)]
    #[inline]
    fn as_bytes_mut(&mut self) -> &mut [u8]
    where
        Self: FromBytes,
    {
        self.as_mut_bytes()
    }
}
5223
/// Analyzes whether a type is [`Unaligned`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
/// sound to do so. This derive can be applied to structs, enums, and unions;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::Unaligned;
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(packed)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `Unaligned` for a given type.
/// Unless you are modifying the implementation of this derive, or attempting to
/// manually implement `Unaligned` for a type yourself, you don't need to read
/// this section.*
///
/// If a type has the following properties, then this derive can implement
/// `Unaligned` for that type:
///
/// - If the type is a struct or union:
///   - If `repr(align(N))` is provided, `N` must equal 1.
///   - If the type is `repr(C)` or `repr(transparent)`, all fields must be
///     [`Unaligned`].
///   - If the type is not `repr(C)` or `repr(transparent)`, it must be
///     `repr(packed)` or `repr(packed(1))`.
/// - If the type is an enum:
///   - If `repr(align(N))` is provided, `N` must equal 1.
///   - It must be a field-less enum (meaning that all variants have no fields).
///   - It must be `repr(i8)` or `repr(u8)`.
///
/// [safety conditions]: trait@Unaligned#safety
// The derive is re-exported from `zerocopy_derive` only when the "derive"
// feature is enabled (or when building zerocopy's own tests).
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::Unaligned;
5286
/// Types with no alignment requirement.
///
/// If `T: Unaligned`, then `align_of::<T>() == 1`.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(Unaligned)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::Unaligned;
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(packed)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `Unaligned`.
///
/// # Safety
///
/// *This section describes what is required in order for `T: Unaligned`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `Unaligned` manually, and you don't plan on writing unsafe code that
/// operates on `Unaligned` types, then you don't need to read this section.*
///
/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
/// reference to `T` at any memory location regardless of alignment. If a type
/// is marked as `Unaligned` which violates this contract, it may cause
/// undefined behavior.
///
/// `#[derive(Unaligned)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::Unaligned",
    doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
)]
#[cfg_attr(
    zerocopy_diagnostic_on_unimplemented,
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
)]
pub unsafe trait Unaligned {
    // The `Self: Sized` bound makes it so that `Unaligned` is still object
    // safe. This hidden method exists only so that the derive — and not
    // downstream code — can implement this trait.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
5365
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
mod alloc_support {
    use super::*;

    /// Extends `v` by appending `additional` new items, each initialized with
    /// zeros.
    ///
    /// Deprecated free-function form of the equivalent `FromZeros` method;
    /// retained only for backwards compatibility and hidden from the docs.
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn extend_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        additional: usize,
    ) -> Result<(), AllocError> {
        T::extend_vec_zeroed(v, additional)
    }

    /// Inserts `additional` new, zero-initialized items into `v` at
    /// `position`.
    ///
    /// Deprecated free-function form of the equivalent `FromZeros` method;
    /// retained only for backwards compatibility and hidden from the docs.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn insert_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError> {
        T::insert_vec_zeroed(v, position, additional)
    }
}
5403
// Re-export the deprecated free functions (`extend_vec_zeroed`,
// `insert_vec_zeroed`) at the crate root for backwards compatibility; they are
// `#[doc(hidden)]` so they don't appear in the rendered documentation.
#[cfg(feature = "alloc")]
#[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
#[doc(hidden)]
pub use alloc_support::*;
5408
5409#[cfg(test)]
5410#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
5411mod tests {
5412    use static_assertions::assert_impl_all;
5413
5414    use super::*;
5415    use crate::util::testutil::*;
5416
    // An unsized type.
    //
    // This is used to test the custom derives of our traits. The `[u8]` type
    // gets a hand-rolled impl, so it doesn't exercise our custom derives.
    //
    // `repr(transparent)` over a single `[u8]` field makes this a DST whose
    // layout matches `[u8]`, which `from_mut_slice` below relies on.
    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
    #[repr(transparent)]
    struct Unsized([u8]);
5424
    impl Unsized {
        // Converts a `&mut [u8]` into a `&mut Unsized` viewing the same bytes.
        //
        // Test-only helper; see the SAFETY comment for the soundness caveat.
        fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
            // SAFETY: This is *probably* sound - since the layouts of `[u8]`
            // and `Unsized` are the same, so are the layouts of `&mut [u8]` and
            // `&mut Unsized`. [1] Even if it turns out that this isn't actually
            // guaranteed by the language spec, we can just change this since
            // it's in test code.
            //
            // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
            unsafe { mem::transmute(slc) }
        }
    }
5437
5438    #[test]
5439    fn test_known_layout() {
5440        // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
5441        // Test that `PhantomData<$ty>` has the same layout as `()` regardless
5442        // of `$ty`.
5443        macro_rules! test {
5444            ($ty:ty, $expect:expr) => {
5445                let expect = $expect;
5446                assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
5447                assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
5448                assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
5449            };
5450        }
5451
5452        let layout = |offset, align, _trailing_slice_elem_size| DstLayout {
5453            align: NonZeroUsize::new(align).unwrap(),
5454            size_info: match _trailing_slice_elem_size {
5455                None => SizeInfo::Sized { size: offset },
5456                Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
5457            },
5458        };
5459
5460        test!((), layout(0, 1, None));
5461        test!(u8, layout(1, 1, None));
5462        // Use `align_of` because `u64` alignment may be smaller than 8 on some
5463        // platforms.
5464        test!(u64, layout(8, mem::align_of::<u64>(), None));
5465        test!(AU64, layout(8, 8, None));
5466
5467        test!(Option<&'static ()>, usize::LAYOUT);
5468
5469        test!([()], layout(0, 1, Some(0)));
5470        test!([u8], layout(0, 1, Some(1)));
5471        test!(str, layout(0, 1, Some(1)));
5472    }
5473
5474    #[cfg(feature = "derive")]
5475    #[test]
5476    fn test_known_layout_derive() {
5477        // In this and other files (`late_compile_pass.rs`,
5478        // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
5479        // modes of `derive(KnownLayout)` for the following combination of
5480        // properties:
5481        //
5482        // +------------+--------------------------------------+-----------+
5483        // |            |      trailing field properties       |           |
5484        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5485        // |------------+----------+----------------+----------+-----------|
5486        // |          N |        N |              N |        N |      KL00 |
5487        // |          N |        N |              N |        Y |      KL01 |
5488        // |          N |        N |              Y |        N |      KL02 |
5489        // |          N |        N |              Y |        Y |      KL03 |
5490        // |          N |        Y |              N |        N |      KL04 |
5491        // |          N |        Y |              N |        Y |      KL05 |
5492        // |          N |        Y |              Y |        N |      KL06 |
5493        // |          N |        Y |              Y |        Y |      KL07 |
5494        // |          Y |        N |              N |        N |      KL08 |
5495        // |          Y |        N |              N |        Y |      KL09 |
5496        // |          Y |        N |              Y |        N |      KL10 |
5497        // |          Y |        N |              Y |        Y |      KL11 |
5498        // |          Y |        Y |              N |        N |      KL12 |
5499        // |          Y |        Y |              N |        Y |      KL13 |
5500        // |          Y |        Y |              Y |        N |      KL14 |
5501        // |          Y |        Y |              Y |        Y |      KL15 |
5502        // +------------+----------+----------------+----------+-----------+
5503
5504        struct NotKnownLayout<T = ()> {
5505            _t: T,
5506        }
5507
5508        #[derive(KnownLayout)]
5509        #[repr(C)]
5510        struct AlignSize<const ALIGN: usize, const SIZE: usize>
5511        where
5512            elain::Align<ALIGN>: elain::Alignment,
5513        {
5514            _align: elain::Align<ALIGN>,
5515            size: [u8; SIZE],
5516        }
5517
5518        type AU16 = AlignSize<2, 2>;
5519        type AU32 = AlignSize<4, 4>;
5520
5521        fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
5522
5523        let sized_layout = |align, size| DstLayout {
5524            align: NonZeroUsize::new(align).unwrap(),
5525            size_info: SizeInfo::Sized { size },
5526        };
5527
5528        let unsized_layout = |align, elem_size, offset| DstLayout {
5529            align: NonZeroUsize::new(align).unwrap(),
5530            size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
5531        };
5532
5533        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5534        // |          N |        N |              N |        Y |      KL01 |
5535        #[allow(dead_code)]
5536        #[derive(KnownLayout)]
5537        struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5538
5539        let expected = DstLayout::for_type::<KL01>();
5540
5541        assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
5542        assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
5543
5544        // ...with `align(N)`:
5545        #[allow(dead_code)]
5546        #[derive(KnownLayout)]
5547        #[repr(align(64))]
5548        struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5549
5550        let expected = DstLayout::for_type::<KL01Align>();
5551
5552        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
5553        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5554
5555        // ...with `packed`:
5556        #[allow(dead_code)]
5557        #[derive(KnownLayout)]
5558        #[repr(packed)]
5559        struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5560
5561        let expected = DstLayout::for_type::<KL01Packed>();
5562
5563        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
5564        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
5565
5566        // ...with `packed(N)`:
5567        #[allow(dead_code)]
5568        #[derive(KnownLayout)]
5569        #[repr(packed(2))]
5570        struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5571
5572        assert_impl_all!(KL01PackedN: KnownLayout);
5573
5574        let expected = DstLayout::for_type::<KL01PackedN>();
5575
5576        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
5577        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
5578
5579        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5580        // |          N |        N |              Y |        Y |      KL03 |
5581        #[allow(dead_code)]
5582        #[derive(KnownLayout)]
5583        struct KL03(NotKnownLayout, u8);
5584
5585        let expected = DstLayout::for_type::<KL03>();
5586
5587        assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
5588        assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
5589
5590        // ... with `align(N)`
5591        #[allow(dead_code)]
5592        #[derive(KnownLayout)]
5593        #[repr(align(64))]
5594        struct KL03Align(NotKnownLayout<AU32>, u8);
5595
5596        let expected = DstLayout::for_type::<KL03Align>();
5597
5598        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
5599        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5600
5601        // ... with `packed`:
5602        #[allow(dead_code)]
5603        #[derive(KnownLayout)]
5604        #[repr(packed)]
5605        struct KL03Packed(NotKnownLayout<AU32>, u8);
5606
5607        let expected = DstLayout::for_type::<KL03Packed>();
5608
5609        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
5610        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
5611
5612        // ... with `packed(N)`
5613        #[allow(dead_code)]
5614        #[derive(KnownLayout)]
5615        #[repr(packed(2))]
5616        struct KL03PackedN(NotKnownLayout<AU32>, u8);
5617
5618        assert_impl_all!(KL03PackedN: KnownLayout);
5619
5620        let expected = DstLayout::for_type::<KL03PackedN>();
5621
5622        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
5623        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
5624
5625        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5626        // |          N |        Y |              N |        Y |      KL05 |
5627        #[allow(dead_code)]
5628        #[derive(KnownLayout)]
5629        struct KL05<T>(u8, T);
5630
5631        fn _test_kl05<T>(t: T) -> impl KnownLayout {
5632            KL05(0u8, t)
5633        }
5634
5635        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5636        // |          N |        Y |              Y |        Y |      KL07 |
5637        #[allow(dead_code)]
5638        #[derive(KnownLayout)]
5639        struct KL07<T: KnownLayout>(u8, T);
5640
5641        fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
5642            let _ = KL07(0u8, t);
5643        }
5644
5645        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5646        // |          Y |        N |              Y |        N |      KL10 |
5647        #[allow(dead_code)]
5648        #[derive(KnownLayout)]
5649        #[repr(C)]
5650        struct KL10(NotKnownLayout<AU32>, [u8]);
5651
5652        let expected = DstLayout::new_zst(None)
5653            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
5654            .extend(<[u8] as KnownLayout>::LAYOUT, None)
5655            .pad_to_align();
5656
5657        assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
5658        assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4));
5659
5660        // ...with `align(N)`:
5661        #[allow(dead_code)]
5662        #[derive(KnownLayout)]
5663        #[repr(C, align(64))]
5664        struct KL10Align(NotKnownLayout<AU32>, [u8]);
5665
5666        let repr_align = NonZeroUsize::new(64);
5667
5668        let expected = DstLayout::new_zst(repr_align)
5669            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
5670            .extend(<[u8] as KnownLayout>::LAYOUT, None)
5671            .pad_to_align();
5672
5673        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
5674        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4));
5675
5676        // ...with `packed`:
5677        #[allow(dead_code)]
5678        #[derive(KnownLayout)]
5679        #[repr(C, packed)]
5680        struct KL10Packed(NotKnownLayout<AU32>, [u8]);
5681
5682        let repr_packed = NonZeroUsize::new(1);
5683
5684        let expected = DstLayout::new_zst(None)
5685            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
5686            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
5687            .pad_to_align();
5688
5689        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
5690        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4));
5691
5692        // ...with `packed(N)`:
5693        #[allow(dead_code)]
5694        #[derive(KnownLayout)]
5695        #[repr(C, packed(2))]
5696        struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
5697
5698        let repr_packed = NonZeroUsize::new(2);
5699
5700        let expected = DstLayout::new_zst(None)
5701            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
5702            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
5703            .pad_to_align();
5704
5705        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
5706        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
5707
5708        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5709        // |          Y |        N |              Y |        Y |      KL11 |
5710        #[allow(dead_code)]
5711        #[derive(KnownLayout)]
5712        #[repr(C)]
5713        struct KL11(NotKnownLayout<AU64>, u8);
5714
5715        let expected = DstLayout::new_zst(None)
5716            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
5717            .extend(<u8 as KnownLayout>::LAYOUT, None)
5718            .pad_to_align();
5719
5720        assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
5721        assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
5722
5723        // ...with `align(N)`:
5724        #[allow(dead_code)]
5725        #[derive(KnownLayout)]
5726        #[repr(C, align(64))]
5727        struct KL11Align(NotKnownLayout<AU64>, u8);
5728
5729        let repr_align = NonZeroUsize::new(64);
5730
5731        let expected = DstLayout::new_zst(repr_align)
5732            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
5733            .extend(<u8 as KnownLayout>::LAYOUT, None)
5734            .pad_to_align();
5735
5736        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
5737        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5738
5739        // ...with `packed`:
5740        #[allow(dead_code)]
5741        #[derive(KnownLayout)]
5742        #[repr(C, packed)]
5743        struct KL11Packed(NotKnownLayout<AU64>, u8);
5744
5745        let repr_packed = NonZeroUsize::new(1);
5746
5747        let expected = DstLayout::new_zst(None)
5748            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
5749            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
5750            .pad_to_align();
5751
5752        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
5753        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
5754
5755        // ...with `packed(N)`:
5756        #[allow(dead_code)]
5757        #[derive(KnownLayout)]
5758        #[repr(C, packed(2))]
5759        struct KL11PackedN(NotKnownLayout<AU64>, u8);
5760
5761        let repr_packed = NonZeroUsize::new(2);
5762
5763        let expected = DstLayout::new_zst(None)
5764            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
5765            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
5766            .pad_to_align();
5767
5768        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
5769        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
5770
5771        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5772        // |          Y |        Y |              Y |        N |      KL14 |
5773        #[allow(dead_code)]
5774        #[derive(KnownLayout)]
5775        #[repr(C)]
5776        struct KL14<T: ?Sized + KnownLayout>(u8, T);
5777
5778        fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
5779            _assert_kl(kl)
5780        }
5781
5782        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5783        // |          Y |        Y |              Y |        Y |      KL15 |
5784        #[allow(dead_code)]
5785        #[derive(KnownLayout)]
5786        #[repr(C)]
5787        struct KL15<T: KnownLayout>(u8, T);
5788
5789        fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
5790            let _ = KL15(0u8, t);
5791        }
5792
5793        // Test a variety of combinations of field types:
5794        //  - ()
5795        //  - u8
5796        //  - AU16
5797        //  - [()]
5798        //  - [u8]
5799        //  - [AU16]
5800
5801        #[allow(clippy::upper_case_acronyms, dead_code)]
5802        #[derive(KnownLayout)]
5803        #[repr(C)]
5804        struct KLTU<T, U: ?Sized>(T, U);
5805
5806        assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
5807
5808        assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
5809
5810        assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
5811
5812        assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0));
5813
5814        assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
5815
5816        assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0));
5817
5818        assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
5819
5820        assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
5821
5822        assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
5823
5824        assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1));
5825
5826        assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
5827
5828        assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
5829
5830        assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
5831
5832        assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
5833
5834        assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
5835
5836        assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2));
5837
5838        assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2));
5839
5840        assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
5841
5842        // Test a variety of field counts.
5843
5844        #[derive(KnownLayout)]
5845        #[repr(C)]
5846        struct KLF0;
5847
5848        assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
5849
5850        #[derive(KnownLayout)]
5851        #[repr(C)]
5852        struct KLF1([u8]);
5853
5854        assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
5855
5856        #[derive(KnownLayout)]
5857        #[repr(C)]
5858        struct KLF2(NotKnownLayout<u8>, [u8]);
5859
5860        assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
5861
5862        #[derive(KnownLayout)]
5863        #[repr(C)]
5864        struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
5865
5866        assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
5867
5868        #[derive(KnownLayout)]
5869        #[repr(C)]
5870        struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
5871
5872        assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8));
5873    }
5874
5875    #[test]
5876    fn test_object_safety() {
5877        fn _takes_no_cell(_: &dyn Immutable) {}
5878        fn _takes_unaligned(_: &dyn Unaligned) {}
5879    }
5880
5881    #[test]
5882    fn test_from_zeros_only() {
5883        // Test types that implement `FromZeros` but not `FromBytes`.
5884
5885        assert!(!bool::new_zeroed());
5886        assert_eq!(char::new_zeroed(), '\0');
5887
5888        #[cfg(feature = "alloc")]
5889        {
5890            assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
5891            assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
5892
5893            assert_eq!(
5894                <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
5895                [false, false, false]
5896            );
5897            assert_eq!(
5898                <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
5899                ['\0', '\0', '\0']
5900            );
5901
5902            assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
5903            assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
5904        }
5905
5906        let mut string = "hello".to_string();
5907        let s: &mut str = string.as_mut();
5908        assert_eq!(s, "hello");
5909        s.zero();
5910        assert_eq!(s, "\0\0\0\0\0");
5911    }
5912
    #[test]
    fn test_zst_count_preserved() {
        // Test that, when an explicit count is provided for a type with a
        // ZST trailing slice element, that count is preserved. This is
        // important since, for such types, all element counts result in objects
        // of the same size, and so the correct behavior is ambiguous. However,
        // preserving the count as requested by the user is the behavior that we
        // document publicly.

        // FromZeros methods
        #[cfg(feature = "alloc")]
        assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
        #[cfg(feature = "alloc")]
        assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);

        // FromBytes methods: an empty byte slice can hold any number of `()`
        // elements, so only the explicit count can determine the result's
        // length.
        assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
        assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
        assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
        assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
        assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
        assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
    }
5936
5937    #[test]
5938    fn test_read_write() {
5939        const VAL: u64 = 0x12345678;
5940        #[cfg(target_endian = "big")]
5941        const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
5942        #[cfg(target_endian = "little")]
5943        const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
5944        const ZEROS: [u8; 8] = [0u8; 8];
5945
5946        // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
5947
5948        assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
5949        // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
5950        // zeros.
5951        let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
5952        assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
5953        assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
5954        // The first 8 bytes are all zeros and the second 8 bytes are from
5955        // `VAL_BYTES`
5956        let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
5957        assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
5958        assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));
5959
5960        // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.
5961
5962        let mut bytes = [0u8; 8];
5963        assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
5964        assert_eq!(bytes, VAL_BYTES);
5965        let mut bytes = [0u8; 16];
5966        assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
5967        let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
5968        assert_eq!(bytes, want);
5969        let mut bytes = [0u8; 16];
5970        assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
5971        let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
5972        assert_eq!(bytes, want);
5973    }
5974
    #[test]
    fn test_try_from_bytes_try_read_from() {
        // Valid `bool` encodings (0 and 1) are accepted.
        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));

        // Prefix reads return the parsed value and the unconsumed suffix.
        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));

        // Suffix reads return the unconsumed prefix and the parsed value.
        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));

        // If we don't pass enough bytes, it fails.
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_bytes(&[]),
            Err(TryReadError::Size(_))
        ));
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_prefix(&[]),
            Err(TryReadError::Size(_))
        ));
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_suffix(&[]),
            Err(TryReadError::Size(_))
        ));

        // If we pass too many bytes, it fails. (Only the exact-size variant
        // rejects trailing bytes; prefix/suffix variants permit them.)
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
            Err(TryReadError::Size(_))
        ));

        // If we pass an invalid value, it fails. (2 is not a valid `bool`.)
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_bytes(&[2]),
            Err(TryReadError::Validity(_))
        ));
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
            Err(TryReadError::Validity(_))
        ));
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
            Err(TryReadError::Validity(_))
        ));

        // Reading from a misaligned buffer should still succeed. Since `AU64`'s
        // alignment is 8, and since we read from two adjacent addresses one
        // byte apart, it is guaranteed that at least one of them (though
        // possibly both) will be misaligned.
        let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));

        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
            Ok((AU64(0), &[][..]))
        );
        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
            Ok((AU64(0), &[][..]))
        );

        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
            Ok((&[][..], AU64(0)))
        );
        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
            Ok((&[][..], AU64(0)))
        );
    }
6046
    #[test]
    fn test_ref_from_mut_from() {
        // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` success cases.
        // Exhaustive coverage for these methods is covered by the `Ref` tests above,
        // which these helper methods defer to.
        //
        // NOTE: each mutation below changes `buf`, and later assertions check
        // the accumulated state — statement order matters.

        let mut buf =
            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);

        // The second half of `buf` parses as an `AU64` whose native-endian
        // byte representation is exactly those eight bytes.
        assert_eq!(
            AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
            [8, 9, 10, 11, 12, 13, 14, 15]
        );
        // Overwrite bytes 8..16 with 0x01 through a mutable reference.
        let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
        suffix.0 = 0x0101010101010101;
        // The `[u8; 9]` is a non-half size of the full buffer, which would catch
        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
        assert_eq!(
            <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
            (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
        );
        // The suffix of the 15-byte slice `buf.t[1..]` is bytes 8..16 of the
        // full buffer; overwrite them with 0x02.
        let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
        assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
        suffix.0 = 0x0202020202020202;
        // The 10-byte suffix covers bytes 6..16; set byte 6 to 42.
        let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
        assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
        suffix[0] = 42;
        assert_eq!(
            <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
            (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
        );
        // Set byte 1 to 30 via a mutable prefix parse, then verify the final
        // accumulated state of the whole buffer.
        <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
        assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
    }
6081
6082    #[test]
6083    fn test_ref_from_mut_from_error() {
6084        // Test `FromBytes::{ref_from, mut_from}{,_prefix,Suffix}` error cases.
6085
6086        // Fail because the buffer is too large.
6087        let mut buf = Align::<[u8; 16], AU64>::default();
6088        // `buf.t` should be aligned to 8, so only the length check should fail.
6089        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6090        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6091        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6092        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6093
6094        // Fail because the buffer is too small.
6095        let mut buf = Align::<[u8; 4], AU64>::default();
6096        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6097        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6098        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6099        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6100        assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
6101        assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
6102        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6103        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6104        assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
6105        assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
6106        assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
6107        assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
6108
6109        // Fail because the alignment is insufficient.
6110        let mut buf = Align::<[u8; 13], AU64>::default();
6111        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6112        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6113        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6114        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6115        assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
6116        assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
6117        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6118        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6119    }
6120
    #[test]
    fn test_to_methods() {
        /// Run a series of tests by calling `IntoBytes` methods on `t`.
        ///
        /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
        /// before `t` has been modified. `post_mutation` is the expected
        /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]`
        /// has had its bits flipped (by applying `^= 0xFF`).
        ///
        /// `N` is the size of `t` in bytes.
        fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
            t: &mut T,
            bytes: &[u8],
            post_mutation: &T,
        ) {
            // Test that we can access the underlying bytes, and that we get the
            // right bytes and the right number of bytes.
            assert_eq!(t.as_bytes(), bytes);

            // Test that changes to the underlying byte slices are reflected in
            // the original object.
            t.as_mut_bytes()[0] ^= 0xFF;
            assert_eq!(t, post_mutation);
            // Flip the bits back so the remaining checks see the original value.
            t.as_mut_bytes()[0] ^= 0xFF;

            // `write_to` rejects slices that are too small or too large.
            assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
            assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());

            // `write_to` works as expected.
            let mut bytes = [0; N];
            assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` rejects slices that are too small.
            assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_prefix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` works with too-large slices, and any bytes past
            // the prefix aren't modified. (`123` is a sentinel that must survive.)
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[N] = 123;
            assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[..N], t.as_bytes());
            assert_eq!(too_many_bytes[N], 123);

            // `write_to_suffix` rejects slices that are too small.
            assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_suffix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_suffix` works with too-large slices, and any bytes
            // before the suffix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[0] = 123;
            assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[1..], t.as_bytes());
            assert_eq!(too_many_bytes[0], 123);
        }

        // A 12-byte `repr(C)` struct; the `c: None` field is encoded as four
        // zero bytes (see `expected_bytes` below).
        #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
        #[repr(C)]
        struct Foo {
            a: u32,
            b: Wrapping<u32>,
            c: Option<NonZeroU32>,
        }

        let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
            vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
        } else {
            vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
        };
        // Flipping byte 0 flips the low byte of `a` on little-endian targets
        // and the high byte on big-endian targets.
        let post_mutation_expected_a =
            if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
        test::<_, 12>(
            &mut Foo { a: 1, b: Wrapping(2), c: None },
            expected_bytes.as_bytes(),
            &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
        );
        // Also exercise an unsized (slice-backed) type.
        test::<_, 3>(
            Unsized::from_mut_slice(&mut [1, 2, 3]),
            &[1, 2, 3],
            Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
        );
    }
6214
6215    #[test]
6216    fn test_array() {
6217        #[derive(FromBytes, IntoBytes, Immutable)]
6218        #[repr(C)]
6219        struct Foo {
6220            a: [u16; 33],
6221        }
6222
6223        let foo = Foo { a: [0xFFFF; 33] };
6224        let expected = [0xFFu8; 66];
6225        assert_eq!(foo.as_bytes(), &expected[..]);
6226    }
6227
6228    #[test]
6229    fn test_new_zeroed() {
6230        assert!(!bool::new_zeroed());
6231        assert_eq!(u64::new_zeroed(), 0);
6232        // This test exists in order to exercise unsafe code, especially when
6233        // running under Miri.
6234        #[allow(clippy::unit_cmp)]
6235        {
6236            assert_eq!(<()>::new_zeroed(), ());
6237        }
6238    }
6239
6240    #[test]
6241    fn test_transparent_packed_generic_struct() {
6242        #[derive(IntoBytes, FromBytes, Unaligned)]
6243        #[repr(transparent)]
6244        #[allow(dead_code)] // We never construct this type
6245        struct Foo<T> {
6246            _t: T,
6247            _phantom: PhantomData<()>,
6248        }
6249
6250        assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
6251        assert_impl_all!(Foo<u8>: Unaligned);
6252
6253        #[derive(IntoBytes, FromBytes, Unaligned)]
6254        #[repr(C, packed)]
6255        #[allow(dead_code)] // We never construct this type
6256        struct Bar<T, U> {
6257            _t: T,
6258            _u: U,
6259        }
6260
6261        assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
6262    }
6263
6264    #[cfg(feature = "alloc")]
6265    mod alloc {
6266        use super::*;
6267
6268        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
6269        #[test]
6270        fn test_extend_vec_zeroed() {
6271            // Test extending when there is an existing allocation.
6272            let mut v = vec![100u16, 200, 300];
6273            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6274            assert_eq!(v.len(), 6);
6275            assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
6276            drop(v);
6277
6278            // Test extending when there is no existing allocation.
6279            let mut v: Vec<u64> = Vec::new();
6280            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6281            assert_eq!(v.len(), 3);
6282            assert_eq!(&*v, &[0, 0, 0]);
6283            drop(v);
6284        }
6285
6286        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
6287        #[test]
6288        fn test_extend_vec_zeroed_zst() {
6289            // Test extending when there is an existing (fake) allocation.
6290            let mut v = vec![(), (), ()];
6291            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6292            assert_eq!(v.len(), 6);
6293            assert_eq!(&*v, &[(), (), (), (), (), ()]);
6294            drop(v);
6295
6296            // Test extending when there is no existing (fake) allocation.
6297            let mut v: Vec<()> = Vec::new();
6298            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6299            assert_eq!(&*v, &[(), (), ()]);
6300            drop(v);
6301        }
6302
6303        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
6304        #[test]
6305        fn test_insert_vec_zeroed() {
6306            // Insert at start (no existing allocation).
6307            let mut v: Vec<u64> = Vec::new();
6308            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6309            assert_eq!(v.len(), 2);
6310            assert_eq!(&*v, &[0, 0]);
6311            drop(v);
6312
6313            // Insert at start.
6314            let mut v = vec![100u64, 200, 300];
6315            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6316            assert_eq!(v.len(), 5);
6317            assert_eq!(&*v, &[0, 0, 100, 200, 300]);
6318            drop(v);
6319
6320            // Insert at middle.
6321            let mut v = vec![100u64, 200, 300];
6322            u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6323            assert_eq!(v.len(), 4);
6324            assert_eq!(&*v, &[100, 0, 200, 300]);
6325            drop(v);
6326
6327            // Insert at end.
6328            let mut v = vec![100u64, 200, 300];
6329            u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6330            assert_eq!(v.len(), 4);
6331            assert_eq!(&*v, &[100, 200, 300, 0]);
6332            drop(v);
6333        }
6334
6335        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve)]
6336        #[test]
6337        fn test_insert_vec_zeroed_zst() {
6338            // Insert at start (no existing fake allocation).
6339            let mut v: Vec<()> = Vec::new();
6340            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6341            assert_eq!(v.len(), 2);
6342            assert_eq!(&*v, &[(), ()]);
6343            drop(v);
6344
6345            // Insert at start.
6346            let mut v = vec![(), (), ()];
6347            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6348            assert_eq!(v.len(), 5);
6349            assert_eq!(&*v, &[(), (), (), (), ()]);
6350            drop(v);
6351
6352            // Insert at middle.
6353            let mut v = vec![(), (), ()];
6354            <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6355            assert_eq!(v.len(), 4);
6356            assert_eq!(&*v, &[(), (), (), ()]);
6357            drop(v);
6358
6359            // Insert at end.
6360            let mut v = vec![(), (), ()];
6361            <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6362            assert_eq!(v.len(), 4);
6363            assert_eq!(&*v, &[(), (), (), ()]);
6364            drop(v);
6365        }
6366
6367        #[test]
6368        fn test_new_box_zeroed() {
6369            assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
6370        }
6371
6372        #[test]
6373        fn test_new_box_zeroed_array() {
6374            drop(<[u32; 0x1000]>::new_box_zeroed());
6375        }
6376
6377        #[test]
6378        fn test_new_box_zeroed_zst() {
6379            // This test exists in order to exercise unsafe code, especially
6380            // when running under Miri.
6381            #[allow(clippy::unit_cmp)]
6382            {
6383                assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
6384            }
6385        }
6386
6387        #[test]
6388        fn test_new_box_zeroed_with_elems() {
6389            let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
6390            assert_eq!(s.len(), 3);
6391            assert_eq!(&*s, &[0, 0, 0]);
6392            s[1] = 3;
6393            assert_eq!(&*s, &[0, 3, 0]);
6394        }
6395
6396        #[test]
6397        fn test_new_box_zeroed_with_elems_empty() {
6398            let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
6399            assert_eq!(s.len(), 0);
6400        }
6401
6402        #[test]
6403        fn test_new_box_zeroed_with_elems_zst() {
6404            let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
6405            assert_eq!(s.len(), 3);
6406            assert!(s.get(10).is_none());
6407            // This test exists in order to exercise unsafe code, especially
6408            // when running under Miri.
6409            #[allow(clippy::unit_cmp)]
6410            {
6411                assert_eq!(s[1], ());
6412            }
6413            s[2] = ();
6414        }
6415
6416        #[test]
6417        fn test_new_box_zeroed_with_elems_zst_empty() {
6418            let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
6419            assert_eq!(s.len(), 0);
6420        }
6421
6422        #[test]
6423        fn new_box_zeroed_with_elems_errors() {
6424            assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));
6425
6426            let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
6427            assert_eq!(
6428                <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
6429                Err(AllocError)
6430            );
6431        }
6432    }
6433}
6434
#[cfg(kani)]
mod proofs {
    //! Kani proof harnesses which check `DstLayout::extend` and
    //! `DstLayout::pad_to_align` against the analogous operations on
    //! `alloc::Layout`.

    use super::*;

    // Generate arbitrary (but plausible) `DstLayout`s: power-of-two
    // alignments below the theoretical max, and size information that
    // `Layout` accepts as a valid Rust layout.
    impl kani::Arbitrary for DstLayout {
        fn any() -> Self {
            let align: NonZeroUsize = kani::any();
            let size_info: SizeInfo = kani::any();

            kani::assume(align.is_power_of_two());
            kani::assume(align < DstLayout::THEORETICAL_MAX_ALIGN);

            // For testing purposes, we most care about instantiations of
            // `DstLayout` that can correspond to actual Rust types. We use
            // `Layout` to verify that our `DstLayout` satisfies the validity
            // conditions of Rust layouts.
            kani::assume(
                match size_info {
                    SizeInfo::Sized { size } => Layout::from_size_align(size, align.get()),
                    SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size: _ }) => {
                        // `SliceDst` cannot encode an exact size, but we know
                        // it is at least `offset` bytes.
                        Layout::from_size_align(offset, align.get())
                    }
                }
                .is_ok(),
            );

            Self { align, size_info }
        }
    }

    // Generate either a `Sized` layout (size bounded by `isize::MAX`) or an
    // arbitrary `SliceDst` layout.
    impl kani::Arbitrary for SizeInfo {
        fn any() -> Self {
            let is_sized: bool = kani::any();

            match is_sized {
                true => {
                    let size: usize = kani::any();

                    kani::assume(size <= isize::MAX as _);

                    SizeInfo::Sized { size }
                }
                false => SizeInfo::SliceDst(kani::any()),
            }
        }
    }

    // Generate trailing-slice layouts whose offset and element size are each
    // below `isize::MAX`.
    impl kani::Arbitrary for TrailingSliceLayout {
        fn any() -> Self {
            let elem_size: usize = kani::any();
            let offset: usize = kani::any();

            kani::assume(elem_size < isize::MAX as _);
            kani::assume(offset < isize::MAX as _);

            TrailingSliceLayout { elem_size, offset }
        }
    }

    #[kani::proof]
    fn prove_dst_layout_extend() {
        use crate::util::{max, min, padding_needed_for};

        let base: DstLayout = kani::any();
        let field: DstLayout = kani::any();
        let packed: Option<NonZeroUsize> = kani::any();

        if let Some(max_align) = packed {
            kani::assume(max_align.is_power_of_two());
            kani::assume(base.align <= max_align);
        }

        // The base can only be extended if it's sized.
        kani::assume(matches!(base.size_info, SizeInfo::Sized { .. }));
        let base_size = if let SizeInfo::Sized { size } = base.size_info {
            size
        } else {
            unreachable!();
        };

        // Under the above conditions, `DstLayout::extend` will not panic.
        let composite = base.extend(field, packed);

        // The field's alignment is clamped by `max_align` (i.e., the
        // `packed` attribute, if any) [1].
        //
        // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
        //
        //   The alignments of each field, for the purpose of positioning
        //   fields, is the smaller of the specified alignment and the
        //   alignment of the field's type.
        let field_align = min(field.align, packed.unwrap_or(DstLayout::THEORETICAL_MAX_ALIGN));

        // The struct's alignment is the maximum of its previous alignment and
        // `field_align`.
        assert_eq!(composite.align, max(base.align, field_align));

        // Compute the minimum amount of inter-field padding needed to
        // satisfy the field's alignment, and offset of the trailing field.
        // [1]
        //
        // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
        //
        //   Inter-field padding is guaranteed to be the minimum required in
        //   order to satisfy each field's (possibly altered) alignment.
        let padding = padding_needed_for(base_size, field_align);
        let offset = base_size + padding;

        // For testing purposes, we'll also construct `alloc::Layout`
        // stand-ins for `DstLayout`, and show that `extend` behaves
        // comparably on both types.
        let base_analog = Layout::from_size_align(base_size, base.align.get()).unwrap();

        match field.size_info {
            SizeInfo::Sized { size: field_size } => {
                if let SizeInfo::Sized { size: composite_size } = composite.size_info {
                    // If the trailing field is sized, the resulting layout will
                    // be sized. Its size will be the sum of the preceding
                    // layout, the size of the new field, and the size of
                    // inter-field padding between the two.
                    assert_eq!(composite_size, offset + field_size);

                    let field_analog =
                        Layout::from_size_align(field_size, field_align.get()).unwrap();

                    if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
                    {
                        assert_eq!(actual_offset, offset);
                        assert_eq!(actual_composite.size(), composite_size);
                        assert_eq!(actual_composite.align(), composite.align.get());
                    } else {
                        // An error here reflects that composite of `base`
                        // and `field` cannot correspond to a real Rust type
                        // fragment, because such a fragment would violate
                        // the basic invariants of a valid Rust layout. At
                        // the time of writing, `DstLayout` is a little more
                        // permissive than `Layout`, so we don't assert
                        // anything in this branch (e.g., unreachability).
                    }
                } else {
                    panic!("The composite of two sized layouts must be sized.")
                }
            }
            SizeInfo::SliceDst(TrailingSliceLayout {
                offset: field_offset,
                elem_size: field_elem_size,
            }) => {
                if let SizeInfo::SliceDst(TrailingSliceLayout {
                    offset: composite_offset,
                    elem_size: composite_elem_size,
                }) = composite.size_info
                {
                    // The offset of the trailing slice component is the sum
                    // of the offset of the trailing field and the trailing
                    // slice offset within that field.
                    assert_eq!(composite_offset, offset + field_offset);
                    // The elem size is unchanged.
                    assert_eq!(composite_elem_size, field_elem_size);

                    let field_analog =
                        Layout::from_size_align(field_offset, field_align.get()).unwrap();

                    if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
                    {
                        assert_eq!(actual_offset, offset);
                        assert_eq!(actual_composite.size(), composite_offset);
                        assert_eq!(actual_composite.align(), composite.align.get());
                    } else {
                        // An error here reflects that composite of `base`
                        // and `field` cannot correspond to a real Rust type
                        // fragment, because such a fragment would violate
                        // the basic invariants of a valid Rust layout. At
                        // the time of writing, `DstLayout` is a little more
                        // permissive than `Layout`, so we don't assert
                        // anything in this branch (e.g., unreachability).
                    }
                } else {
                    panic!("The extension of a layout with a DST must result in a DST.")
                }
            }
        }
    }

    // `extend` must panic when the base layout is itself a slice DST, since a
    // trailing slice can only be the final field of a type.
    #[kani::proof]
    #[kani::should_panic]
    fn prove_dst_layout_extend_dst_panics() {
        let base: DstLayout = kani::any();
        let field: DstLayout = kani::any();
        let packed: Option<NonZeroUsize> = kani::any();

        if let Some(max_align) = packed {
            kani::assume(max_align.is_power_of_two());
            kani::assume(base.align <= max_align);
        }

        kani::assume(matches!(base.size_info, SizeInfo::SliceDst(..)));

        let _ = base.extend(field, packed);
    }

    #[kani::proof]
    fn prove_dst_layout_pad_to_align() {
        use crate::util::padding_needed_for;

        let layout: DstLayout = kani::any();

        let padded: DstLayout = layout.pad_to_align();

        // Calling `pad_to_align` does not alter the `DstLayout`'s alignment.
        assert_eq!(padded.align, layout.align);

        if let SizeInfo::Sized { size: unpadded_size } = layout.size_info {
            if let SizeInfo::Sized { size: padded_size } = padded.size_info {
                // If the layout is sized, it will remain sized after padding is
                // added. Its sum will be its unpadded size and the size of the
                // trailing padding needed to satisfy its alignment
                // requirements.
                let padding = padding_needed_for(unpadded_size, layout.align);
                assert_eq!(padded_size, unpadded_size + padding);

                // Prove that calling `DstLayout::pad_to_align` behaves
                // identically to `Layout::pad_to_align`.
                let layout_analog =
                    Layout::from_size_align(unpadded_size, layout.align.get()).unwrap();
                let padded_analog = layout_analog.pad_to_align();
                assert_eq!(padded_analog.align(), layout.align.get());
                assert_eq!(padded_analog.size(), padded_size);
            } else {
                panic!("The padding of a sized layout must result in a sized layout.")
            }
        } else {
            // If the layout is a DST, padding cannot be statically added.
            assert_eq!(padded.size_info, layout.size_info);
        }
    }
}