musli/alloc/slice.rs

#[cfg(test)]
mod tests;

use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::mem::{align_of, forget, replace, size_of, MaybeUninit};
use core::num::NonZeroU16;
use core::ops::{Deref, DerefMut};
use core::ptr;

use super::{Alloc, AllocError, Allocator, SliceBuffer};

// We keep max bytes to 2^31, since that ensures that addition between two
// magnitudes never overflows.
const MAX_BYTES: usize = i32::MAX as usize;

/// A no-std compatible slice-based allocator that can be used with the `musli`
/// crate.
///
/// It is geared towards handling few allocations, but they can be arbitrarily
/// large. It is optimized to work best when allocations are short lived and
/// "merged back" into one previously allocated region through
/// `Buffer::extend`.
///
/// It's also optimized to write to one allocation "at a time". So once an
/// allocation has been grown, it will be put in a region where it is unlikely
/// to need to be moved again, usually the last region, which has access to the
/// remainder of the provided buffer.
///
/// For the moment, this allocator only supports 65535 unique allocations, which
/// is fine for use with the `musli` crate, but might be a limitation for other
/// use-cases.
///
/// ## Examples
///
/// ```
/// use musli::alloc::{ArrayBuffer, Slice, Vec};
///
/// let mut buf = ArrayBuffer::new();
/// let alloc = Slice::new(&mut buf);
///
/// let mut a = Vec::new_in(&alloc);
/// let mut b = Vec::new_in(&alloc);
///
/// b.extend_from_slice(b"He11o")?;
/// a.extend_from_slice(b.as_slice())?;
///
/// assert_eq!(a.as_slice(), b"He11o");
/// assert_eq!(a.len(), 5);
///
/// a.extend_from_slice(b" W0rld")?;
///
/// assert_eq!(a.as_slice(), b"He11o W0rld");
/// assert_eq!(a.len(), 11);
///
/// let mut c = Vec::new_in(&alloc);
/// c.extend_from_slice(b"!")?;
/// a.extend_from_slice(c.as_slice())?;
///
/// assert_eq!(a.as_slice(), b"He11o W0rld!");
/// assert_eq!(a.len(), 12);
/// # Ok::<_, musli::alloc::AllocError>(())
/// ```
///
/// ## Design
///
/// The allocator takes a buffer of contiguous memory. This is dynamically
/// divided into two parts:
///
/// * One part which grows upwards from the base, constituting the memory being
///   allocated.
/// * Its metadata growing downward from the end of the buffer, containing
///   headers for all allocated regions.
///
/// By designing the allocator so that the allocated memory and its metadata are
/// kept separate, neighbouring regions can efficiently be merged as they are
/// written or freed.
///
/// Each allocation is sparse, meaning it does not try to over-allocate memory.
/// This ensures that subsequent regions with initialized memory can be merged
/// efficiently, but degrades performance for many small writes performed across
/// multiple allocations concurrently.
///
/// Below is an illustration of this, where `a` and `b` are two allocations to
/// which we write one byte at a time. Here `x` indicates an occupied `gap` in
/// the memory regions.
///
/// ```text
/// a
/// ab
/// # a moved to end
/// xbaa
/// # b moved to 0
/// bbaa
/// # aa not moved
/// bbaaa
/// # bb moved to end
/// xxaaabbb
/// # aaa moved to 0
/// aaaaxbbb
/// # bbb not moved
/// aaaaxbbbb
/// # aaaa not moved
/// aaaaabbbb
/// # bbbbb not moved
/// aaaaabbbbb
/// # aaaaa moved to end
/// xxxxxbbbbbaaaaaa
/// # bbbbb moved to 0
/// bbbbbbxxxxaaaaaa
/// ```
pub struct Slice<'a> {
    // This must be an unsafe cell, since it's mutably accessed through
    // immutable pointers. We simply make sure that those accesses do not
    // clobber each other, which we can do since the API is restricted through
    // the `Alloc` trait.
    internal: UnsafeCell<Internal>,
    // The underlying buffer being borrowed.
    _marker: PhantomData<&'a mut [MaybeUninit<u8>]>,
}

impl<'a> Slice<'a> {
    /// Construct a new slice allocator around a [`SliceBuffer`].
    ///
    /// See [type-level documentation][Slice] for more information.
    ///
    /// # Panics
    ///
    /// This panics if called with a buffer larger than `2^31` bytes.
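    ///
    /// # Examples
    ///
    /// A minimal example mirroring the type-level usage above:
    ///
    /// ```
    /// use musli::alloc::{ArrayBuffer, Slice, Vec};
    ///
    /// let mut buf = ArrayBuffer::new();
    /// let alloc = Slice::new(&mut buf);
    ///
    /// let mut vec = Vec::new_in(&alloc);
    /// vec.extend_from_slice(b"He11o W0rld!")?;
    /// assert_eq!(vec.as_slice(), b"He11o W0rld!");
    /// # Ok::<_, musli::alloc::AllocError>(())
    /// ```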
    pub fn new<B>(buffer: &'a mut B) -> Self
    where
        B: ?Sized + SliceBuffer,
    {
        let buffer = buffer.as_uninit_bytes();
        let size = buffer.len();

        assert!(
            size <= MAX_BYTES,
            "Buffer of {size} bytes is larger than the maximum {MAX_BYTES}"
        );

        let mut data = Range::new(buffer.as_mut_ptr_range());

        // The region of allocated headers grows downwards from the end of the
        // buffer, so in order to ensure headers are aligned, we simply align
        // the end pointer of the buffer preemptively here. Then we don't have
        // to worry about it.
        let align = data.end.align_offset(align_of::<Header>());

        if align != 0 {
            let sub = align_of::<Header>() - align;

            if sub <= size {
                // SAFETY: We've ensured that the adjustment does not exceed
                // the size of the buffer.
                unsafe {
                    data.end = data.end.sub(sub);
                }
            } else {
                data.end = data.start;
            }
        }

        Self {
            internal: UnsafeCell::new(Internal {
                free_head: None,
                tail: None,
                occupied: None,
                full: data,
                free: data,
            }),
            _marker: PhantomData,
        }
    }
}

unsafe impl<'a> Allocator for &'a Slice<'_> {
    const IS_SYSTEM: bool = false;

    type Alloc<T> = SliceAlloc<'a, T>;

    #[inline]
    fn alloc<T>(self, value: T) -> Result<Self::Alloc<T>, AllocError> {
        if size_of::<T>() == 0 {
            return Ok(SliceAlloc::ZST);
        }

        // SAFETY: We have exclusive access to the internal state, and it's only
        // held for the duration of this call.
        let region = unsafe {
            let i = &mut *self.internal.get();
            let region = i.alloc(size_of::<T>(), align_of::<T>());

            let Some(region) = region else {
                return Err(AllocError);
            };

            // Write the value into the region.
            region.range.start.cast::<T>().write(value);
            Some(region.id)
        };

        Ok(SliceAlloc {
            region,
            internal: Some(&self.internal),
            cap: 1,
            _marker: PhantomData,
        })
    }

    #[inline]
    fn alloc_empty<T>(self) -> Self::Alloc<T> {
        if size_of::<T>() == 0 {
            return SliceAlloc::ZST;
        }

        // SAFETY: We have exclusive access to the internal state, and it's only
        // held for the duration of this call.
        let region = unsafe {
            (*self.internal.get())
                .alloc(0, align_of::<T>())
                .map(|r| r.id)
        };

        SliceAlloc {
            region,
            internal: Some(&self.internal),
            cap: 0,
            _marker: PhantomData,
        }
    }
}

/// A slice allocated buffer.
///
/// See [`Slice`].
pub struct SliceAlloc<'a, T> {
    region: Option<HeaderId>,
    internal: Option<&'a UnsafeCell<Internal>>,
    // Locally known capacity in units of `T`. The real capacity *might* be
    // larger than this value.
    cap: usize,
    _marker: PhantomData<T>,
}

impl<T> SliceAlloc<'_, T> {
    const ZST: Self = Self {
        region: None,
        internal: None,
        cap: usize::MAX,
        _marker: PhantomData,
    };
}

impl<T> SliceAlloc<'_, T> {
    #[inline]
    fn free(&mut self) {
        let (Some(region), Some(internal)) = (self.region.take(), self.internal) else {
            return;
        };

        // SAFETY: We have exclusive access to the internal state.
        unsafe {
            (*internal.get()).free(region);
        }
    }
}

impl<T> Alloc<T> for SliceAlloc<'_, T> {
    #[inline]
    fn as_ptr(&self) -> *const T {
        let (Some(region), Some(internal)) = (self.region, self.internal) else {
            return ptr::NonNull::dangling().as_ptr();
        };

        unsafe {
            let i = &*internal.get();
            let this = i.header(region);
            this.range.start.cast_const().cast()
        }
    }

    #[inline]
    fn as_mut_ptr(&mut self) -> *mut T {
        let (Some(region), Some(internal)) = (self.region, self.internal) else {
            return ptr::NonNull::dangling().as_ptr();
        };

        unsafe {
            let i = &*internal.get();
            let this = i.header(region);
            this.range.start.cast()
        }
    }

    #[inline]
    fn capacity(&self) -> usize {
        let Some(internal) = self.internal else {
            return usize::MAX;
        };

        let Some(region_id) = self.region else {
            return 0;
        };

        // SAFETY: Due to invariants in the Buffer trait we know that these
        // cannot be used incorrectly.
        unsafe {
            let i = &mut *internal.get();
            i.region(region_id).capacity()
        }
    }

    #[inline]
    fn resize(&mut self, len: usize, additional: usize) -> Result<(), AllocError> {
        // Cheaply check the locally known capacity.
        //
        // This capacity is in units of `T`, and is only ever at risk of being
        // too small if the allocation has been grown.
        if len + additional <= self.cap {
            return Ok(());
        }

        let Some(internal) = self.internal else {
            debug_assert_eq!(
                size_of::<T>(),
                0,
                "Only ZSTs should lack an internal pointer"
            );
            // ZSTs don't need to do any work to be resized.
            return Ok(());
        };

        let Some(region_id) = self.region else {
            return Err(AllocError);
        };

        let Some(len) = len.checked_mul(size_of::<T>()) else {
            return Err(AllocError);
        };

        let Some(additional) = additional.checked_mul(size_of::<T>()) else {
            return Err(AllocError);
        };

        let Some(requested) = len.checked_add(additional) else {
            return Err(AllocError);
        };

        if requested > MAX_BYTES {
            return Err(AllocError);
        }

        // SAFETY: Due to invariants in the Buffer trait we know that these
        // cannot be used incorrectly.
        unsafe {
            let i = &mut *internal.get();

            let region = i.region(region_id);

            let actual = region.capacity();

            // The region can already fit the requested bytes.
            if actual >= requested {
                self.cap = actual / size_of::<T>();
                return Ok(());
            };

            let Some(region) = i.realloc(region_id, len, requested, align_of::<T>()) else {
                return Err(AllocError);
            };

            self.region = Some(region.id);
            self.cap = region.capacity() / size_of::<T>();
            Ok(())
        }
    }

    #[inline]
    fn try_merge<B>(&mut self, this_len: usize, buf: B, other_len: usize) -> Result<(), B>
    where
        B: Alloc<T>,
    {
        let Some(internal) = self.internal else {
            // T is a ZST, so merging is trivial. We can just return immediately
            // here.
            return Ok(());
        };

        let Some(region) = self.region else {
            return Err(buf);
        };

        let this_len = this_len * size_of::<T>();
        let other_len = other_len * size_of::<T>();

        // NB: Placing this here to make miri happy, since accessing the
        // slice will mean mutably accessing the internal state.
        let other_ptr = buf.as_ptr().cast();

        unsafe {
            let i = &mut *internal.get();
            let mut this = i.region(region);

            debug_assert!(this.capacity() >= this_len);

            // If the other region immediately follows this region, we can
            // optimize the write by simply growing the current region and
            // de-allocating the second, since the only conclusion is that
            // they share the same allocator.
            if !ptr::eq(this.range.end.cast_const(), other_ptr) {
                return Err(buf);
            }

            let Some(next) = this.next else {
                return Err(buf);
            };

            // Prevent the other buffer from being dropped, since we're
            // taking care of the allocation in here directly instead.
            forget(buf);

            let next = i.region(next);

            let to = this.range.start.wrapping_add(this_len);

            // Data needs to be shuffled back to the end of the initialized
            // region.
            if this.range.end != to {
                this.range.end.copy_to(to, other_len);
            }

            let old = i.free_region(next);
            this.range.end = old.range.end;
            Ok(())
        }
    }
}

impl<T> Drop for SliceAlloc<'_, T> {
    #[inline]
    fn drop(&mut self) {
        self.free()
    }
}

struct Region {
    id: HeaderId,
    ptr: *mut Header,
}

impl Deref for Region {
    type Target = Header;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: Construction of the region is unsafe, so the caller must
        // ensure that it's used correctly after that.
        unsafe { &*self.ptr }
    }
}

impl DerefMut for Region {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: Construction of the region is unsafe, so the caller must
        // ensure that it's used correctly after that.
        unsafe { &mut *self.ptr }
    }
}

/// The identifier of a region.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(test, derive(PartialOrd, Ord, Hash))]
#[repr(transparent)]
struct HeaderId(NonZeroU16);

impl HeaderId {
    #[cfg(test)]
    const unsafe fn new_unchecked(value: u16) -> Self {
        Self(NonZeroU16::new_unchecked(value))
    }

    /// Create a new region identifier.
    ///
    /// Returns `None` if the value is zero or does not fit in a `u16`.
    #[inline]
    fn new(value: isize) -> Option<Self> {
        Some(Self(NonZeroU16::new(u16::try_from(value).ok()?)?))
    }

    /// Get the value of the region identifier.
    #[inline]
    fn get(self) -> usize {
        self.0.get() as usize
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Range {
    start: *mut MaybeUninit<u8>,
    end: *mut MaybeUninit<u8>,
}

impl Range {
    fn new(range: core::ops::Range<*mut MaybeUninit<u8>>) -> Self {
        Self {
            start: range.start,
            end: range.end,
        }
    }

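    /// Return an empty range positioned at the start of this range.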
    fn head(self) -> Range {
        Self {
            start: self.start,
            end: self.start,
        }
    }
}

struct Internal {
    // The first free region.
    free_head: Option<HeaderId>,
    // Pointer to the tail region.
    tail: Option<HeaderId>,
    // The occupied header region.
    occupied: Option<HeaderId>,
    // The full range used by the allocator.
    full: Range,
    // The free range available to the allocator.
    free: Range,
}

impl Internal {
    // Return the number of allocated bytes.
    #[cfg(test)]
    #[inline]
    fn bytes(&self) -> usize {
        // SAFETY: It is guaranteed that free.start >= full.start inside of the provided region.
        unsafe { self.free.start.byte_offset_from(self.full.start) as usize }
    }

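    // Return the number of allocated headers.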
    #[cfg(test)]
    #[inline]
    fn headers(&self) -> usize {
        unsafe {
            self.full
                .end
                .cast::<Header>()
                .offset_from(self.free.end.cast()) as usize
        }
    }

    // Return the number of remaining bytes.
    #[inline]
    fn remaining(&self) -> usize {
        // SAFETY: It is guaranteed that free.end >= free.start inside of the provided region.
        unsafe { self.free.end.byte_offset_from(self.free.start) as usize }
    }

    /// Get a reference to the header corresponding to the given id.
    #[inline]
    fn header(&self, at: HeaderId) -> &Header {
        // SAFETY: Once we've coerced to `&self`, then we guarantee that we can
        // get a header immutably.
        unsafe { &*self.full.end.cast::<Header>().wrapping_sub(at.get()) }
    }

    /// Get the mutable header pointer corresponding to the given id.
    #[inline]
    fn header_mut(&mut self, at: HeaderId) -> *mut Header {
        self.full.end.cast::<Header>().wrapping_sub(at.get())
    }

    /// Get the mutable region corresponding to the given id.
    #[inline]
    fn region(&mut self, id: HeaderId) -> Region {
        Region {
            id,
            ptr: self.header_mut(id),
        }
    }

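    /// Unlink the given header from the doubly linked list of regions,
    /// updating the tail pointer if it was the last region.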
    unsafe fn unlink(&mut self, header: &Header) {
        if let Some(next) = header.next {
            (*self.header_mut(next)).prev = header.prev;
        } else {
            self.tail = header.prev;
        }

        if let Some(prev) = header.prev {
            (*self.header_mut(prev)).next = header.next;
        }
    }

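    /// Unlink the region from its current position in the list and push it to
    /// the back, making it the new tail region.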
    unsafe fn replace_back(&mut self, region: &mut Region) {
        let prev = region.prev.take();
        let next = region.next.take();

        if let Some(prev) = prev {
            (*self.header_mut(prev)).next = next;
        }

        if let Some(next) = next {
            (*self.header_mut(next)).prev = prev;
        }

        self.push_back(region);
    }

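    /// Append the region to the end of the linked list, making it the new
    /// tail region.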
    unsafe fn push_back(&mut self, region: &mut Region) {
        if let Some(tail) = self.tail.replace(region.id) {
            region.prev = Some(tail);
            (*self.header_mut(tail)).next = Some(region.id);
        }
    }

    /// Free a region.
    unsafe fn free_region(&mut self, region: Region) -> Header {
        self.unlink(&region);

        region.ptr.replace(Header {
            range: self.full.head(),
            next: self.free_head.replace(region.id),
            prev: None,
        })
    }

    /// Allocate a new header.
    ///
    /// # Safety
    ///
    /// The caller must ensure that there is enough space for the region and
    /// that the end pointer has been aligned to the requirements of `Header`.
    unsafe fn alloc_header(&mut self, end: *mut MaybeUninit<u8>) -> Option<Region> {
        if let Some(region) = self.free_head.take() {
            let mut region = self.region(region);

            region.range.start = self.free.start;
            region.range.end = end;

            return Some(region);
        }

        debug_assert_eq!(
            self.free.end.align_offset(align_of::<Header>()),
            0,
            "End pointer should be aligned to header"
        );

        let ptr = self.free.end.cast::<Header>().wrapping_sub(1);

        if ptr < self.free.start.cast() || ptr >= self.free.end.cast() {
            return None;
        }

        let id = HeaderId::new(self.full.end.cast::<Header>().offset_from(ptr))?;

        ptr.write(Header {
            range: Range::new(self.free.start..end),
            prev: None,
            next: None,
        });

        self.free.end = ptr.cast();
        Some(Region { id, ptr })
    }

    /// Allocate a region.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `this` is exclusively available.
    unsafe fn alloc(&mut self, requested: usize, align: usize) -> Option<Region> {
        if let Some(occupied) = self.occupied {
            let region = self.region(occupied);

            if region.capacity() >= requested && region.is_aligned(align) {
                self.occupied = None;
                return Some(region);
            }
        }

        self.align(align)?;

        if self.remaining() < requested {
            return None;
        }

        let end = self.free.start.wrapping_add(requested);

        let mut region = self.alloc_header(end)?;

        self.free.start = end;
        debug_assert!(self.free.start <= self.free.end);
        self.push_back(&mut region);
        Some(region)
    }

    /// Align the free region by the specified alignment.
    ///
    /// This might require either expanding the tail region, or introducing an
    /// occupied region which matches the number of bytes needed to fulfill the
    /// specified alignment.
    unsafe fn align(&mut self, align: usize) -> Option<()> {
        let align = self.free.start.align_offset(align);

        if align == 0 {
            return Some(());
        }

        if self.remaining() < align {
            return None;
        }

        let aligned_start = self.free.start.wrapping_add(align);

        if let Some(tail) = self.tail {
            // Simply expand the tail region to fill the gap created.
            self.region(tail).range.end = aligned_start;
        } else {
            // We need to construct a new occupied header to fill in the gap
            // which we just aligned from since there is no previous region to
            // expand.
            let mut region = self.alloc_header(aligned_start)?;

            self.push_back(&mut region);
        }

        self.free.start = aligned_start;
        Some(())
    }

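    /// Free the given region. Depending on its position it is either returned
    /// to the free range (if it is the tail), marked as the occupied gap (if
    /// it has no previous region), or merged into the previous region.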
    unsafe fn free(&mut self, region: HeaderId) {
        let region = self.region(region);

        // Just free up the last region in the slab.
        if region.next.is_none() {
            self.free_tail(region);
            return;
        }

        // If there is no previous region, then mark this region as occupied.
        let Some(prev) = region.prev else {
            debug_assert!(
                self.occupied.is_none(),
                "There can only be one occupied region"
            );

            self.occupied = Some(region.id);
            return;
        };

        let mut prev = self.region(prev);

        // Move allocation to the previous region.
        let region = self.free_region(region);

        prev.range.end = region.range.end;

        // The current header being freed is the last in the list.
        if region.next.is_none() {
            self.free.start = region.range.start;
        }
    }

    /// Free the tail starting at the `current` region.
    unsafe fn free_tail(&mut self, current: Region) {
        debug_assert_eq!(self.tail, Some(current.id));

        let current = self.free_region(current);
        debug_assert_eq!(current.next, None);

        self.free.start = match current.prev {
            // The prior region is occupied, so we can free that as well.
            Some(prev) if self.occupied == Some(prev) => {
                self.occupied = None;
                let prev = self.region(prev);
                self.free_region(prev).range.start
            }
            _ => current.range.start,
        };
    }

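    /// Reserve `additional` bytes at the start of the free range, aligned to
    /// `align`, returning the pointer just past the reserved bytes. Note that
    /// `self.free.start` is not advanced here; that is up to the caller.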
    unsafe fn reserve(&mut self, additional: usize, align: usize) -> Option<*mut MaybeUninit<u8>> {
        self.align(align)?;

        let free_start = self.free.start.wrapping_add(additional);

        if free_start > self.free.end || free_start < self.free.start {
            return None;
        }

        Some(free_start)
    }

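    /// Reallocate the region identified by `from` so that it can hold
    /// `requested` bytes aligned to `align`, copying the `len` initialized
    /// bytes over if the allocation has to move.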
    unsafe fn realloc(
        &mut self,
        from: HeaderId,
        len: usize,
        requested: usize,
        align: usize,
    ) -> Option<Region> {
        let mut from = self.region(from);

        // This is the last region in the slab, so we can just expand it.
        if from.next.is_none() {
            // Before we call realloc, we check the capacity of the current
            // region. So we know that it is <= requested.
            let additional = requested - from.capacity();
            self.free.start = self.reserve(additional, align)?;
            from.range.end = from.range.end.add(additional);
            return Some(from);
        }

        // There is no data allocated in the current region, so we can simply
        // re-link it to the end of the chain of allocations.
        if from.range.start == from.range.end {
            let free_start = self.reserve(requested, align)?;
            from.range.start = replace(&mut self.free.start, free_start);
            from.range.end = free_start;
            self.replace_back(&mut from);
            return Some(from);
        }

        // Try to merge with a preceding region, if the requested memory can
        // fit in it.
        'bail: {
            // Check if the immediate prior region can fit the requested allocation.
            let Some(prev) = from.prev else {
                break 'bail;
            };

            if self.occupied != Some(prev) {
                break 'bail;
            }

            let mut prev = self.region(prev);

            if prev.capacity() + from.capacity() < requested {
                break 'bail;
            }

            if !prev.is_aligned(align) {
                break 'bail;
            }

            let from = self.free_region(from);

            from.range.start.copy_to(prev.range.start, len);
            prev.range.end = from.range.end;
            self.occupied = None;
            return Some(prev);
        }

        let to = self.alloc(requested, align)?;
        from.range.start.copy_to_nonoverlapping(to.range.start, len);
        self.free(from.id);
        Some(to)
    }
}

/// The header of a region.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Header {
    // The range of the allocated region.
    range: Range,
    // The previous region.
    prev: Option<HeaderId>,
    // The next region.
    next: Option<HeaderId>,
}

impl Header {
    #[inline]
    fn capacity(&self) -> usize {
        // SAFETY: Both pointers are defined within the region.
        unsafe { self.range.end.byte_offset_from(self.range.start) as usize }
    }

    /// Test if region is aligned to `align`.
    #[inline]
    fn is_aligned(&self, align: usize) -> bool {
        self.range.start.align_offset(align) == 0
    }
}