rune_alloc/hashbrown/raw/mod.rs

1use core::alloc::Layout;
2use core::iter::FusedIterator;
3use core::marker::PhantomData;
4use core::mem;
5use core::mem::MaybeUninit;
6use core::ptr::NonNull;
7use core::{hint, ptr};
8
9use crate::hashbrown::scopeguard::{guard, ScopeGuard};
10
11use crate::alloc::{Allocator, Global, SizedTypeProperties};
12use crate::clone::TryClone;
13#[cfg(rune_nightly)]
14use crate::clone::TryCopy;
15use crate::error::{CustomError, Error};
16// Branch prediction hint. This is currently only available on nightly but it
17// consistently improves performance by 10-15%.
18use crate::hint::{likely, unlikely};
19use crate::ptr::invalid_mut;
20
21#[cfg(test)]
22use crate::testing::*;
23
24use super::{EqFn, ErrorOrInsertSlot, HasherFn};
25
26cfg_if! {
27    // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
28    // at once instead of 8. We don't bother with AVX since it would require
29    // runtime dispatch and wouldn't gain us much anyways: the probability of
30    // finding a match drops off drastically after the first few buckets.
31    //
32    // I attempted an implementation on ARM using NEON instructions, but it
33    // turns out that most NEON instructions have multi-cycle latency, which in
34    // the end outweighs any gains over the generic implementation.
35    if #[cfg(all(
36        target_feature = "sse2",
37        any(target_arch = "x86", target_arch = "x86_64"),
38        not(miri)
39    ))] {
40        mod sse2;
41        use sse2 as imp;
42    } else if #[cfg(all(target_arch = "aarch64", target_feature = "neon"))] {
43        mod neon;
44        use neon as imp;
45    } else {
46        mod generic;
47        use generic as imp;
48    }
49}
50
51mod bitmask;
52
53use self::bitmask::BitMaskIter;
54use self::imp::Group;
55
56#[inline]
57unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
58    to.offset_from(from) as usize
59}
60
61/// Control byte value for an empty bucket.
62const EMPTY: u8 = 0b1111_1111;
63
64/// Control byte value for a deleted bucket.
65const DELETED: u8 = 0b1000_0000;
66
67/// Checks whether a control byte represents a full bucket (top bit is clear).
68#[inline]
69fn is_full(ctrl: u8) -> bool {
70    ctrl & 0x80 == 0
71}
72
73/// Checks whether a control byte represents a special value (top bit is set).
74#[inline]
75fn is_special(ctrl: u8) -> bool {
76    ctrl & 0x80 != 0
77}
78
79/// Checks whether a special control value is EMPTY (just check 1 bit).
80#[inline]
81fn special_is_empty(ctrl: u8) -> bool {
82    debug_assert!(is_special(ctrl));
83    ctrl & 0x01 != 0
84}
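
// A small illustrative sketch (not from the upstream hashbrown source) of the
// control byte encoding described above: EMPTY (0xFF) and DELETED (0x80) both
// have the top bit set, only EMPTY has the low bit set, and any 7-bit `h2`
// value marks a full bucket.
#[test]
fn control_byte_encoding() {
    assert!(!is_full(EMPTY));
    assert!(!is_full(DELETED));
    assert!(is_special(EMPTY) && special_is_empty(EMPTY));
    assert!(is_special(DELETED) && !special_is_empty(DELETED));
    // Any 7-bit h2 value (top bit clear) denotes a full bucket.
    assert!(is_full(0x00));
    assert!(is_full(0x7f));
}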
85
86/// Primary hash function, used to select the initial bucket to probe from.
87#[inline]
88#[allow(clippy::cast_possible_truncation)]
89fn h1(hash: u64) -> usize {
90    // On 32-bit platforms we simply ignore the higher hash bits.
91    hash as usize
92}
93
94// Constant used by the h2 function to grab the top 7 bits of the hash.
95const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
96    mem::size_of::<usize>()
97} else {
98    mem::size_of::<u64>()
99};
100
101/// Secondary hash function, saved in the low 7 bits of the control byte.
102#[inline]
103#[allow(clippy::cast_possible_truncation)]
104fn h2(hash: u64) -> u8 {
105    // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
106    // value, some hash functions (such as FxHash) produce a usize result
107    // instead, which means that the top 32 bits are 0 on 32-bit platforms.
108    // So we use the MIN_HASH_LEN constant to handle this.
109    let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
110    (top7 & 0x7f) as u8 // truncation
111}
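
// A minimal sketch of how a hash is split between `h1` and `h2`: `h1` keeps the
// low bits (later masked by `bucket_mask` to pick a starting group), while `h2`
// keeps the top 7 bits that end up in the control byte. The exact shift depends
// on the platform via MIN_HASH_LEN, so only platform-independent values are
// checked here.
#[test]
fn h1_h2_split() {
    assert_eq!(h2(0), 0);
    assert_eq!(h2(u64::MAX), 0x7f);
    // `h1` simply truncates the hash to `usize`.
    assert_eq!(h1(0x1234), 0x1234_usize);
}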
112
113/// Probe sequence based on triangular numbers, which is guaranteed (since our
114/// table size is a power of two) to visit every group of elements exactly once.
115///
116/// A triangular probe has us jump by 1 more group every time. So first we
117/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
118/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
119///
120/// Proof that the probe will visit every group in the table:
121/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
122struct ProbeSeq {
123    pos: usize,
124    stride: usize,
125}
126
127impl ProbeSeq {
128    #[inline]
129    fn move_next(&mut self, bucket_mask: usize) {
130        // We should have found an empty bucket by now and ended the probe.
131        debug_assert!(
132            self.stride <= bucket_mask,
133            "Went past end of probe sequence"
134        );
135
136        self.stride += Group::WIDTH;
137        self.pos += self.stride;
138        self.pos &= bucket_mask;
139    }
140}
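
// An illustrative sanity-check sketch of the triangular probe described above.
// With the SSE2 group width of 16 and 64 buckets the group starts visited are
// 0, 16, 48, 32; in general the probe visits every group exactly once for any
// power-of-two table size, whatever `Group::WIDTH` the platform selects.
#[test]
fn triangular_probe_visits_every_group() {
    let buckets = 64_usize;
    let bucket_mask = buckets - 1;
    let groups = buckets / Group::WIDTH;
    let mut seq = ProbeSeq { pos: 0, stride: 0 };
    let mut seen = [false; 64];
    for i in 0..groups {
        let group = seq.pos / Group::WIDTH;
        assert!(!seen[group], "group visited twice");
        seen[group] = true;
        if i + 1 != groups {
            seq.move_next(bucket_mask);
        }
    }
    assert_eq!(seen.iter().filter(|&&s| s).count(), groups);
}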
141
142/// Returns the number of buckets needed to hold the given number of items,
143/// taking the maximum load factor into account.
144///
145/// Returns `None` if an overflow occurs.
146// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
147#[cfg_attr(target_os = "emscripten", inline(never))]
148#[cfg_attr(not(target_os = "emscripten"), inline)]
149fn capacity_to_buckets(cap: usize) -> Option<usize> {
150    debug_assert_ne!(cap, 0);
151
152    // For small tables we require at least 1 empty bucket so that lookups are
153    // guaranteed to terminate if an element doesn't exist in the table.
154    if cap < 8 {
155        // We don't bother with a table size of 2 buckets since that can only
156        // hold a single element. Instead we skip directly to a 4 bucket table
157        // which can hold 3 elements.
158        return Some(if cap < 4 { 4 } else { 8 });
159    }
160
161    // Otherwise require 1/8 buckets to be empty (87.5% load)
162    //
163    // Be careful when modifying this, calculate_layout relies on the
164    // overflow check here.
165    let adjusted_cap = cap.checked_mul(8)? / 7;
166
167    // Any overflows will have been caught by the checked_mul. Also, any
168    // rounding errors from the division above will be cleaned up by
169    // next_power_of_two (which can't overflow because of the previous division).
170    Some(adjusted_cap.next_power_of_two())
171}
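
// A few worked examples of the mapping above, stated as a sketch: small
// capacities snap to 4 or 8 buckets, and larger ones are padded to 8/7 of the
// requested capacity and rounded up to a power of two (e.g. 12 -> 96/7 = 13 ->
// 16 buckets, 100 -> 800/7 = 114 -> 128 buckets).
#[test]
fn capacity_to_buckets_examples() {
    assert_eq!(capacity_to_buckets(1), Some(4));
    assert_eq!(capacity_to_buckets(3), Some(4));
    assert_eq!(capacity_to_buckets(4), Some(8));
    assert_eq!(capacity_to_buckets(12), Some(16));
    assert_eq!(capacity_to_buckets(100), Some(128));
    // The multiplication by 8 overflows, so the request is reported as impossible.
    assert_eq!(capacity_to_buckets(usize::MAX), None);
}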
172
173/// Returns the maximum effective capacity for the given bucket mask, taking
174/// the maximum load factor into account.
175#[inline]
176fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
177    if bucket_mask < 8 {
178        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
179        // Keep in mind that the bucket mask is one less than the bucket count.
180        bucket_mask
181    } else {
182        // For larger tables we reserve 12.5% of the slots as empty.
183        ((bucket_mask + 1) / 8) * 7
184    }
185}
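
// A sketch of the inverse relationship: small tables keep one slot empty,
// larger tables keep 1/8 of the slots empty, and the bucket count chosen by
// `capacity_to_buckets` is always enough to hold the requested capacity under
// this load factor.
#[test]
fn bucket_mask_to_capacity_examples() {
    assert_eq!(bucket_mask_to_capacity(4 - 1), 3);
    assert_eq!(bucket_mask_to_capacity(8 - 1), 7);
    assert_eq!(bucket_mask_to_capacity(16 - 1), 14);
    assert_eq!(bucket_mask_to_capacity(128 - 1), 112);

    // Round trip: the buckets chosen for a capacity can hold that capacity.
    for cap in [1, 7, 12, 100, 1000] {
        let buckets = capacity_to_buckets(cap).unwrap();
        assert!(bucket_mask_to_capacity(buckets - 1) >= cap);
    }
}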
186
187/// Helper which allows the max calculation for ctrl_align to be statically computed for each T
188/// while keeping the rest of `calculate_layout_for` independent of `T`
189#[derive(Copy, Clone)]
190struct TableLayout {
191    size: usize,
192    ctrl_align: usize,
193}
194
195impl TableLayout {
196    #[inline]
197    const fn new<T>() -> Self {
198        let layout = Layout::new::<T>();
199        Self {
200            size: layout.size(),
201            ctrl_align: if layout.align() > Group::WIDTH {
202                layout.align()
203            } else {
204                Group::WIDTH
205            },
206        }
207    }
208
209    #[inline]
210    fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
211        debug_assert!(buckets.is_power_of_two());
212
213        let TableLayout { size, ctrl_align } = self;
214        // Manual layout calculation since Layout methods are not yet stable.
215        let ctrl_offset =
216            size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
217        let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
218
219        // We need an additional check to ensure that the allocation doesn't
220        // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
221        if len > isize::MAX as usize - (ctrl_align - 1) {
222            return None;
223        }
224
225        Some((
226            unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
227            ctrl_offset,
228        ))
229    }
230}
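
// A sketch of what `calculate_layout_for` produces: the data part comes first
// (`size * buckets` bytes), the control bytes start at the next `ctrl_align`
// boundary, and there are `buckets + Group::WIDTH` of them. With SSE2
// (`Group::WIDTH == 16`) and `T = u64`, 8 buckets give `ctrl_offset == 64` and
// a total size of 88 bytes; the exact numbers vary with the platform's group
// width, so only the structural invariants are asserted here.
#[test]
fn table_layout_example() {
    let (layout, ctrl_offset) = TableLayout::new::<u64>()
        .calculate_layout_for(8)
        .unwrap();
    // Control bytes start at an aligned offset at or after the data part.
    assert_eq!(ctrl_offset % layout.align(), 0);
    assert!(ctrl_offset >= 8 * mem::size_of::<u64>());
    assert_eq!(layout.size(), ctrl_offset + 8 + Group::WIDTH);
}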
231
232/// A reference to an empty bucket into which an element can be inserted.
233pub struct InsertSlot {
234    index: usize,
235}
236
237/// A reference to a hash table bucket containing a `T`.
238///
239/// This is usually just a pointer to the element itself. However if the element
240/// is a ZST, then we instead track the index of the element in the table so
241/// that `erase` works properly.
242pub struct Bucket<T> {
243    // Actually this is a pointer to the next element rather than to the element
244    // itself, which is needed to maintain pointer arithmetic invariants;
245    // keeping a direct pointer to the element introduces difficulty.
246    // Using `NonNull` for variance and niche layout.
247    ptr: NonNull<T>,
248}
249
250// This Send impl is needed for rayon support. This is safe since Bucket is
251// never exposed in a public API.
252unsafe impl<T> Send for Bucket<T> {}
253
254impl<T> Clone for Bucket<T> {
255    #[inline]
256    fn clone(&self) -> Self {
257        Self { ptr: self.ptr }
258    }
259}
260
261impl<T> Bucket<T> {
262    /// Creates a [`Bucket`] that contains a pointer to the data.
263    /// The pointer calculation is performed by calculating the
264    /// offset from given `base` pointer (convenience for
265    /// `base.as_ptr().sub(index)`).
266    ///
267    /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
268    /// offset of `3 * size_of::<T>()` bytes.
269    ///
270    /// If the `T` is a ZST, then we instead track the index of the element
271    /// in the table so that `erase` works properly (return
272    /// `NonNull::new_unchecked((index + 1) as *mut T)`)
273    ///
274    /// # Safety
275    ///
276    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
277    /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
278    /// rules of [`NonNull::new_unchecked`] function.
279    ///
280    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
281    /// and [`NonNull::new_unchecked`] function, as well as for the correct
282    /// logic of the work of this crate, the following rules are necessary and
283    /// sufficient:
284    ///
285    /// * the `base` pointer must not be `dangling` and must point to the
286    ///   end of the first `value element` from the `data part` of the table, i.e.
287    ///   must be the pointer that returned by [`RawTable::data_end`] or by
288    ///   [`RawTableInner::data_end<T>`];
289    ///
290    /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
291    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
292    ///   must be no greater than the number returned by the function
293    ///   [`RawTable::buckets`] or [`RawTableInner::buckets`].
294    ///
295    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
296    /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
297    /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
298    /// must be no greater than the number returned by the function
299    /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
300    ///
301    /// [`Bucket`]: crate::raw::Bucket
302    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
303    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
304    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
305    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
306    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
307    /// [`RawTableInner::buckets`]: RawTableInner::buckets
308    #[inline]
309    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
310        // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
311        // the data part of the table (we start counting from "0", so that
312        // in the expression T[last], the "last" index actually is one less than the
313        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
314        //
315        //                   `from_base_index(base, 1).as_ptr()` returns a pointer that
316        //                   points here in the data part of the table
317        //                   (to the start of T1)
318        //                        |
319        //                        |        `base: NonNull<T>` must point here
320        //                        |         (to the end of T0 or to the start of C0)
321        //                        v         v
322        // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
323        //                           ^
324        //                           `from_base_index(base, 1)` returns a pointer
325        //                           that points here in the data part of the table
326        //                           (to the end of T1)
327        //
328        // where: T0...Tlast - our stored data; C0...Clast - control bytes
329        // or metadata for data.
330        let ptr = if T::IS_ZST {
331            // won't overflow because index must be less than length (bucket_mask)
332            // and bucket_mask is guaranteed to be less than `isize::MAX`
333            // (see TableLayout::calculate_layout_for method)
334            invalid_mut(index + 1)
335        } else {
336            base.as_ptr().sub(index)
337        };
338        Self {
339            ptr: NonNull::new_unchecked(ptr),
340        }
341    }
342
343    /// Calculates the index of a [`Bucket`] as distance between two pointers
344    /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
345    /// The returned value is in units of T: the distance in bytes divided by
346    /// [`core::mem::size_of::<T>()`].
347    ///
348    /// If the `T` is a ZST, then we return the index of the element in
349    /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
350    ///
351    /// This function is the inverse of [`from_base_index`].
352    ///
353    /// # Safety
354    ///
355    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
356    /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
357    ///
358    /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
359    /// method, as well as for the correct logic of the work of this crate, the
360    /// following rules are necessary and sufficient:
361    ///
362    /// * `base` contained pointer must not be `dangling` and must point to the
363    ///   end of the first `element` from the `data part` of the table, i.e.
364    ///   must be a pointer that is returned by [`RawTable::data_end`] or by
365    ///   [`RawTableInner::data_end<T>`];
366    ///
367    /// * `self` also must not contain a dangling pointer;
368    ///
369    /// * both `self` and `base` must be created from the same [`RawTable`]
370    ///   (or [`RawTableInner`]).
371    ///
372    /// If `mem::size_of::<T>() == 0`, this function is always safe.
373    ///
374    /// [`Bucket`]: crate::raw::Bucket
375    /// [`from_base_index`]: crate::raw::Bucket::from_base_index
376    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
377    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
378    /// [`RawTable`]: crate::raw::RawTable
379    /// [`RawTableInner`]: RawTableInner
380    /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
381    #[inline]
382    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
383        // If mem::size_of::<T>() != 0 then return an index under which we used to store the
384        // `element` in the data part of the table (we start counting from "0", so
385        // that in the expression T[last], the "last" index actually is one less than the
386        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
387        // For example, for the 5th element in the table the calculation is performed like this:
388        //
389        //                        mem::size_of::<T>()
390        //                          |
391        //                          |         `self = from_base_index(base, 5)` that returns pointer
392        //                          |         that points here in the data part of the table
393        //                          |         (to the end of T5)
394        //                          |           |                    `base: NonNull<T>` must point here
395        //                          v           |                    (to the end of T0 or to the start of C0)
396        //                        /???\         v                      v
397        // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
398        //                                      \__________  __________/
399        //                                                 \/
400        //                                     `bucket.to_base_index(base)` = 5
401        //                                     (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
402        //
403        // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
404        if T::IS_ZST {
405            // this can not be UB
406            self.ptr.as_ptr() as usize - 1
407        } else {
408            offset_from(base.as_ptr(), self.ptr.as_ptr())
409        }
410    }
411
412    /// Acquires the underlying raw pointer `*mut T` to `data`.
413    ///
414    /// # Note
415    ///
416    /// If `T` is not [`Copy`], do not use `*mut T` methods that can call the
417    /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
418    /// properly dropping the data also requires clearing the `data` control bytes. If we
419    /// drop the data but do not clear the `data` control byte, it leads to a double drop
420    /// when [`RawTable`] goes out of scope.
421    ///
422    /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
423    /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
424    /// will not re-evaluate where the new value should go, meaning the value may become
425    /// "lost" if its location does not reflect its state.
426    ///
427    /// [`RawTable`]: crate::hashbrown::raw::RawTable
428    /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
429    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
430    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
431    ///
432    /// # Examples
433    ///
434    /// ```
435    /// use core::hash::{BuildHasher, Hash};
436    /// use core::convert::Infallible;
437    ///
438    /// use rune::alloc::hashbrown::raw::{Bucket, RawTable};
439    ///
440    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
441    ///
442    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
443    ///     use core::hash::Hasher;
444    ///     let mut state = hash_builder.build_hasher();
445    ///     key.hash(&mut state);
446    ///     state.finish()
447    /// }
448    ///
449    /// let hash_builder = NewHashBuilder::default();
450    /// let mut table = RawTable::new();
451    ///
452    /// let value = ("a", 100);
453    /// let hash = make_hash(&hash_builder, &value.0);
454    ///
455    ///     table.insert(&mut (), hash, value.clone(), |_: &mut (), val: &(&str, _)| Ok::<_, Infallible>(make_hash(&hash_builder, &val.0))).unwrap();
456    ///
457    /// let bucket: Bucket<(&str, i32)> = table.find(&mut (), hash, |_: &mut (), (k1, _): &(&str, _)| Ok::<_, Infallible>(k1 == &value.0)).unwrap().unwrap();
458    ///
459    /// assert_eq!(unsafe { &*bucket.as_ptr() }, &("a", 100));
460    /// ```
461    #[inline]
462    pub fn as_ptr(&self) -> *mut T {
463        if T::IS_ZST {
464            // Just return an arbitrary ZST pointer which is properly aligned
465            // invalid pointer is good enough for ZST
466            invalid_mut(mem::align_of::<T>())
467        } else {
468            unsafe { self.ptr.as_ptr().sub(1) }
469        }
470    }
471
472    /// Create a new [`Bucket`] that is offset from the `self` by the given
473    /// `offset`. The pointer calculation is performed by calculating the
474    /// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
475    /// This function is used for iterators.
476    ///
477    /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
478    /// offset of `3 * size_of::<T>()` bytes.
479    ///
480    /// # Safety
481    ///
482    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
483    /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
484    /// rules of [`NonNull::new_unchecked`] function.
485    ///
486    /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
487    /// and [`NonNull::new_unchecked`] function, as well as for the correct
488    /// logic of the work of this crate, the following rules are necessary and
489    /// sufficient:
490    ///
491    /// * `self` contained pointer must not be `dangling`;
492    ///
493    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
494    ///   i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
495    ///   words, `self.to_base_index() + offset + 1` must be no greater than the number returned
496    ///   by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
497    ///
498    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
499    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
500    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
501    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
502    /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
503    ///
504    /// [`Bucket`]: crate::raw::Bucket
505    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
506    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
507    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
508    /// [`RawTableInner::buckets`]: RawTableInner::buckets
509    #[inline]
510    unsafe fn next_n(&self, offset: usize) -> Self {
511        let ptr = if T::IS_ZST {
512            // invalid pointer is good enough for ZST
513            invalid_mut(self.ptr.as_ptr() as usize + offset)
514        } else {
515            self.ptr.as_ptr().sub(offset)
516        };
517        Self {
518            ptr: NonNull::new_unchecked(ptr),
519        }
520    }
521
522    /// Executes the destructor (if any) of the pointed-to `data`.
523    ///
524    /// # Safety
525    ///
526    /// See [`ptr::drop_in_place`] for safety concerns.
527    ///
528    /// You should use [`RawTable::erase`] instead of this function,
529    /// or be careful when calling this function directly, because properly
530    /// dropping the data also requires clearing the `data` control bytes.
531    /// If we drop the data but do not erase the `data` control byte, it
532    /// leads to a double drop when [`RawTable`] goes out of scope.
533    ///
534    /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
535    /// [`RawTable`]: crate::raw::RawTable
536    /// [`RawTable::erase`]: crate::raw::RawTable::erase
537    #[cfg_attr(feature = "inline-more", inline)]
538    pub(crate) unsafe fn drop(&self) {
539        self.as_ptr().drop_in_place();
540    }
541
542    /// Reads the `value` from `self` without moving it. This leaves the
543    /// memory in `self` unchanged.
544    ///
545    /// # Safety
546    ///
547    /// See [`ptr::read`] for safety concerns.
548    ///
549    /// You should use [`RawTable::remove`] instead of this function,
550    /// or be careful when calling this function directly, because the compiler
551    /// calls the destructor of `T` when the read `value` goes out of scope. This
552    /// can cause a double drop when [`RawTable`] goes out of scope, because
553    /// the `data` control byte has not been erased.
554    ///
555    /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
556    /// [`RawTable`]: crate::raw::RawTable
557    /// [`RawTable::remove`]: crate::raw::RawTable::remove
558    #[inline]
559    pub(crate) unsafe fn read(&self) -> T {
560        self.as_ptr().read()
561    }
562
563    /// Overwrites a memory location with the given `value` without reading
564    /// or dropping the old value (like [`ptr::write`] function).
565    ///
566    /// # Safety
567    ///
568    /// See [`ptr::write`] for safety concerns.
569    ///
570    /// # Note
571    ///
572    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
573    /// those for the old `T` value, as the map will not re-evaluate where the new
574    /// value should go, meaning the value may become "lost" if its location
575    /// does not reflect its state.
576    ///
577    /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
578    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
579    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
580    #[inline]
581    pub(crate) unsafe fn write(&self, val: T) {
582        self.as_ptr().write(val);
583    }
584
585    /// Returns a shared immutable reference to the `value`.
586    ///
587    /// # Safety
588    ///
589    /// See [`NonNull::as_ref`] for safety concerns.
590    ///
591    /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
592    ///
593    /// # Examples
594    ///
595    /// ```
596    /// use core::hash::{BuildHasher, Hash};
597    /// use core::convert::Infallible;
598    ///
599    /// use rune::alloc::hashbrown::raw::{Bucket, RawTable};
600    ///
601    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
602    ///
603    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
604    ///     use core::hash::Hasher;
605    ///     let mut state = hash_builder.build_hasher();
606    ///     key.hash(&mut state);
607    ///     state.finish()
608    /// }
609    ///
610    /// let hash_builder = NewHashBuilder::default();
611    /// let mut table = RawTable::new();
612    ///
613    /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
614    /// let hash = make_hash(&hash_builder, &value.0);
615    ///
616    /// table.insert(&mut (), hash, value.clone(), |_: &mut (), (val, _): &(&str, _)| Ok::<_, Infallible>(make_hash(&hash_builder, val))).unwrap();
617    ///
618    /// let bucket: Bucket<(&str, String)> = table.find(&mut (), hash, |_: &mut (), (k, _): &(&str, _)| Ok::<_, Infallible>(k == &value.0)).unwrap().unwrap();
619    ///
620    /// assert_eq!(
621    ///     unsafe { bucket.as_ref() },
622    ///     &("A pony", "is a small horse".to_owned())
623    /// );
624    /// ```
625    #[inline]
626    pub unsafe fn as_ref<'a>(&self) -> &'a T {
627        &*self.as_ptr()
628    }
629
630    /// Returns a unique mutable reference to the `value`.
631    ///
632    /// # Safety
633    ///
634    /// See [`NonNull::as_mut`] for safety concerns.
635    ///
636    /// # Note
637    ///
638    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
639    /// those for the old `T` value, as the map will not re-evaluate where the new
640    /// value should go, meaning the value may become "lost" if its location
641    /// does not reflect its state.
642    ///
643    /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
644    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
645    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
646    ///
647    /// # Examples
648    ///
649    /// ```
650    /// use core::hash::{BuildHasher, Hash};
651    /// use core::convert::Infallible;
652    ///
653    /// use rune::alloc::hashbrown::raw::{Bucket, RawTable};
654    ///
655    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
656    ///
657    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
658    ///     use core::hash::Hasher;
659    ///     let mut state = hash_builder.build_hasher();
660    ///     key.hash(&mut state);
661    ///     state.finish()
662    /// }
663    ///
664    /// let hash_builder = NewHashBuilder::default();
665    /// let mut table = RawTable::new();
666    ///
667    /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
668    /// let hash = make_hash(&hash_builder, &value.0);
669    ///
670    /// table.insert(&mut (), hash, value.clone(), |_: &mut (), (k, _): &(&str, _)| Ok::<_, Infallible>(make_hash(&hash_builder, k))).unwrap();
671    ///
672    /// let bucket: Bucket<(&str, String)> = table.find(&mut (), hash, |_: &mut (), (k, _): &(&str, _)| Ok::<_, Infallible>(k == &value.0)).unwrap().unwrap();
673    ///
674    /// unsafe {
675    ///     bucket
676    ///         .as_mut()
677    ///         .1
678    ///         .push_str(" less than 147 cm at the withers")
679    /// };
680    /// assert_eq!(
681    ///     unsafe { bucket.as_ref() },
682    ///     &(
683    ///         "A pony",
684    ///         "is a small horse less than 147 cm at the withers".to_owned()
685    ///     )
686    /// );
687    /// # Ok::<_, rune::alloc::Error>(())
688    /// ```
689    #[inline]
690    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
691        &mut *self.as_ptr()
692    }
693
694    /// Copies `size_of::<T>()` bytes from `other` to `self`. The source
695    /// and destination may *not* overlap.
696    ///
697    /// # Safety
698    ///
699    /// See [`ptr::copy_nonoverlapping`] for safety concerns.
700    ///
701    /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
702    /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
703    /// in the region beginning at `*self` and the region beginning at `*other` can
704    /// [violate memory safety].
705    ///
706    /// # Note
707    ///
708    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
709    /// those for the old `T` value, as the map will not re-evaluate where the new
710    /// value should go, meaning the value may become "lost" if its location
711    /// does not reflect its state.
712    ///
713    /// [`ptr::copy_nonoverlapping`]: https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html
714    /// [`read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
715    /// [violate memory safety]: https://doc.rust-lang.org/std/ptr/fn.read.html#ownership-of-the-returned-value
716    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
717    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
718    #[inline]
719    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
720        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
721    }
722}
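
// A hedged sketch of how a `Bucket` relates to its index: inserting a value
// yields a bucket whose `bucket_index` is below `buckets()`, and looking the
// bucket up again by that index points at the same element. The trivial
// "hash is the value" hasher below is only for illustration; it mirrors the
// closure shape used in the doc examples above.
#[test]
fn bucket_index_round_trip() {
    use core::convert::Infallible;

    let mut table = RawTable::<u64>::new();
    let hash = 77_u64;

    let bucket = table
        .insert(&mut (), hash, 77, |_: &mut (), v: &u64| {
            Ok::<_, Infallible>(*v)
        })
        .unwrap();

    let index = unsafe { table.bucket_index(&bucket) };
    assert!(index < table.buckets());
    assert_eq!(unsafe { table.bucket(index).as_ref() }, &77);
}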
723
724/// A raw hash table with an unsafe API.
725pub struct RawTable<T, A: Allocator = Global> {
726    table: RawTableInner,
727    alloc: A,
728    // Tell dropck that we own instances of T.
729    marker: PhantomData<T>,
730}
731
732/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
733/// of how many different key-value types are used.
734struct RawTableInner {
735    // Mask to get an index from a hash value. The value is one less than the
736    // number of buckets in the table.
737    bucket_mask: usize,
738
739    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
740    //                                ^ points here
741    ctrl: NonNull<u8>,
742
743    // Number of elements that can be inserted before we need to grow the table
744    growth_left: usize,
745
746    // Number of elements in the table, only really used by len()
747    items: usize,
748}
749
750impl<T> RawTable<T, Global> {
751    /// Creates a new empty hash table without allocating any memory.
752    ///
753    /// In effect this returns a table with exactly 1 bucket. However we can
754    /// leave the data pointer dangling since that bucket is never written to
755    /// due to our load factor forcing us to always have at least 1 free bucket.
756    #[inline]
757    pub const fn new() -> Self {
758        Self {
759            table: RawTableInner::NEW,
760            alloc: Global,
761            marker: PhantomData,
762        }
763    }
764
765    /// Attempts to allocate a new hash table with at least enough capacity
766    /// for inserting the given number of elements without reallocating.
767    pub fn try_with_capacity(capacity: usize) -> Result<Self, Error> {
768        Self::try_with_capacity_in(capacity, Global)
769    }
770}
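
// A short sketch of the capacity guarantee provided by `try_with_capacity`:
// a request for 10 elements fits in 16 buckets at the 7/8 load factor, and the
// table starts out empty. Only accessors that appear in this module
// (`buckets`, `is_empty`) are assumed here.
#[test]
fn try_with_capacity_reserves_requested_room() {
    let table = RawTable::<(u32, u32)>::try_with_capacity(10).unwrap();
    assert!(bucket_mask_to_capacity(table.buckets() - 1) >= 10);
    assert!(table.is_empty());
}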
771
772impl<T, A: Allocator> RawTable<T, A> {
773    const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();
774
775    /// Creates a new empty hash table without allocating any memory, using the
776    /// given allocator.
777    ///
778    /// In effect this returns a table with exactly 1 bucket. However we can
779    /// leave the data pointer dangling since that bucket is never written to
780    /// due to our load factor forcing us to always have at least 1 free bucket.
781    #[inline]
782    pub const fn new_in(alloc: A) -> Self {
783        Self {
784            table: RawTableInner::NEW,
785            alloc,
786            marker: PhantomData,
787        }
788    }
789
790    /// Allocates a new hash table with the given number of buckets.
791    ///
792    /// The control bytes are left uninitialized.
793    #[cfg_attr(feature = "inline-more", inline)]
794    unsafe fn new_uninitialized(alloc: A, buckets: usize) -> Result<Self, Error> {
795        debug_assert!(buckets.is_power_of_two());
796
797        Ok(Self {
798            table: RawTableInner::new_uninitialized(&alloc, Self::TABLE_LAYOUT, buckets)?,
799            alloc,
800            marker: PhantomData,
801        })
802    }
803
804    /// Allocates a new hash table using the given allocator, with at least enough capacity for
805    /// inserting the given number of elements without reallocating.
806    pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, Error> {
807        Ok(Self {
808            table: RawTableInner::try_with_capacity(&alloc, Self::TABLE_LAYOUT, capacity)?,
809            alloc,
810            marker: PhantomData,
811        })
812    }
813
814    /// Returns a reference to the underlying allocator.
815    #[inline]
816    pub fn allocator(&self) -> &A {
817        &self.alloc
818    }
819
820    /// Returns a pointer to one past the last element of the data table.
821    #[inline]
822    pub unsafe fn data_end(&self) -> NonNull<T> {
823        NonNull::new_unchecked(self.table.ctrl.as_ptr().cast())
824    }
825
826    /// Returns a pointer to the start of the data table.
827    #[inline]
828    #[cfg(any(feature = "raw", rune_nightly))]
829    pub unsafe fn data_start(&self) -> NonNull<T> {
830        NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
831    }
832
833    /// Returns information about the memory allocated by the table.
834    ///
835    /// `RawTable` allocates a single memory block to store both data and metadata.
836    /// This function returns the allocation size and alignment and a pointer to the beginning of the area.
837    /// These are the arguments which will be passed to `dealloc` when the table is dropped.
838    ///
839    /// This function might be useful for memory profiling.
840    #[inline]
841    pub fn allocation_info(&self) -> (NonNull<u8>, Layout) {
842        // SAFETY: We use the same `table_layout` that was used to allocate
843        // this table.
844        unsafe { self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) }
845    }
846
847    /// Returns the index of a bucket from a `Bucket`.
848    #[inline]
849    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
850        bucket.to_base_index(self.data_end())
851    }
852
853    /// Returns a pointer to an element in the table.
854    #[inline]
855    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
856        debug_assert_ne!(self.table.bucket_mask, 0);
857        debug_assert!(index < self.buckets());
858        Bucket::from_base_index(self.data_end(), index)
859    }
860
861    /// Erases an element from the table without dropping it.
862    #[cfg_attr(feature = "inline-more", inline)]
863    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
864        let index = self.bucket_index(item);
865        self.table.erase(index);
866    }
867
868    /// Erases an element from the table, dropping it in place.
869    #[cfg_attr(feature = "inline-more", inline)]
870    #[allow(clippy::needless_pass_by_value)]
871    pub unsafe fn erase(&mut self, item: Bucket<T>) {
872        // Erase the element from the table first since drop might panic.
873        self.erase_no_drop(&item);
874        item.drop();
875    }
876
877    /// Finds and erases an element from the table, dropping it in place.
878    /// Returns true if an element was found.
879    #[cfg_attr(feature = "inline-more", inline)]
880    pub fn erase_entry<C: ?Sized, E>(
881        &mut self,
882        cx: &mut C,
883        hash: u64,
884        eq: impl EqFn<C, T, E>,
885    ) -> Result<bool, E> {
886        // Avoid `Option::map` because it bloats LLVM IR.
887        if let Some(bucket) = self.find(cx, hash, eq)? {
888            unsafe {
889                self.erase(bucket);
890            }
891            Ok(true)
892        } else {
893            Ok(false)
894        }
895    }
896
897    /// Removes an element from the table, returning it.
898    ///
899    /// This also returns an `InsertSlot` pointing to the newly free bucket.
900    #[cfg_attr(feature = "inline-more", inline)]
901    #[allow(clippy::needless_pass_by_value)]
902    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
903        self.erase_no_drop(&item);
904        (
905            item.read(),
906            InsertSlot {
907                index: self.bucket_index(&item),
908            },
909        )
910    }
911
912    /// Finds and removes an element from the table, returning it.
913    #[cfg_attr(feature = "inline-more", inline)]
914    pub fn remove_entry<C: ?Sized, E>(
915        &mut self,
916        cx: &mut C,
917        hash: u64,
918        eq: impl EqFn<C, T, E>,
919    ) -> Result<Option<T>, E> {
920        // Avoid `Option::map` because it bloats LLVM IR.
921        Ok(match self.find(cx, hash, eq)? {
922            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
923            None => None,
924        })
925    }
926
927    /// Marks all table buckets as empty without dropping their contents.
928    #[cfg_attr(feature = "inline-more", inline)]
929    pub fn clear_no_drop(&mut self) {
930        self.table.clear_no_drop();
931    }
932
933    /// Removes all elements from the table without freeing the backing memory.
934    #[cfg_attr(feature = "inline-more", inline)]
935    pub fn clear(&mut self) {
936        if self.is_empty() {
937            // Special case empty table to avoid surprising O(capacity) time.
938            return;
939        }
940        // Ensure that the table is reset even if one of the drops panic
941        let mut self_ = guard(self, |self_| self_.clear_no_drop());
942        unsafe {
943            // SAFETY: ScopeGuard sets to zero the `items` field of the table
944            // even in case of panic during the dropping of the elements so
945            // that there will be no double drop of the elements.
946            self_.table.drop_elements::<T>();
947        }
948    }
949
950    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
951    #[cfg_attr(feature = "inline-more", inline)]
952    pub fn shrink_to<C: ?Sized, E>(
953        &mut self,
954        cx: &mut C,
955        min_size: usize,
956        hasher: impl HasherFn<C, T, E>,
957    ) -> Result<(), CustomError<E>> {
958        // Calculate the minimal number of elements that we need to reserve
959        // space for.
960        let min_size = usize::max(self.table.items, min_size);
961        if min_size == 0 {
962            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
963            unsafe {
964                // SAFETY:
965                // 1. We call the function only once;
966                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
967                //    and [`TableLayout`] that were used to allocate this table.
968                // 3. If any elements' drop function panics, then there will only be a memory leak,
969                //    because we have replaced the inner table with a new one.
970                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
971            }
972            return Ok(());
973        }
974
975        // Calculate the number of buckets that we need for this number of
976        // elements. If the calculation overflows then the requested bucket
977        // count must be larger than what we have right now and nothing needs to be
978        // done.
979        let min_buckets = match capacity_to_buckets(min_size) {
980            Some(buckets) => buckets,
981            None => return Ok(()),
982        };
983
984        // If we have more buckets than we need, shrink the table.
985        if min_buckets < self.buckets() {
986            // Fast path if the table is empty
987            if self.table.items == 0 {
988                let new_inner =
989                    RawTableInner::try_with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size)?;
990                let mut old_inner = mem::replace(&mut self.table, new_inner);
991                unsafe {
992                    // SAFETY:
993                    // 1. We call the function only once;
994                    // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
995                    //    and [`TableLayout`] that were used to allocate this table.
996                    // 3. If any elements' drop function panics, then there will only be a memory leak,
997                    //    because we have replaced the inner table with a new one.
998                    old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
999                }
1000            } else {
1001                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1002                unsafe {
1003                    // SAFETY:
1004                    // 1. We know for sure that `min_size >= self.table.items`.
1005                    // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1006                    //    we never exposed RawTable::new_uninitialized in a public API.
1007                    self.resize(cx, min_size, hasher)?;
1008                }
1009            }
1010        }
1011
1012        Ok(())
1013    }
1014
1015    /// Ensures that at least `additional` items can be inserted into the table
1016    /// without reallocation.
1017    #[cfg_attr(feature = "inline-more", inline)]
1018    pub fn reserve<C: ?Sized, E>(
1019        &mut self,
1020        cx: &mut C,
1021        additional: usize,
1022        hasher: impl HasherFn<C, T, E>,
1023    ) -> Result<(), CustomError<E>> {
1024        if unlikely(additional > self.table.growth_left) {
1025            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1026            unsafe {
1027                // SAFETY: The [`RawTableInner`] must already have properly initialized control
1028                // bytes since we never exposed RawTable::new_uninitialized in a public API.
1029                self.reserve_rehash(cx, additional, hasher)?;
1030            }
1031        }
1032
1033        Ok(())
1034    }
1035
1036    /// Tries to ensure that at least `additional` items can be inserted into
1037    /// the table without reallocation.
1038    #[cfg_attr(feature = "inline-more", inline)]
1039    pub fn try_reserve<C: ?Sized, E>(
1040        &mut self,
1041        cx: &mut C,
1042        additional: usize,
1043        hasher: impl HasherFn<C, T, E>,
1044    ) -> Result<(), CustomError<E>> {
1045        if additional > self.table.growth_left {
1046            // SAFETY: The [`RawTableInner`] must already have properly initialized control
1047            // bytes since we never exposed RawTable::new_uninitialized in a public API.
1048            unsafe { self.reserve_rehash(cx, additional, hasher) }
1049        } else {
1050            Ok(())
1051        }
1052    }
1053
1054    /// Out-of-line slow path for `reserve` and `try_reserve`.
1055    ///
1056    /// # Safety
1057    ///
1058    /// The [`RawTableInner`] must have properly initialized control bytes,
1059    /// otherwise calling this function results in [`undefined behavior`]
1060    ///
1061    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1062    #[cold]
1063    #[inline(never)]
1064    unsafe fn reserve_rehash<C: ?Sized, E>(
1065        &mut self,
1066        cx: &mut C,
1067        additional: usize,
1068        hasher: impl HasherFn<C, T, E>,
1069    ) -> Result<(), CustomError<E>> {
1070        unsafe {
1071            // SAFETY:
1072            // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1073            //    [`TableLayout`] that were used to allocate this table.
1074            // 2. The `drop` function is the actual drop function of the elements stored in
1075            //    the table.
1076            // 3. The caller ensures that the control bytes of the `RawTableInner`
1077            //    are already initialized.
1078            self.table.reserve_rehash_inner(
1079                cx,
1080                &self.alloc,
1081                additional,
1082                &|cx, table, index| hasher.hash(cx, table.bucket::<T>(index).as_ref()),
1083                Self::TABLE_LAYOUT,
1084                if T::NEEDS_DROP {
1085                    Some(mem::transmute::<unsafe fn(*mut T), fn(*mut u8)>(
1086                        ptr::drop_in_place::<T> as unsafe fn(*mut T),
1087                    ))
1088                } else {
1089                    None
1090                },
1091            )
1092        }
1093    }
1094
1095    /// Allocates a new table of a different size and moves the contents of the
1096    /// current table into it.
1097    ///
1098    /// # Safety
1099    ///
1100    /// The [`RawTableInner`] must have properly initialized control bytes,
1101    /// otherwise calling this function results in [`undefined behavior`]
1102    ///
1103    /// The caller of this function must ensure that `capacity >= self.table.items`
1104    /// otherwise:
1105    ///
1106    /// * If `self.table.items != 0`, calling this function with `capacity`
1107    ///   equal to 0 (`capacity == 0`) results in [`undefined behavior`].
1108    ///
1109    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
1110    ///   `self.table.items > capacity_to_buckets(capacity)`
1111    ///   calling this function results in [`undefined behavior`].
1112    ///
1113    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
1114    ///   `self.table.items > capacity_to_buckets(capacity)`
1115    ///   calling this function will never return (it will go into an
1116    ///   infinite loop).
1117    ///
1118    /// See [`RawTableInner::find_insert_slot`] for more information.
1119    ///
1120    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
1121    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1122    unsafe fn resize<C: ?Sized, E>(
1123        &mut self,
1124        cx: &mut C,
1125        capacity: usize,
1126        hasher: impl HasherFn<C, T, E>,
1127    ) -> Result<(), CustomError<E>> {
1128        // SAFETY:
1129        // 1. The caller of this function guarantees that `capacity >= self.table.items`.
1130        // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1131        //    [`TableLayout`] that were used to allocate this table.
1132        // 3. The caller ensures that the control bytes of the `RawTableInner`
1133        //    are already initialized.
1134        self.table.resize_inner(
1135            cx,
1136            &self.alloc,
1137            capacity,
1138            &move |cx, table, index| hasher.hash(cx, table.bucket::<T>(index).as_ref()),
1139            Self::TABLE_LAYOUT,
1140        )
1141    }
1142
1143    /// Inserts a new element into the table, and returns its raw bucket.
1144    ///
1145    /// This does not check if the given element already exists in the table.
1146    #[cfg_attr(feature = "inline-more", inline)]
1147    pub fn insert<C: ?Sized, E>(
1148        &mut self,
1149        cx: &mut C,
1150        hash: u64,
1151        value: T,
1152        hasher: impl HasherFn<C, T, E>,
1153    ) -> Result<Bucket<T>, CustomError<E>> {
1154        unsafe {
1155            let mut slot = self.table.find_insert_slot(hash);
1156
1157            // We can avoid growing the table once we have reached our load
1158            // factor if we are replacing a tombstone. This works since the
1159            // number of EMPTY slots does not change in this case.
1160            let old_ctrl = *self.table.ctrl(slot.index);
1161            if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
1162                self.reserve(cx, 1, hasher)?;
1163                slot = self.table.find_insert_slot(hash);
1164            }
1165
1166            Ok(self.insert_in_slot(hash, slot, value))
1167        }
1168    }
1169
1170    /// Attempts to insert a new element without growing the table and return its raw bucket.
1171    ///
1172    /// Returns an `Err` containing the given element if inserting it would require growing the
1173    /// table.
1174    ///
1175    /// This does not check if the given element already exists in the table.
1176    #[cfg_attr(feature = "inline-more", inline)]
1177    pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result<Bucket<T>, T> {
1178        unsafe {
1179            match self.table.prepare_insert_no_grow(hash) {
1180                Ok(index) => {
1181                    let bucket = self.bucket(index);
1182                    bucket.write(value);
1183                    Ok(bucket)
1184                }
1185                Err(()) => Err(value),
1186            }
1187        }
1188    }
1189
1190    /// Inserts a new element into the table, and returns a mutable reference to it.
1191    ///
1192    /// This does not check if the given element already exists in the table.
1193    #[cfg_attr(feature = "inline-more", inline)]
1194    pub fn insert_entry<C: ?Sized, E>(
1195        &mut self,
1196        cx: &mut C,
1197        hash: u64,
1198        value: T,
1199        hasher: impl HasherFn<C, T, E>,
1200    ) -> Result<&mut T, CustomError<E>> {
1201        Ok(unsafe { self.insert(cx, hash, value, hasher)?.as_mut() })
1202    }
1203
1204    /// Inserts a new element into the table, without growing the table.
1205    ///
1206    /// There must be enough space in the table to insert the new element.
1207    ///
1208    /// This does not check if the given element already exists in the table.
1209    #[cfg_attr(feature = "inline-more", inline)]
1210    #[cfg(feature = "raw")]
1211    pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
1212        let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
1213        let bucket = self.table.bucket(index);
1214
1215        // If we are replacing a DELETED entry then we don't need to update
1216        // the load counter.
1217        self.table.growth_left -= special_is_empty(old_ctrl) as usize;
1218
1219        bucket.write(value);
1220        self.table.items += 1;
1221        bucket
1222    }
1223
1224    /// Temporarily removes a bucket, applying the given function to the removed
1225    /// element and optionally putting back the returned value in the same bucket.
1226    ///
1227    /// Returns `true` if the bucket still contains an element.
1228    ///
1229    /// This does not check if the given bucket is actually occupied.
1230    #[cfg_attr(feature = "inline-more", inline)]
1231    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
1232    where
1233        F: FnOnce(T) -> Option<T>,
1234    {
1235        let index = self.bucket_index(&bucket);
1236        let old_ctrl = *self.table.ctrl(index);
1237        debug_assert!(self.is_bucket_full(index));
1238        let old_growth_left = self.table.growth_left;
1239        let item = self.remove(bucket).0;
1240        if let Some(new_item) = f(item) {
1241            self.table.growth_left = old_growth_left;
1242            self.table.set_ctrl(index, old_ctrl);
1243            self.table.items += 1;
1244            self.bucket(index).write(new_item);
1245            true
1246        } else {
1247            false
1248        }
1249    }
1250
1251    /// Searches for an element in the table. If the element is not found,
1252    /// returns `Err` with the position of a slot where an element with the
1253    /// same hash could be inserted.
1254    ///
1255    /// This function may resize the table if additional space is required for
1256    /// inserting an element.
1257    #[inline]
1258    pub fn find_or_find_insert_slot<C: ?Sized, E>(
1259        &mut self,
1260        cx: &mut C,
1261        hash: u64,
1262        eq: impl EqFn<C, T, E>,
1263        hasher: impl HasherFn<C, T, E>,
1264    ) -> Result<Bucket<T>, ErrorOrInsertSlot<E>> {
1265        self.reserve(cx, 1, hasher)?;
1266
1267        let index = self
1268            .table
1269            .find_or_find_insert_slot_inner(cx, hash, &|cx, index| unsafe {
1270                eq.eq(cx, self.bucket(index).as_ref())
1271            })?;
1272
1273        Ok(unsafe { self.bucket(index) })
1274    }
1275
1276    /// Inserts a new element into the table in the given slot, and returns its
1277    /// raw bucket.
1278    ///
1279    /// # Safety
1280    ///
1281    /// `slot` must point to a slot previously returned by
1282    /// `find_or_find_insert_slot`, and no mutation of the table must have
1283    /// occurred since that call.
1284    #[inline]
1285    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1286        let old_ctrl = *self.table.ctrl(slot.index);
1287        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1288
1289        let bucket = self.bucket(slot.index);
1290        bucket.write(value);
1291        bucket
1292    }
1293
1294    /// Searches for an element in the table.
1295    #[inline]
1296    pub fn find<C: ?Sized, E>(
1297        &self,
1298        cx: &mut C,
1299        hash: u64,
1300        eq: impl EqFn<C, T, E>,
1301    ) -> Result<Option<Bucket<T>>, E> {
1302        let result = self.table.find_inner(cx, hash, &|cx, index| unsafe {
1303            eq.eq(cx, self.bucket(index).as_ref())
1304        })?;
1305
1306        // Avoid `Option::map` because it bloats LLVM IR.
1307        Ok(match result {
1308            Some(index) => Some(unsafe { self.bucket(index) }),
1309            None => None,
1310        })
1311    }
1312
1313    /// Gets a reference to an element in the table.
1314    #[inline]
1315    pub fn get<C: ?Sized, E>(
1316        &self,
1317        cx: &mut C,
1318        hash: u64,
1319        eq: impl EqFn<C, T, E>,
1320    ) -> Result<Option<&T>, E> {
1321        // Avoid `Option::map` because it bloats LLVM IR.
1322        Ok(match self.find(cx, hash, eq)? {
1323            Some(bucket) => Some(unsafe { bucket.as_ref() }),
1324            None => None,
1325        })
1326    }
1327
1328    /// Gets a mutable reference to an element in the table.
1329    #[inline]
1330    pub fn get_mut<C: ?Sized, E>(
1331        &mut self,
1332        cx: &mut C,
1333        hash: u64,
1334        eq: impl EqFn<C, T, E>,
1335    ) -> Result<Option<&mut T>, E> {
1336        // Avoid `Option::map` because it bloats LLVM IR.
1337        Ok(match self.find(cx, hash, eq)? {
1338            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1339            None => None,
1340        })
1341    }
1342
1343    /// Attempts to get mutable references to `N` entries in the table at once.
1344    ///
1345    /// Returns an array of length `N` with the results of each query.
1346    ///
1347    /// At most one mutable reference will be returned to any entry. `None` will be returned if any
1348    /// of the hashes are duplicates, or if any of the hashes is not found.
1349    ///
1350    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
1351    /// the `i`th key to be looked up.
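    ///
    /// # Example
    ///
    /// A sketch of a two-key lookup (not compiled as a doctest); `keys`,
    /// `key_of`, the hashes and the error type are hypothetical:
    ///
    /// ```ignore
    /// let found = table.get_many_mut(cx, [hash_a, hash_b], |_cx, i, v| {
    ///     // `i` tells us which of the two keys is currently being compared.
    ///     Ok::<_, MyError>(key_of(v) == keys[i])
    /// })?;
    ///
    /// if let Some([a, b]) = found {
    ///     // `a` and `b` are guaranteed to be distinct `&mut T` borrows.
    /// }
    /// ```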
1352    pub fn get_many_mut<C: ?Sized, E, const N: usize>(
1353        &mut self,
1354        cx: &mut C,
1355        hashes: [u64; N],
1356        eq: impl Fn(&mut C, usize, &T) -> Result<bool, E>,
1357    ) -> Result<Option<[&'_ mut T; N]>, E> {
1358        unsafe {
1359            let ptrs = match self.get_many_mut_pointers(cx, hashes, eq)? {
1360                Some(ptrs) => ptrs,
1361                None => return Ok(None),
1362            };
1363
1364            for (i, &cur) in ptrs.iter().enumerate() {
1365                if ptrs[..i].iter().any(|&prev| ptr::eq::<T>(prev, cur)) {
1366                    return Ok(None);
1367                }
1368            }
1369            // All buckets are distinct from all previous buckets so we're clear to return the result
1370            // of the lookup.
1371
1372            // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
1373            Ok(Some(mem::transmute_copy(&ptrs)))
1374        }
1375    }
1376
1377    pub unsafe fn get_many_unchecked_mut<C: ?Sized, E, const N: usize>(
1378        &mut self,
1379        cx: &mut C,
1380        hashes: [u64; N],
1381        eq: impl Fn(&mut C, usize, &T) -> Result<bool, E>,
1382    ) -> Result<Option<[&'_ mut T; N]>, E> {
1383        let ptrs = match self.get_many_mut_pointers(cx, hashes, eq)? {
1384            Some(ptrs) => ptrs,
1385            None => return Ok(None),
1386        };
1387
1388        Ok(Some(mem::transmute_copy(&ptrs)))
1389    }
1390
1391    unsafe fn get_many_mut_pointers<C: ?Sized, E, const N: usize>(
1392        &mut self,
1393        cx: &mut C,
1394        hashes: [u64; N],
1395        eq: impl Fn(&mut C, usize, &T) -> Result<bool, E>,
1396    ) -> Result<Option<[*mut T; N]>, E> {
1397        // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
1398        let mut outs: MaybeUninit<[*mut T; N]> = MaybeUninit::uninit();
1399        let outs_ptr = outs.as_mut_ptr();
1400
1401        for (i, &hash) in hashes.iter().enumerate() {
1402            let cur = match self.find(cx, hash, |cx: &mut C, k: &T| eq(cx, i, k))? {
1403                Some(cur) => cur,
1404                None => return Ok(None),
1405            };
1406            *(*outs_ptr).get_unchecked_mut(i) = cur.as_mut();
1407        }
1408
1409        // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
1410        Ok(Some(outs.assume_init()))
1411    }
1412
1413    /// Returns the number of elements the map can hold without reallocating.
1414    ///
1415    /// This number is a lower bound; the table might be able to hold
1416    /// more, but is guaranteed to be able to hold at least this many.
1417    #[inline]
1418    pub fn capacity(&self) -> usize {
1419        self.table.items + self.table.growth_left
1420    }
1421
1422    /// Returns the number of elements in the table.
1423    #[inline]
1424    pub fn len(&self) -> usize {
1425        self.table.items
1426    }
1427
1428    /// Returns `true` if the table contains no elements.
1429    #[inline]
1430    pub fn is_empty(&self) -> bool {
1431        self.len() == 0
1432    }
1433
1434    /// Returns the number of buckets in the table.
1435    #[inline]
1436    pub fn buckets(&self) -> usize {
1437        self.table.bucket_mask + 1
1438    }
1439
1440    /// Checks whether the bucket at `index` is full.
1441    ///
1442    /// # Safety
1443    ///
1444    /// The caller must ensure `index` is less than the number of buckets.
1445    #[inline]
1446    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
1447        self.table.is_bucket_full(index)
1448    }
1449
1450    /// Returns an iterator over every element in the table. It is up to
1451    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
1452    /// Because we cannot make the `next` method unsafe on the `RawIter`
1453    /// struct, we have to make the `iter` method unsafe.
1454    ///
1455    /// # Safety
1456    ///
1457    /// Caller must ensure that the raw iterator doesn't outlive `self`.
1458    #[inline]
1459    pub unsafe fn iter(&self) -> RawIter<T> {
1460        // SAFETY:
1461        // 1. The caller must uphold the safety contract for `iter` method.
1462        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1463        //    we never exposed RawTable::new_uninitialized in a public API.
1464        self.table.iter()
1465    }
1466
1467    /// Returns an iterator over occupied buckets that could match a given hash.
1468    ///
1469    /// `RawTable` only stores 7 bits of the hash value, so this iterator may
1470    /// return items that have a hash value different than the one provided. You
1471    /// should always validate the returned values before using them.
1472    ///
1473    /// It is up to the caller to ensure that the `RawTable` outlives the
1474    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
1475    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
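    ///
    /// A sketch of how the returned buckets are typically validated (not
    /// compiled as a doctest); `is_match` is a hypothetical full equality
    /// check against the key being looked up:
    ///
    /// ```ignore
    /// for bucket in unsafe { table.iter_hash(hash) } {
    ///     // Only 7 bits of the hash are stored, so every candidate must be
    ///     // re-checked against the full key.
    ///     if is_match(unsafe { bucket.as_ref() }) {
    ///         return Some(bucket);
    ///     }
    /// }
    /// ```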
1476    #[cfg_attr(feature = "inline-more", inline)]
1477    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
1478        RawIterHash::new(self, hash)
1479    }
1480
1481    /// Returns an iterator which removes all elements from the table without
1482    /// freeing the memory.
1483    #[cfg_attr(feature = "inline-more", inline)]
1484    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1485        unsafe {
1486            let iter = self.iter();
1487            self.drain_iter_from(iter)
1488        }
1489    }
1490
1491    /// Returns an iterator which removes all elements from the table without
1492    /// freeing the memory.
1493    ///
1494    /// Iteration starts at the provided iterator's current location.
1495    ///
1496    /// It is up to the caller to ensure that the iterator is valid for this
1497    /// `RawTable` and covers all items that remain in the table.
1498    #[cfg_attr(feature = "inline-more", inline)]
1499    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1500        debug_assert_eq!(iter.len(), self.len());
1501        RawDrain {
1502            iter,
1503            table: mem::replace(&mut self.table, RawTableInner::NEW),
1504            orig_table: NonNull::from(&mut self.table),
1505            marker: PhantomData,
1506        }
1507    }
1508
1509    /// Returns an iterator which consumes all elements from the table.
1510    ///
1511    /// Iteration starts at the provided iterator's current location.
1512    ///
1513    /// It is up to the caller to ensure that the iterator is valid for this
1514    /// `RawTable` and covers all items that remain in the table.
1515    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
1516        debug_assert_eq!(iter.len(), self.len());
1517
1518        let allocation = self.into_allocation();
1519        RawIntoIter {
1520            iter,
1521            allocation,
1522            marker: PhantomData,
1523        }
1524    }
1525
1526    /// Converts the table into a raw allocation. The contents of the table
1527    /// should be dropped using a `RawIter` before freeing the allocation.
1528    #[cfg_attr(feature = "inline-more", inline)]
1529    pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
1530        let alloc = if self.table.is_empty_singleton() {
1531            None
1532        } else {
1533            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1534            let (layout, ctrl_offset) =
1535                match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
1536                    Some(lco) => lco,
1537                    None => unsafe { hint::unreachable_unchecked() },
1538                };
1539            Some((
1540                unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
1541                layout,
1542                unsafe { ptr::read(&self.alloc) },
1543            ))
1544        };
1545        mem::forget(self);
1546        alloc
1547    }
1548}
1549
1550unsafe impl<T, A: Allocator> Send for RawTable<T, A>
1551where
1552    T: Send,
1553    A: Send,
1554{
1555}
1556unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
1557where
1558    T: Sync,
1559    A: Sync,
1560{
1561}
1562
1563impl RawTableInner {
1564    const NEW: Self = RawTableInner::new();
1565
1566    /// Creates a new empty hash table without allocating any memory.
1567    ///
1568    /// In effect this returns a table with exactly 1 bucket. However we can
1569    /// leave the data pointer dangling since that bucket is never accessed
1570    /// due to our load factor forcing us to always have at least 1 free bucket.
1571    #[inline]
1572    const fn new() -> Self {
1573        Self {
1574            // Be careful to cast the entire slice to a raw pointer.
1575            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
1576            bucket_mask: 0,
1577            items: 0,
1578            growth_left: 0,
1579        }
1580    }
1581}
1582
1583impl RawTableInner {
1584    /// Allocates a new [`RawTableInner`] with the given number of buckets.
1585    /// The control bytes and buckets are left uninitialized.
1586    ///
1587    /// # Safety
1588    ///
1589    /// The caller of this function must ensure that `buckets` is a power of two
1590    /// and must also initialize all `self.bucket_mask + 1 + Group::WIDTH` control
1591    /// bytes with the [`EMPTY`] byte.
1592    ///
1593    /// See also [`Allocator`] API for other safety concerns.
1594    ///
1595    /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
1596    #[cfg_attr(feature = "inline-more", inline)]
1597    unsafe fn new_uninitialized<A>(
1598        alloc: &A,
1599        table_layout: TableLayout,
1600        buckets: usize,
1601    ) -> Result<Self, Error>
1602    where
1603        A: Allocator,
1604    {
1605        debug_assert!(buckets.is_power_of_two());
1606
1607        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1608        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1609            Some(lco) => lco,
1610            None => return Err(Error::CapacityOverflow),
1611        };
1612
1613        let ptr: NonNull<u8> = alloc.allocate(layout)?.cast();
1614
1615        // SAFETY: `ptr` is non-null since allocation failure was already handled by `?` above
1616        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1617        Ok(Self {
1618            ctrl,
1619            bucket_mask: buckets - 1,
1620            items: 0,
1621            growth_left: bucket_mask_to_capacity(buckets - 1),
1622        })
1623    }
1624
1625    /// Attempts to allocate a new [`RawTableInner`] with at least enough
1626    /// capacity for inserting the given number of elements without reallocating.
1627    ///
1628    /// All the control bytes are initialized with the [`EMPTY`] bytes.
1629    #[inline]
1630    fn fallible_with_capacity<A>(
1631        alloc: &A,
1632        table_layout: TableLayout,
1633        capacity: usize,
1634    ) -> Result<Self, Error>
1635    where
1636        A: Allocator,
1637    {
1638        if capacity == 0 {
1639            Ok(Self::NEW)
1640        } else {
1641            // SAFETY: We checked that we could successfully allocate the new table, and then
1642            // initialized all control bytes with the constant `EMPTY` byte.
1643            unsafe {
1644                let buckets = capacity_to_buckets(capacity).ok_or(Error::CapacityOverflow)?;
1645
1646                let result = Self::new_uninitialized(alloc, table_layout, buckets)?;
1647                // SAFETY: We checked that the table is allocated and therefore the table already has
1648                // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
1649                // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
1650                result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
1651
1652                Ok(result)
1653            }
1654        }
1655    }
1656
1657    /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
1658    /// the given number of elements without reallocating.
1659    ///
1660    /// Returns an error if the new capacity exceeds [`isize::MAX`] bytes or if
1661    /// allocation fails; this simply forwards to [`fallible_with_capacity`] rather
1662    /// than panicking or aborting on allocation error.
1663    ///
1664    /// All the control bytes are initialized with the [`EMPTY`] bytes.
1665    ///
1666    /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
1667    fn try_with_capacity<A>(
1668        alloc: &A,
1669        table_layout: TableLayout,
1670        capacity: usize,
1671    ) -> Result<Self, Error>
1672    where
1673        A: Allocator,
1674    {
1675        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1676        Self::fallible_with_capacity(alloc, table_layout, capacity)
1677    }
1678
1679    /// Fixes up an insertion slot due to false positives for groups smaller than the group width.
1680    /// This must only be used on insertion slots found by `find_insert_slot_in_group`.
1681    #[inline]
1682    unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
1683        // In tables smaller than the group width
1684        // (self.buckets() < Group::WIDTH), trailing control
1685        // bytes outside the range of the table are filled with
1686        // EMPTY entries. These will unfortunately trigger a
1687        // match, but once masked may point to a full bucket that
1688        // is already occupied. We detect this situation here and
1689        // perform a second scan starting at the beginning of the
1690        // table. This second scan is guaranteed to find an empty
1691        // slot (due to the load factor) before hitting the trailing
1692        // control bytes (containing EMPTY).
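        //
        // Illustrative example (assuming Group::WIDTH == 16): with 4 buckets
        // (bucket_mask == 3), a group loaded at position 0 also reads 12
        // trailing EMPTY bytes beyond the real table. A match on, say, bit 5
        // is masked to (0 + 5) & 3 == 1, which may be a bucket that is
        // already full; in that case we rescan the aligned group at index 0
        // below.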
1693        if unlikely(self.is_bucket_full(index)) {
1694            debug_assert!(self.bucket_mask < Group::WIDTH);
1695            // SAFETY:
1696            //
1697            // * We are in range and `ptr = self.ctrl(0)` are valid for reads
1698            //   and properly aligned, because the table is already allocated
1699            //   (see `TableLayout::calculate_layout_for` and `ptr::read`);
1700            //
1701            // * For tables larger than the group width (self.buckets() >= Group::WIDTH),
1702            //   we will never end up in the given branch, since
1703            //   `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group` cannot
1704            //   return a full bucket index. For tables smaller than the group width, calling the
1705            //   `unwrap_unchecked` function is also
1706            //   safe, as the trailing control bytes outside the range of the table are filled
1707            //   with EMPTY bytes, so this second scan either finds an empty slot (due to the
1708            //   load factor) or hits the trailing control bytes (containing EMPTY).
1709            index = Group::load_aligned(self.ctrl(0))
1710                .match_empty_or_deleted()
1711                .lowest_set_bit()
1712                .unwrap_unchecked();
1713        }
1714        InsertSlot { index }
1715    }
1716
1717    /// Finds the position to insert something in a group.
1718    /// This may have false positives and must be fixed up with `fix_insert_slot` before it's used.
1719    #[inline]
1720    fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
1721        let bit = group.match_empty_or_deleted().lowest_set_bit();
1722
1723        if likely(bit.is_some()) {
1724            Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
1725        } else {
1726            None
1727        }
1728    }
1729
1730    /// Searches for an element in the table, or a potential slot where that element could be
1731    /// inserted.
1732    ///
1733    /// This uses dynamic dispatch to reduce the amount of code generated, but that is
1734    /// eliminated by LLVM optimizations.
1735    #[inline]
1736    fn find_or_find_insert_slot_inner<C: ?Sized, E>(
1737        &self,
1738        cx: &mut C,
1739        hash: u64,
1740        eq: &dyn Fn(&mut C, usize) -> Result<bool, E>,
1741    ) -> Result<usize, ErrorOrInsertSlot<E>> {
1742        let mut insert_slot = None;
1743
1744        let h2_hash = h2(hash);
1745        let mut probe_seq = self.probe_seq(hash);
1746
1747        loop {
1748            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1749
1750            for bit in group.match_byte(h2_hash) {
1751                let index = (probe_seq.pos + bit) & self.bucket_mask;
1752
1753                if likely(eq(cx, index).map_err(CustomError::Custom)?) {
1754                    return Ok(index);
1755                }
1756            }
1757
1758            // We didn't find the element we were looking for in the group, try to get an
1759            // insertion slot from the group if we don't have one yet.
1760            if likely(insert_slot.is_none()) {
1761                insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
1762            }
1763
1764            // Only stop the search if the group contains at least one empty element.
1765            // Otherwise, the element that we are looking for might be in a following group.
1766            if likely(group.match_empty().any_bit_set()) {
1767                // We must have found an insert slot by now, since the current group contains at
1768                // least one empty element. For tables smaller than the group width, there will
1769                // still be an empty element in the current (and only) group due to the load factor.
1770                unsafe {
1771                    return Err(ErrorOrInsertSlot::InsertSlot(
1772                        self.fix_insert_slot(insert_slot.unwrap_unchecked()),
1773                    ));
1774                }
1775            }
1776
1777            probe_seq.move_next(self.bucket_mask);
1778        }
1779    }
1780
1781    /// Searches for an empty or deleted bucket which is suitable for inserting a new
1782    /// element and sets the hash for that slot. Returns an index of that slot and the
1783    /// old control byte stored in the found index.
1784    ///
1785    /// This function does not check if the given element exists in the table. Also,
1786    /// this function does not check if there is enough space in the table to insert
1787    /// a new element, so the caller must ensure that the table has at least 1
1788    /// empty or deleted `bucket` or this function will never return (will go into
1789    /// an infinite loop).
1790    ///
1791    /// This function does not make any changes to the `data` parts of the table,
1792    /// or any changes to the `items` or `growth_left` fields of the table.
1793    ///
1794    /// # Safety
1795    ///
1796    /// The safety rules are directly derived from the safety rule for the
1797    /// [`RawTableInner::set_ctrl_h2`] methods. Thus, in order to uphold the safety
1798    /// contracts for that method, as well as for the correct logic of the work of this
1799    /// crate, you must observe the following rules when calling this function:
1800    ///
1801    /// * The [`RawTableInner`] has already been allocated;
1802    ///
1803    /// * The caller of this function must ensure that the "data" parts of the table
1804    ///   will have an entry in the returned index (matching the given hash) right
1805    ///   after calling this function.
1806    ///
1807    /// Calling this function on a table that has not been allocated results in
1808    /// [`undefined behavior`].
1809    ///
1810    /// The caller must independently increase the `items` field of the table, and also,
1811    /// if the old control byte was [`EMPTY`], then decrease the table's `growth_left`
1812    /// field, and do not change it if the old control byte was [`DELETED`].
1813    ///
1814    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
1815    /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`].
1816    ///
1817    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1818    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1819    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1820    /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
1821    #[inline]
1822    unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, u8) {
1823        let index: usize = self.find_insert_slot(hash).index;
1824        // SAFETY:
1825        // 1. The `find_insert_slot` function either returns an `index` less than or
1826        //    equal to `self.bucket_mask = self.buckets() - 1` of the table, or never
1827        //    returns if it cannot find an empty or deleted slot.
1828        // 2. The caller of this function guarantees that the table has already been
1829        //    allocated
1830        let old_ctrl = *self.ctrl(index);
1831        self.set_ctrl_h2(index, hash);
1832        (index, old_ctrl)
1833    }
1834
1835    /// Searches for an empty or deleted bucket which is suitable for inserting
1836    /// a new element, returning the `index` for the new [`Bucket`].
1837    ///
1838    /// This function does not make any changes to the `data` part of the table, or any
1839    /// changes to the `items` or `growth_left` field of the table.
1840    ///
1841    /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
1842    /// will never return (will go into an infinite loop) for tables larger than the group
1843    /// width, or return an index outside of the table indices range if the table is less
1844    /// than the group width.
1845    ///
1846    /// # Note
1847    ///
1848    /// Calling this function is always safe, but attempting to write data at
1849    /// the index returned by this function when the table is less than the group width
1850    /// and if there was not at least one empty bucket in the table will cause immediate
1851    /// [`undefined behavior`]. This is because in this case the function will return
1852    /// `self.bucket_mask + 1` as an index due to the trailing EMPTY control bytes outside
1853    /// the table range.
1854    ///
1855    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1856    #[inline]
1857    fn find_insert_slot(&self, hash: u64) -> InsertSlot {
1858        let mut probe_seq = self.probe_seq(hash);
1859        loop {
1860            // SAFETY:
1861            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1862            //   of the table due to masking with `self.bucket_mask` and also because the number of
1863            //   buckets is a power of two (see comment for masking below).
1864            //
1865            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1866            //   call `Group::load` due to the extended control bytes range, which is
1867            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1868            //   byte will never be read for the allocated table);
1869            //
1870            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1871            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1872            //   bytes, which is safe (see RawTableInner::new_in).
1873            unsafe {
1874                let group = Group::load(self.ctrl(probe_seq.pos));
1875                let index = self.find_insert_slot_in_group(&group, &probe_seq);
1876
1877                if likely(index.is_some()) {
1878                    return self.fix_insert_slot(index.unwrap_unchecked());
1879                }
1880            }
1881            probe_seq.move_next(self.bucket_mask);
1882        }
1883    }
1884
1885    /// Searches for an element in a table, returning the `index` of the found element.
1886    /// This uses dynamic dispatch to reduce the amount of code generated, but it is
1887    /// eliminated by LLVM optimizations.
1888    ///
1889    /// This function does not make any changes to the `data` part of the table, or any
1890    /// changes to the `items` or `growth_left` field of the table.
1891    ///
1892    /// The table must have at least 1 empty `bucket`, otherwise, if the `eq`
1893    /// function never returns `true`, this function will also never return
1894    /// (will go into an infinite loop).
1895    #[inline(always)]
1896    fn find_inner<C: ?Sized, E>(
1897        &self,
1898        cx: &mut C,
1899        hash: u64,
1900        eq: &dyn Fn(&mut C, usize) -> Result<bool, E>,
1901    ) -> Result<Option<usize>, E> {
1902        let h2_hash = h2(hash);
1903        let mut probe_seq = self.probe_seq(hash);
1904
1905        loop {
1906            // SAFETY:
1907            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1908            //   of the table due to masking with `self.bucket_mask`.
1909            //
1910            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1911            //   call `Group::load` due to the extended control bytes range, which is
1912            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1913            //   byte will never be read for the allocated table);
1914            //
1915            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1916            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1917            //   bytes, which is safe (see RawTableInner::new_in).
1918            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1919
1920            for bit in group.match_byte(h2_hash) {
1921                // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1922                // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1923                let index = (probe_seq.pos + bit) & self.bucket_mask;
1924
1925                if likely(eq(cx, index)?) {
1926                    return Ok(Some(index));
1927                }
1928            }
1929
1930            if likely(group.match_empty().any_bit_set()) {
1931                return Ok(None);
1932            }
1933
1934            probe_seq.move_next(self.bucket_mask);
1935        }
1936    }
1937
1938    /// Prepares for rehashing data in place (that is, without allocating new memory).
1939    /// Converts all full index `control bytes` to `DELETED` and all `DELETED` control
1940    /// bytes to `EMPTY`, i.e. performs the following conversion:
1941    ///
1942    /// - `EMPTY` control bytes   -> `EMPTY`;
1943    /// - `DELETED` control bytes -> `EMPTY`;
1944    /// - `FULL` control bytes    -> `DELETED`.
1945    ///
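    /// A scalar sketch of the per-byte conversion (the real code below does
    /// this a whole [`Group`] at a time):
    ///
    /// ```ignore
    /// let converted = if ctrl & 0x80 != 0 {
    ///     EMPTY   // special byte (EMPTY or DELETED) becomes EMPTY
    /// } else {
    ///     DELETED // full byte becomes DELETED
    /// };
    /// ```
    ///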
1946    /// This function does not make any changes to the `data` parts of the table,
1947    /// or any changes to the `items` or `growth_left` fields of the table.
1948    ///
1949    /// # Safety
1950    ///
1951    /// You must observe the following safety rules when calling this function:
1952    ///
1953    /// * The [`RawTableInner`] has already been allocated;
1954    ///
1955    /// * The caller of this function must convert the `DELETED` bytes back to `FULL`
1956    ///   bytes when re-inserting them into their ideal position (which was impossible
1957    ///   to do during the first insert due to tombstones). If the caller does not do
1958    ///   this, then calling this function may result in a memory leak.
1959    ///
1960    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise
1961    ///   calling this function results in [`undefined behavior`].
1962    ///
1963    /// Calling this function on a table that has not been allocated results in
1964    /// [`undefined behavior`].
1965    ///
1966    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
1967    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
1968    ///
1969    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1970    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1971    #[allow(clippy::mut_mut)]
1972    #[inline]
1973    unsafe fn prepare_rehash_in_place(&mut self) {
1974        // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
1975        // This effectively frees up all buckets containing a DELETED entry.
1976        //
1977        // SAFETY:
1978        // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
1979        // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
1980        //    due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
1981        // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
1982        // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
1983        //    and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
1984        for i in (0..self.buckets()).step_by(Group::WIDTH) {
1985            let group = Group::load_aligned(self.ctrl(i));
1986            let group = group.convert_special_to_empty_and_full_to_deleted();
1987            group.store_aligned(self.ctrl(i));
1988        }
1989
1990        // Fix up the trailing control bytes. See the comments in set_ctrl
1991        // for the handling of tables smaller than the group width.
1992        //
1993        // SAFETY: The caller of this function guarantees that [`RawTableInner`]
1994        // has already been allocated
1995        if unlikely(self.buckets() < Group::WIDTH) {
1996            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
1997            // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
1998            // `Group::WIDTH` is safe
1999            self.ctrl(0)
2000                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
2001        } else {
2002            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
2003            // control bytes,so copying `Group::WIDTH` bytes with offset equal
2004            // to `self.buckets() == self.bucket_mask + 1` is safe
2005            self.ctrl(0)
2006                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
2007        }
2008    }
2009
2010    /// Returns an iterator over every element in the table.
2011    ///
2012    /// # Safety
2013    ///
2014    /// If any of the following conditions are violated, the result
2015    /// is [`undefined behavior`]:
2016    ///
2017    /// * The caller has to ensure that the `RawTableInner` outlives the
2018    ///   `RawIter`. Because we cannot make the `next` method unsafe on
2019    ///   the `RawIter` struct, we have to make the `iter` method unsafe.
2020    ///
2021    /// * The [`RawTableInner`] must have properly initialized control bytes.
2022    ///
2023    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2024    #[inline]
2025    unsafe fn iter<T>(&self) -> RawIter<T> {
2026        // SAFETY:
2027        // 1. Since the caller of this function ensures that the control bytes
2028        //    are properly initialized and `self.data_end()` points to the start
2029        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2030        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2031        //    control bytes.
2032        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2033        //    equal to zero).
2034        // 3. We pass the exact value of buckets of the table to the function.
2035        //
2036        //                         `ctrl` points here (to the start
2037        //                         of the first control byte `CT0`)
2038        //                          ∨
2039        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2040        //                           \________  ________/
2041        //                                    \/
2042        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2043        //
2044        // where: T0...T_n  - our stored data;
2045        //        CT0...CT_n - control bytes or metadata for `data`.
2046        let data = Bucket::from_base_index(self.data_end(), 0);
2047        RawIter {
2048            // SAFETY: See explanation above
2049            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2050            items: self.items,
2051        }
2052    }
2053
2054    /// Executes the destructors (if any) of the values stored in the table.
2055    ///
2056    /// # Note
2057    ///
2058    /// This function does not erase the control bytes of the table and does
2059    /// not make any changes to the `items` or `growth_left` fields of the
2060    /// table. If necessary, the caller of this function must manually set
2061    /// up these table fields, for example using the [`clear_no_drop`] function.
2062    ///
2063    /// Be careful when calling this function, because the drop function of
2064    /// the elements can panic, and this can leave the table in an inconsistent
2065    /// state.
2066    ///
2067    /// # Safety
2068    ///
2069    /// If `T` is a type that should be dropped and **the table is not empty**,
2070    /// calling this function more than once results in [`undefined behavior`].
2071    ///
2072    /// If `T` is not [`Copy`], attempting to use values stored in the table after
2073    /// calling this function may result in [`undefined behavior`].
2074    ///
2075    /// It is safe to call this function on a table that has not been allocated,
2076    /// on a table with uninitialized control bytes, and on a table with no actual
2077    /// data but with `Full` control bytes if `self.items == 0`.
2078    ///
2079    /// See also [`Bucket::drop`] / [`Bucket::as_ptr`] methods, for more information
2080    /// about properly removing or saving `element` from / into the [`RawTable`] /
2081    /// [`RawTableInner`].
2082    ///
2083    /// [`Bucket::drop`]: Bucket::drop
2084    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2085    /// [`clear_no_drop`]: RawTableInner::clear_no_drop
2086    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2087    unsafe fn drop_elements<T>(&mut self) {
2088        // Check that `self.items != 0`. Protects against the possibility
2089        // of creating an iterator on a table with uninitialized control bytes.
2090        if T::NEEDS_DROP && self.items != 0 {
2091            // SAFETY: We know for sure that RawTableInner will outlive the
2092            // returned `RawIter` iterator, and the caller of this function
2093            // must uphold the safety contract for `drop_elements` method.
2094            for item in self.iter::<T>() {
2095                // SAFETY: The caller must uphold the safety contract for
2096                // `drop_elements` method.
2097                item.drop();
2098            }
2099        }
2100    }
2101
2102    /// Executes the destructors (if any) of the values stored in the table and then
2103    /// deallocates the table.
2104    ///
2105    /// # Note
2106    ///
2107    /// Calling this function automatically makes invalid (dangling) all instances of
2108    /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table.
2109    ///
2110    /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
2111    /// fields of the table. If necessary, the caller of this function must manually set
2112    /// up these table fields.
2113    ///
2114    /// # Safety
2115    ///
2116    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2117    ///
2118    /// * Calling this function more than once;
2119    ///
2120    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2121    ///   to allocate this table.
2122    ///
2123    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
2124    ///   was used to allocate this table.
2125    ///
2126    /// The caller of this function should pay attention to the possibility of the
2127    /// elements' drop function panicking, because this:
2128    ///
2129    ///    * May leave the table in an inconsistent state;
2130    ///
2131    ///    * May mean the memory is never deallocated, so a memory leak may occur.
2132    ///
2133    /// Attempt to use the `ctrl` field of the table (dereference) after calling this
2134    /// function results in [`undefined behavior`].
2135    ///
2136    /// It is safe to call this function on a table that has not been allocated,
2137    /// on a table with uninitialized control bytes, and on a table with no actual
2138    /// data but with `Full` control bytes if `self.items == 0`.
2139    ///
2140    /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
2141    /// for more information.
2142    ///
2143    /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
2144    /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
2145    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2146    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2147        if !self.is_empty_singleton() {
2148            unsafe {
2149                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2150                self.drop_elements::<T>();
2151                // SAFETY:
2152                // 1. We have checked that our table is allocated.
2153                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2154                self.free_buckets(alloc, table_layout);
2155            }
2156        }
2157    }
2158
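    /// Returns a [`Bucket`] wrapping the data slot at `index`.
    ///
    /// # Safety
    ///
    /// The table must be allocated and `index` must be less than the number
    /// of buckets (see the `debug_assert`s below).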
2159    #[inline]
2160    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
2161        debug_assert_ne!(self.bucket_mask, 0);
2162        debug_assert!(index < self.buckets());
2163        Bucket::from_base_index(self.data_end(), index)
2164    }
2165
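    /// Returns a raw pointer to the start of the data slot at `index`, where
    /// each slot occupies `size_of` bytes. The same requirements as for
    /// `bucket` apply.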
2166    #[inline]
2167    unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
2168        debug_assert_ne!(self.bucket_mask, 0);
2169        debug_assert!(index < self.buckets());
2170        let base: *mut u8 = self.data_end().as_ptr();
2171        base.sub((index + 1) * size_of)
2172    }
2173
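    /// Returns a pointer to the end of the `data` array, which is also the
    /// start of the control bytes (see the layout diagram in `iter`).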
2174    #[inline]
2175    unsafe fn data_end<T>(&self) -> NonNull<T> {
2176        NonNull::new_unchecked(self.ctrl.as_ptr().cast())
2177    }
2178
2179    /// Returns an iterator-like object for a probe sequence on the table.
2180    ///
2181    /// This iterator never terminates, but is guaranteed to visit each bucket
2182    /// group exactly once. The loop using `probe_seq` must terminate upon
2183    /// reaching a group containing an empty bucket.
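    ///
    /// A sketch of the canonical probe loop (not compiled as a doctest),
    /// mirroring how `find_inner` uses it:
    ///
    /// ```ignore
    /// let mut probe_seq = self.probe_seq(hash);
    /// loop {
    ///     let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
    ///     // ... inspect `group` for candidate matches here ...
    ///     if group.match_empty().any_bit_set() {
    ///         break; // an empty bucket terminates the search
    ///     }
    ///     probe_seq.move_next(self.bucket_mask);
    /// }
    /// ```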
2184    #[inline]
2185    fn probe_seq(&self, hash: u64) -> ProbeSeq {
2186        ProbeSeq {
2187            pos: h1(hash) & self.bucket_mask,
2188            stride: 0,
2189        }
2190    }
2191
2192    /// Returns the index of the bucket into which the value should be inserted if there is enough room
2193    /// in the table; otherwise returns an error.
2194    #[inline]
2195    unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
2196        let index = self.find_insert_slot(hash).index;
2197        let old_ctrl = *self.ctrl(index);
2198        if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
2199            Err(())
2200        } else {
2201            self.record_item_insert_at(index, old_ctrl, hash);
2202            Ok(index)
2203        }
2204    }
2205
2206    #[inline]
2207    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
2208        self.growth_left -= usize::from(special_is_empty(old_ctrl));
2209        self.set_ctrl_h2(index, hash);
2210        self.items += 1;
2211    }
2212
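    /// Checks whether the buckets at `i` and `new_i` lie in the same probe
    /// group for `hash`, i.e. whether they are the same number of whole
    /// groups away from the start of the probe sequence for that hash.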
2213    #[inline]
2214    fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
2215        let probe_seq_pos = self.probe_seq(hash).pos;
2216        let probe_index =
2217            |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
2218        probe_index(i) == probe_index(new_i)
2219    }
2220
2221    /// Sets a control byte to the hash, and possibly also the replicated control byte at
2222    /// the end of the array.
2223    ///
2224    /// This function does not make any changes to the `data` parts of the table,
2225    /// or any changes to the `items` or `growth_left` fields of the table.
2226    ///
2227    /// # Safety
2228    ///
2229    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
2230    /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
2231    /// following rules when calling this function:
2232    ///
2233    /// * The [`RawTableInner`] has already been allocated;
2234    ///
2235    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2236    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2237    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2238    ///
2239    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2240    ///
2241    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
2242    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2243    ///
2244    /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
2245    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2246    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2247    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2248    #[inline]
2249    unsafe fn set_ctrl_h2(&mut self, index: usize, hash: u64) {
2250        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`]
2251        self.set_ctrl(index, h2(hash));
2252    }
2253
2254    /// Replaces the hash in the control byte at the given index with the provided one,
2255    /// and possibly also replicates the new control byte at the end of the array of control
2256    /// bytes, returning the old control byte.
2257    ///
2258    /// This function does not make any changes to the `data` parts of the table,
2259    /// or any changes to the `items` or `growth_left` fields of the table.
2260    ///
2261    /// # Safety
2262    ///
2263    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_h2`]
2264    /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
2265    /// methods, you must observe the following rules when calling this function:
2266    ///
2267    /// * The [`RawTableInner`] has already been allocated;
2268    ///
2269    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2270    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2271    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2272    ///
2273    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2274    ///
2275    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
2276    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2277    ///
2278    /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
2279    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2280    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2281    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2282    #[inline]
2283    unsafe fn replace_ctrl_h2(&mut self, index: usize, hash: u64) -> u8 {
2284        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`]
2285        let prev_ctrl = *self.ctrl(index);
2286        self.set_ctrl_h2(index, hash);
2287        prev_ctrl
2288    }
2289
2290    /// Sets a control byte, and possibly also the replicated control byte at
2291    /// the end of the array.
2292    ///
2293    /// This function does not make any changes to the `data` parts of the table,
2294    /// or any changes to the `items` or `growth_left` fields of the table.
2295    ///
2296    /// # Safety
2297    ///
2298    /// You must observe the following safety rules when calling this function:
2299    ///
2300    /// * The [`RawTableInner`] has already been allocated;
2301    ///
2302    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2303    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2304    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2305    ///
2306    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2307    ///
2308    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
2309    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2310    ///
2311    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2312    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2313    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2314    #[inline]
2315    unsafe fn set_ctrl(&mut self, index: usize, ctrl: u8) {
2316        // Replicate the first Group::WIDTH control bytes at the end of
2317        // the array without using a branch. If the table is smaller than
2318        // the group width (self.buckets() < Group::WIDTH), then
2319        // `index2 = Group::WIDTH + index`; otherwise `index2` is:
2320        //
2321        // - If index >= Group::WIDTH then index == index2.
2322        // - Otherwise index2 == self.bucket_mask + 1 + index.
2323        //
2324        // The very last replicated control byte is never actually read because
2325        // we mask the initial index for unaligned loads, but we write it
2326        // anyways because it makes the set_ctrl implementation simpler.
2327        //
2328        // If there are fewer buckets than Group::WIDTH then this code will
2329        // replicate the buckets at the end of the trailing group. For example
2330        // with 2 buckets and a group size of 4, the control bytes will look
2331        // like this:
2332        //
2333        //     Real    |             Replicated
2334        // ---------------------------------------------
2335        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
2336        // ---------------------------------------------
2337
2338        // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
2339        // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
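        //
        // For instance (assuming Group::WIDTH == 8) with 16 buckets
        // (bucket_mask == 15):
        //
        // - index == 3  gives index2 == ((3 - 8) & 15) + 8 == 11 + 8 == 19,
        //   i.e. the byte is replicated into the trailing group;
        // - index == 10 gives index2 == ((10 - 8) & 15) + 8 == 2 + 8 == 10,
        //   i.e. index2 == index and the byte is only written once.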
2340        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
2341
2342        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
2343        *self.ctrl(index) = ctrl;
2344        *self.ctrl(index2) = ctrl;
2345    }
2346
2347    /// Returns a pointer to a control byte.
2348    ///
2349    /// # Safety
2350    ///
2351    /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
2352    /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
2353    /// Calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
2354    /// returns a pointer one past the end of the allocated table, which is useless on its own.
2355    ///
2356    /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
2357    /// table that has not been allocated results in [`Undefined Behavior`].
2358    ///
2359    /// So to satisfy both requirements you should always follow the rule that
2360    /// `index < self.bucket_mask + 1 + Group::WIDTH`
2361    ///
2362    /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
2363    /// for read-only purposes.
2364    ///
2365    /// See also [`Bucket::as_ptr()`] method, for more information about properly removing
2366    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2367    ///
2368    /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
2369    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2370    #[inline]
2371    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
2372        debug_assert!(index < self.num_ctrl_bytes());
2373        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
2374        self.ctrl.as_ptr().add(index)
2375    }
2376
2377    #[inline]
2378    fn buckets(&self) -> usize {
2379        self.bucket_mask + 1
2380    }
2381
2382    /// Checks whether the bucket at `index` is full.
2383    ///
2384    /// # Safety
2385    ///
2386    /// The caller must ensure `index` is less than the number of buckets.
2387    #[inline]
2388    unsafe fn is_bucket_full(&self, index: usize) -> bool {
2389        debug_assert!(index < self.buckets());
2390        is_full(*self.ctrl(index))
2391    }
2392
2393    #[inline]
2394    fn num_ctrl_bytes(&self) -> usize {
2395        self.bucket_mask + 1 + Group::WIDTH
2396    }
2397
2398    #[inline]
2399    fn is_empty_singleton(&self) -> bool {
2400        self.bucket_mask == 0
2401    }
2402
2403    /// Attempts to allocate a new hash table with at least enough capacity
2404    /// for inserting the given number of elements without reallocating,
2405    /// and returns it inside a [`ScopeGuard`] to protect against a panic in
2406    /// the hash function.
2407    ///
2408    /// # Note
2409    ///
2410    /// It is recommended (but not required):
2411    ///
2412    /// * That the new table's `capacity` be greater than or equal to `self.items`.
2413    ///
2414    /// * That the `alloc` be the same [`Allocator`] as the `Allocator` used
2415    ///   to allocate this table.
2416    ///
2417    /// * That the `table_layout` be the same [`TableLayout`] as the `TableLayout` used
2418    ///   to allocate this table.
2419    ///
2420    /// If `table_layout` does not match the `TableLayout` that was used to allocate
2421    /// this table, then using `mem::swap` with the `self` and the new table returned
2422    /// by this function results in [`undefined behavior`].
2423    ///
2424    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2425    #[allow(clippy::mut_mut)]
2426    #[inline]
2427    fn prepare_resize<'a, A>(
2428        &self,
2429        alloc: &'a A,
2430        table_layout: TableLayout,
2431        capacity: usize,
2432    ) -> Result<ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, Error>
2433    where
2434        A: Allocator,
2435    {
2436        debug_assert!(self.items <= capacity);
2437
2438        // Allocate and initialize the new table.
2439        let new_table = RawTableInner::fallible_with_capacity(alloc, table_layout, capacity)?;
2440
2441        // The hash function may panic, in which case we simply free the new
2442        // table without dropping any elements that may have been copied into
2443        // it.
2444        //
2445        // This guard is also used to free the old table on success, see
2446        // the comment at the bottom of this function.
2447        Ok(guard(new_table, move |self_| {
2448            if !self_.is_empty_singleton() {
2449                // SAFETY:
2450                // 1. We have checked that our table is allocated.
2451                // 2. We know for sure that the `alloc` and `table_layout` matches the
2452                //    [`Allocator`] and [`TableLayout`] used to allocate this table.
2453                unsafe { self_.free_buckets(alloc, table_layout) };
2454            }
2455        }))
2456    }
2457
2458    /// Reserves or rehashes to make room for `additional` more elements.
2459    ///
2460    /// This uses dynamic dispatch to reduce the amount of
2461    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2462    ///
2463    /// # Safety
2464    ///
2465    /// If any of the following conditions are violated, the result is
2466    /// [`undefined behavior`]:
2467    ///
2468    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2469    ///   to allocate this table.
2470    ///
2471    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2472    ///   used to allocate this table.
2473    ///
2474    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2475    ///   the elements stored in the table.
2476    ///
2477    /// * The [`RawTableInner`] must have properly initialized control bytes.
2478    ///
2479    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2480    #[allow(clippy::inline_always)]
2481    #[inline(always)]
2482    unsafe fn reserve_rehash_inner<C: ?Sized, E, A>(
2483        &mut self,
2484        cx: &mut C,
2485        alloc: &A,
2486        additional: usize,
2487        hasher: &dyn Fn(&mut C, &mut Self, usize) -> Result<u64, E>,
2488        layout: TableLayout,
2489        drop: Option<fn(*mut u8)>,
2490    ) -> Result<(), CustomError<E>>
2491    where
2492        A: Allocator,
2493    {
2494        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
2495        let new_items = match self.items.checked_add(additional) {
2496            Some(new_items) => new_items,
2497            None => return Err(CustomError::from(Error::CapacityOverflow)),
2498        };
2499        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
2500        if new_items <= full_capacity / 2 {
2501            // Rehash in-place without re-allocating if we have plenty of spare
2502            // capacity that is locked up due to DELETED entries.
2503
2504            // SAFETY:
2505            // 1. We know for sure that `[`RawTableInner`]` has already been allocated
2506            //    (since new_items <= full_capacity / 2);
2507            // 2. The caller ensures that `drop` function is the actual drop function of
2508            //    the elements stored in the table.
2509            // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
2510            //    used to allocate this table.
2511            // 4. The caller ensures that the control bytes of the `RawTableInner`
2512            //    are already initialized.
2513            self.rehash_in_place(cx, hasher, layout.size, drop)
2514                .map_err(CustomError::Custom)?;
2515            Ok(())
2516        } else {
2517            // Otherwise, conservatively resize to at least the next size up
2518            // to avoid churning deletes into frequent rehashes.
2519            //
2520            // SAFETY:
2521            // 1. We know for sure that `capacity >= self.items`.
2522            // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
2523            //    [`TableLayout`] that were used to allocate this table.
2524            // 3. The caller ensures that the control bytes of the `RawTableInner`
2525            //    are already initialized.
2526            self.resize_inner(
2527                cx,
2528                alloc,
2529                usize::max(new_items, full_capacity + 1),
2530                hasher,
2531                layout,
2532            )
2533        }
2534    }
2535
2536    /// Returns an iterator over the indices of full buckets in the table.
2537    ///
2538    /// # Safety
2539    ///
2540    /// Behavior is undefined if any of the following conditions are violated:
2541    ///
2542    /// * The caller has to ensure that the `RawTableInner` outlives the
2543    ///   `FullBucketsIndices`. Because we cannot make the `next` method
2544    ///   unsafe on the `FullBucketsIndices` struct, we have to make the
2545    ///   `full_buckets_indices` method unsafe.
2546    ///
2547    /// * The [`RawTableInner`] must have properly initialized control bytes.
2548    #[inline(always)]
2549    unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
2550        // SAFETY:
2551        // 1. The caller of this function ensures that the control bytes
2552        //    are properly initialized and `self.ctrl(0)` points to the start
2553        //    of the array of control bytes. Therefore `ctrl` is valid for reads,
2554        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2555        //    control bytes.
2556        // 2. The value of `items` is equal to the amount of data (values) added
2557        //    to the table.
2558        //
2559        //                         `ctrl` points here (to the start
2560        //                         of the first control byte `CT0`)
2561        //                          ∨
2562        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2563        //                           \________  ________/
2564        //                                    \/
2565        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2566        //
2567        // where: T0...T_n  - our stored data;
2568        //        CT0...CT_n - control bytes or metadata for `data`.
2569        let ctrl = NonNull::new_unchecked(self.ctrl(0));
2570
2571        FullBucketsIndices {
2572            // Load the first group
2573            // SAFETY: See explanation above.
2574            current_group: Group::load_aligned(ctrl.as_ptr()).match_full().into_iter(),
2575            group_first_index: 0,
2576            ctrl,
2577            items: self.items,
2578        }
2579    }
2580
2581    /// Allocates a new table of a different size and moves the contents of the
2582    /// current table into it.
2583    ///
2584    /// This uses dynamic dispatch to reduce the amount of
2585    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2586    ///
2587    /// # Safety
2588    ///
2589    /// If any of the following conditions are violated, the result is
2590    /// [`undefined behavior`]:
2591    ///
2592    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2593    ///   to allocate this table;
2594    ///
2595    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2596    ///   used to allocate this table;
2597    ///
2598    /// * The [`RawTableInner`] must have properly initialized control bytes.
2599    ///
2600    /// The caller of this function must ensure that `capacity >= self.items`
2601    /// otherwise:
2602    ///
2603    /// * If `self.items != 0`, calling this function with `capacity == 0`
2604    ///   results in [`undefined behavior`].
2605    ///
2606    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
2607    ///   `self.items > capacity_to_buckets(capacity)` calling this function
2608    ///   results in [`undefined behavior`].
2609    ///
2610    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
2611    ///   `self.items > capacity_to_buckets(capacity)`, this function will
2612    ///   never return (it will go into an infinite loop).
2613    ///
2614    /// Note: It is recommended (but not required) that the new table's `capacity`
2615    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
2616    /// this function can never return. See [`RawTableInner::find_insert_slot`] for
2617    /// more information.
2618    ///
2619    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
2620    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2621    #[allow(clippy::inline_always)]
2622    #[inline(always)]
2623    unsafe fn resize_inner<C: ?Sized, E, A>(
2624        &mut self,
2625        cx: &mut C,
2626        alloc: &A,
2627        capacity: usize,
2628        hasher: &dyn Fn(&mut C, &mut Self, usize) -> Result<u64, E>,
2629        layout: TableLayout,
2630    ) -> Result<(), CustomError<E>>
2631    where
2632        A: Allocator,
2633    {
2634        // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
2635        // that were used to allocate this table.
2636        let mut new_table = self.prepare_resize(alloc, layout, capacity)?;
2637
2638        // SAFETY: We know for sure that RawTableInner will outlive the
2639        // returned `FullBucketsIndices` iterator, and the caller of this
2640        // function ensures that the control bytes are properly initialized.
2641        for full_byte_index in self.full_buckets_indices() {
2642            // This may panic.
2643            let hash = hasher(cx, self, full_byte_index).map_err(CustomError::Custom)?;
2644
2645            // We can use a simpler version of insert() here since:
2646            // - there are no DELETED entries.
2647            // - we know there is enough space in the table.
2648            // - all elements are unique.
2649            //
2650            // SAFETY:
2651            // 1. The caller of this function guarantees that `capacity > 0`
2652            //    so `new_table` must already have some allocated memory.
2653            // 2. We set `growth_left` and `items` fields of the new table
2654            //    after the loop.
2655            // 3. We insert into the table, at the returned index, the data
2656            //    matching the given hash immediately after calling this function.
2657            let (new_index, _) = new_table.prepare_insert_slot(hash);
2658
2659            // SAFETY:
2660            //
2661            // * `src` is valid for reads of `layout.size` bytes, since the
2662            //   table is alive and the `full_byte_index` is guaranteed to be
2663            //   within bounds (see `FullBucketsIndices::next_impl`);
2664            //
2665            // * `dst` is valid for writes of `layout.size` bytes, since the
2666            //   caller ensures that `table_layout` matches the [`TableLayout`]
2667            //   that was used to allocate old table and we have the `new_index`
2668            //   returned by `prepare_insert_slot`.
2669            //
2670            // * Both `src` and `dst` are properly aligned.
2671            //
2672            // * Both `src` and `dst` point to different regions of memory.
2673            ptr::copy_nonoverlapping(
2674                self.bucket_ptr(full_byte_index, layout.size),
2675                new_table.bucket_ptr(new_index, layout.size),
2676                layout.size,
2677            );
2678        }
2679
2680        // The hash function didn't panic or return an error, so we can safely set
2681        // the `growth_left` and `items` fields of the new table.
2682        new_table.growth_left -= self.items;
2683        new_table.items = self.items;
2684
2685        // We successfully copied all elements without panicking. Now replace
2686        // self with the new table. The old table will have its memory freed but
2687        // the items will not be dropped (since they have been moved into the
2688        // new table).
2689        // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
2690        // that was used to allocate this table.
2691        mem::swap(self, &mut new_table);
2692
2693        Ok(())
2694    }
2695
2696    /// Rehashes the contents of the table in place (i.e. without changing the
2697    /// allocation).
2698    ///
2699    /// If `hasher` panics then some of the table's contents may be lost.
2700    ///
2701    /// This uses dynamic dispatch to reduce the amount of
2702    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2703    ///
2704    /// # Safety
2705    ///
2706    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2707    ///
2708    /// * The `size_of` must be equal to the size of the elements stored in the table;
2709    ///
2710    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2711    ///   the elements stored in the table.
2712    ///
2713    /// * The [`RawTableInner`] has already been allocated;
2714    ///
2715    /// * The [`RawTableInner`] must have properly initialized control bytes.
2716    ///
2717    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2718    #[allow(clippy::inline_always)]
2719    #[cfg_attr(feature = "inline-more", inline(always))]
2720    #[cfg_attr(not(feature = "inline-more"), inline)]
2721    unsafe fn rehash_in_place<C: ?Sized, E>(
2722        &mut self,
2723        cx: &mut C,
2724        hasher: &dyn Fn(&mut C, &mut Self, usize) -> Result<u64, E>,
2725        size_of: usize,
2726        drop: Option<fn(*mut u8)>,
2727    ) -> Result<(), E> {
2728        // If the hash function panics, the guard below cleans up any elements
2729        // that we haven't rehashed yet. We unfortunately can't preserve those
2730        // elements since we lost their hashes and have no way of recovering them
2731        // without risking another panic.
2732        self.prepare_rehash_in_place();
2733
2734        let mut guard = guard(self, move |self_| {
2735            if let Some(drop) = drop {
2736                for i in 0..self_.buckets() {
2737                    if *self_.ctrl(i) == DELETED {
2738                        self_.set_ctrl(i, EMPTY);
2739                        drop(self_.bucket_ptr(i, size_of));
2740                        self_.items -= 1;
2741                    }
2742                }
2743            }
2744            self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
2745        });
2746
2747        // At this point, DELETED elements are elements that we haven't
2748        // rehashed yet. Find them and re-insert them at their ideal
2749        // position.
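        //
        // Each DELETED slot is resolved in one of three ways:
        // - its ideal slot falls in the same (unaligned) probe group, so only
        //   the control byte is rewritten and the element stays put;
        // - the ideal slot is EMPTY, so the element is moved there and the old
        //   slot is marked EMPTY;
        // - the ideal slot holds another not-yet-rehashed (DELETED) element, so
        //   the two are swapped and the displaced element is processed next.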
2750        'outer: for i in 0..guard.buckets() {
2751            if *guard.ctrl(i) != DELETED {
2752                continue;
2753            }
2754
2755            let i_p = guard.bucket_ptr(i, size_of);
2756
2757            'inner: loop {
2758                // Hash the current item
2759                let hash = hasher(cx, *guard, i)?;
2760
2761                // Search for a suitable place to put it
2762                let new_i = guard.find_insert_slot(hash).index;
2763
2764                // Probing works by scanning through all of the control
2765                // bytes in groups, which may not be aligned to the group
2766                // size. If both the new and old position fall within the
2767                // same unaligned group, then there is no benefit in moving
2768                // it and we can just continue to the next item.
2769                if likely(guard.is_in_same_group(i, new_i, hash)) {
2770                    guard.set_ctrl_h2(i, hash);
2771                    continue 'outer;
2772                }
2773
2774                let new_i_p = guard.bucket_ptr(new_i, size_of);
2775
2776                // We are moving the current item to a new position. Write
2777                // our H2 to the control byte of the new position.
2778                let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
2779                if prev_ctrl == EMPTY {
2780                    guard.set_ctrl(i, EMPTY);
2781                    // If the target slot is empty, simply move the current
2782                    // element into the new slot and clear the old control
2783                    // byte.
2784                    ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
2785                    continue 'outer;
2786                } else {
2787                    // If the target slot is occupied, swap the two elements
2788                    // and then continue processing the element that we just
2789                    // swapped into the old slot.
2790                    debug_assert_eq!(prev_ctrl, DELETED);
2791                    ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
2792                    continue 'inner;
2793                }
2794            }
2795        }
2796
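        // By this point every byte that was DELETED at the start of the loop has
        // been rewritten as either a full control byte or EMPTY, so the capacity
        // formula below is valid again.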
2797        guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
2798
2799        mem::forget(guard);
2800        Ok(())
2801    }
2802
2803    /// Deallocates the table without dropping any entries.
2804    ///
2805    /// # Note
2806    ///
2807    /// This function must only be called after [`drop_elements`](RawTable::drop_elements),
2808    /// otherwise it can lead to a memory leak. Calling this function also
2809    /// invalidates (leaves dangling) all [`Bucket`] instances as well as the
2810    /// `ctrl` field of the table.
2811    ///
2812    /// # Safety
2813    ///
2814    /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
2815    ///
2816    /// * The [`RawTableInner`] has already been allocated;
2817    ///
2818    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2819    ///   to allocate this table.
2820    ///
2821    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
2822    ///   to allocate this table.
2823    ///
2824    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2825    ///
2826    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2827    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2828    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2829    #[inline]
2830    unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
2831    where
2832        A: Allocator,
2833    {
2834        // SAFETY: The caller must uphold the safety contract for `free_buckets`
2835        // method.
2836        let (ptr, layout) = self.allocation_info(table_layout);
2837        alloc.deallocate(ptr, layout);
2838    }
2839
2840    /// Returns a pointer to the allocated memory and the layout that was used to
2841    /// allocate the table.
2842    ///
2843    /// # Safety
2844    ///
2845    /// Caller of this function must observe the following safety rules:
2846    ///
2847    /// * The [`RawTableInner`] has already been allocated, otherwise
2848    ///   calling this function results in [`undefined behavior`]
2849    ///
2850    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
2851    ///   that was used to allocate this table. Failure to comply with this condition
2852    ///   may result in [`undefined behavior`].
2853    ///
2854    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2855    ///
2856    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2857    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2858    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2859    #[inline]
2860    unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
2861        debug_assert!(
2862            !self.is_empty_singleton(),
2863            "this function can only be called on non-empty tables"
2864        );
2865
2866        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
2867        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
2868            Some(lco) => lco,
2869            None => unsafe { hint::unreachable_unchecked() },
2870        };
2871        (
2872            // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
2873            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
2874            layout,
2875        )
2876    }
2877
2878    /// Returns a pointer to the allocated memory and the layout that was used to
2879    /// allocate the table. If the [`RawTableInner`] has not been allocated, this
2880    /// function returns a dangling pointer and the layout of the unit type `()`.
2881    ///
2882    /// # Safety
2883    ///
2884    /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
2885    /// that was used to allocate this table. Failure to comply with this condition
2886    /// may result in [`undefined behavior`].
2887    ///
2888    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2889    ///
2890    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2891    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2892    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2893    unsafe fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
2894        if self.is_empty_singleton() {
2895            (NonNull::dangling(), Layout::new::<()>())
2896        } else {
2897            // SAFETY:
2898            // 1. We have checked that our table is allocated.
2899            // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
2900            // that was used to allocate this table.
2901            unsafe { self.allocation_info(table_layout) }
2902        }
2903    }
2904
2905    /// Marks all table buckets as empty without dropping their contents.
2906    #[inline]
2907    fn clear_no_drop(&mut self) {
2908        if !self.is_empty_singleton() {
2909            unsafe {
2910                self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
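                // Note that `num_ctrl_bytes()` also covers the trailing group of
                // mirrored control bytes, so wrapped-around probe loads observe
                // EMPTY as well.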
2911            }
2912        }
2913        self.items = 0;
2914        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
2915    }
2916
2917    /// Erases the [`Bucket`]'s control byte at the given index so that the bucket
2918    /// is no longer treated as full, decreases the table's `items` count and, if
2919    /// possible, increases `self.growth_left`.
2920    ///
2921    /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
2922    /// does not make any changes to the `data` parts of the table. The caller of this
2923    /// function must take care to properly drop the `data`, otherwise calling this
2924    /// function may result in a memory leak.
2925    ///
2926    /// # Safety
2927    ///
2928    /// You must observe the following safety rules when calling this function:
2929    ///
2930    /// * The [`RawTableInner`] has already been allocated;
2931    ///
2932    /// * The control byte at the given `index` must be full;
2933    ///
2934    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2935    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2936    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2937    ///
2938    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2939    ///
2940    /// Calling this function on a table with no elements is unspecified, but calling subsequent
2941    /// functions is likely to result in [`undefined behavior`] due to a subtraction overflow
2942    /// (`self.items -= 1` overflows when `self.items == 0`).
2943    ///
2944    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2945    /// or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
2946    ///
2947    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2948    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2949    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2950    #[inline]
2951    unsafe fn erase(&mut self, index: usize) {
2952        debug_assert!(self.is_bucket_full(index));
2953
2954        // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
2955        // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2956        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
2957        // SAFETY:
2958        // - The caller must uphold the safety contract for `erase` method;
2959        // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
2960        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
2961        let empty_after = Group::load(self.ctrl(index)).match_empty();
2962
2963        // Inserting and searching in the map is performed by two key functions:
2964        //
2965        // - The `find_insert_slot` function that looks up the index of any `EMPTY` or `DELETED`
2966        //   slot in a group to be able to insert. If it doesn't find an `EMPTY` or `DELETED`
2967        //   slot immediately in the first group, it jumps to the next `Group` looking for it,
2968        //   and so on until it has gone through all the groups in the control bytes.
2969        //
2970        // - The `find_inner` function that looks for the index of the desired element by looking
2971        //   at all the `FULL` bytes in the group. If it did not find the element right away, and
2972        //   there is no `EMPTY` byte in the group, then this means that the `find_insert_slot`
2973        //   function may have found a suitable slot in the next group. Therefore, `find_inner`
2974        //   jumps further, and if it does not find the desired element and again there is no `EMPTY`
2975        //   byte, then it jumps further, and so on. The search stops only if `find_inner` function
2976        //   finds the desired element or hits an `EMPTY` slot/byte.
2977        //
2978        // Accordingly, this leads to two consequences:
2979        //
2980        // - The map must have `EMPTY` slots (bytes);
2981        //
2982        // - You can't just mark the byte to be erased as `EMPTY`, because otherwise the `find_inner`
2983        //   function may stumble upon an `EMPTY` byte before finding the desired element and stop
2984        //   searching.
2985        //
2986        // Thus it is necessary to check all bytes after and before the erased element. If we are in
2987        // a contiguous `Group` of `FULL` or `DELETED` bytes (the number of `FULL` or `DELETED` bytes
2988        // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
2989        // `DELETED` in order for the `find_inner` function to go further. On the other hand, if there
2990        // is at least one `EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
2991        // upon an `EMPTY` byte, so we can safely mark our erased byte as `EMPTY` as well.
2992        //
2993        // Finally, since `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`
2994        // and given all of the above, tables smaller than the group width (self.buckets() < Group::WIDTH)
2995        // cannot have `DELETED` bytes.
2996        //
2997        // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
2998        // `trailing_zeros` refers to the bytes at the beginning of a group.
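        //
        // Put differently: `empty_before.leading_zeros()` counts the run of
        // non-EMPTY bytes immediately before `index`, and
        // `empty_after.trailing_zeros()` counts the run of non-EMPTY bytes
        // starting at `index`. If together they span at least `Group::WIDTH`
        // bytes, some probe window containing `index` has no EMPTY byte, so a
        // search may probe past this slot and we must mark it `DELETED`.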
2999        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
3000            DELETED
3001        } else {
3002            self.growth_left += 1;
3003            EMPTY
3004        };
3005        // SAFETY: the caller must uphold the safety contract for `erase` method.
3006        self.set_ctrl(index, ctrl);
3007        self.items -= 1;
3008    }
3009}
3010
3011impl<T, A: Allocator + Clone> TryClone for RawTable<T, A>
3012where
3013    T: TryClone,
3014{
3015    fn try_clone(&self) -> Result<Self, Error> {
3016        if self.table.is_empty_singleton() {
3017            Ok(Self::new_in(self.alloc.clone()))
3018        } else {
3019            unsafe {
3020                // Avoid `Result::ok_or_else` because it bloats LLVM IR.
3021                //
3022                // SAFETY: This is safe as we are taking the size of an already allocated table
3023                // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
3024                // of two, and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
3025                let mut new_table =
3026                    Self::new_uninitialized(self.alloc.clone(), self.table.buckets())?;
3027
3028                // Cloning elements may fail (the clone function may panic). But we don't
3029                // need to worry about uninitialized control bits, since:
3030                // 1. The number of items (elements) in the table is zero, which means that
3031                //    the control bits will not be read by the `Drop` implementation.
3032                // 2. The `clone_from_spec` method will first copy all control bits from
3033                //    `self` (thus initializing them). But this will not affect the `Drop`
3034                //    function, since the `clone_from_spec` function sets `items` only after
3035                //    successfully cloning all elements.
3036                new_table.clone_from_spec(self)?;
3037                Ok(new_table)
3038            }
3039        }
3040    }
3041
3042    fn try_clone_from(&mut self, source: &Self) -> Result<(), Error> {
3043        if source.table.is_empty_singleton() {
3044            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
3045            unsafe {
3046                // SAFETY:
3047                // 1. We call the function only once;
3048                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3049                //    and [`TableLayout`] that were used to allocate this table.
3050                // 3. If any elements' drop function panics, then there will only be a memory leak,
3051                //    because we have replaced the inner table with a new one.
3052                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3053            }
3054        } else {
3055            unsafe {
3056                // Make sure that if any panic occurs, we clear the table and
3057                // leave it in an empty state.
3058                let mut self_ = guard(self, |self_| {
3059                    self_.clear_no_drop();
3060                });
3061
3062                // First, drop all our elements without clearing the control
3063                // bytes. If this panics then the scope guard will clear the
3064                // table, leaking any elements that were not dropped yet.
3065                //
3066                // This leak is unavoidable: we can't try dropping more elements
3067                // since this could lead to another panic and abort the process.
3068                //
3069                // SAFETY: If something goes wrong we clear our table right after
3070                // dropping the elements, so there is no double drop, since `items`
3071                // will be equal to zero.
3072                self_.table.drop_elements::<T>();
3073
3074                // If necessary, resize our table to match the source.
3075                if self_.buckets() != source.buckets() {
3076                    let new_inner = RawTableInner::new_uninitialized(
3077                        &self_.alloc,
3078                        Self::TABLE_LAYOUT,
3079                        source.buckets(),
3080                    )?;
3081                    // Replace the old inner table with the new uninitialized one. This is okay: if something
3082                    // goes wrong, the `ScopeGuard` will initialize all control bytes and leave an empty table.
3083                    let mut old_inner = mem::replace(&mut self_.table, new_inner);
3084                    if !old_inner.is_empty_singleton() {
3085                        // SAFETY:
3086                        // 1. We have checked that our table is allocated.
3087                        // 2. We know for sure that `alloc` and `table_layout` matches
3088                        // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
3089                        old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
3090                    }
3091                }
3092
3093                // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
3094                // inside the `clone_from_impl` function will take care of that, dropping all
3095                // cloned elements if necessary. Our `ScopeGuard` will clear the table.
3096                self_.clone_from_spec(source)?;
3097
3098                // Disarm the scope guard if cloning was successful.
3099                ScopeGuard::into_inner(self_);
3100            }
3101        }
3102
3103        Ok(())
3104    }
3105}
3106
3107#[cfg(test)]
3108impl<T, A: Allocator + Clone> Clone for RawTable<T, A>
3109where
3110    T: TryClone,
3111{
3112    fn clone(&self) -> Self {
3113        self.try_clone().abort()
3114    }
3115
3116    fn clone_from(&mut self, source: &Self) {
3117        self.try_clone_from(source).abort()
3118    }
3119}
3120
3121/// Specialization of `clone_from` for `Copy` types
3122trait RawTableClone {
3123    unsafe fn clone_from_spec(&mut self, source: &Self) -> Result<(), Error>;
3124}
3125impl<T: TryClone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3126    default_fn! {
3127        #[cfg_attr(feature = "inline-more", inline)]
3128        unsafe fn clone_from_spec(&mut self, source: &Self) -> Result<(), Error> {
3129            self.clone_from_impl(source)
3130        }
3131    }
3132}
3133#[cfg(rune_nightly)]
3134impl<T: TryCopy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3135    #[cfg_attr(feature = "inline-more", inline)]
3136    unsafe fn clone_from_spec(&mut self, source: &Self) -> Result<(), Error> {
3137        source
3138            .table
3139            .ctrl(0)
3140            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3141        source
3142            .data_start()
3143            .as_ptr()
3144            .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
3145
3146        self.table.items = source.table.items;
3147        self.table.growth_left = source.table.growth_left;
3148        Ok(())
3149    }
3150}
3151
3152impl<T: TryClone, A: Allocator + Clone> RawTable<T, A> {
3153    /// Common code for clone and clone_from. Assumes:
3154    /// - `self.buckets() == source.buckets()`.
3155    /// - Any existing elements have been dropped.
3156    /// - The control bytes are not initialized yet.
3157    #[cfg_attr(feature = "inline-more", inline)]
3158    unsafe fn clone_from_impl(&mut self, source: &Self) -> Result<(), Error> {
3159        // Copy the control bytes unchanged. We do this in a single pass over the whole control byte array.
3160        source
3161            .table
3162            .ctrl(0)
3163            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3164
3165        // The cloning of elements may panic, in which case we need
3166        // to make sure we drop only the elements that have been
3167        // cloned so far.
3168        let mut guard = guard((0, &mut *self), |(index, self_)| {
3169            if T::NEEDS_DROP {
3170                for i in 0..*index {
3171                    if self_.is_bucket_full(i) {
3172                        self_.bucket(i).drop();
3173                    }
3174                }
3175            }
3176        });
3177
3178        for from in source.iter() {
3179            let index = source.bucket_index(&from);
3180            let to = guard.1.bucket(index);
3181            to.write(from.as_ref().try_clone()?);
3182
3183            // Update the index in case we need to unwind.
3184            guard.0 = index + 1;
3185        }
3186
3187        // Successfully cloned all items, no need to clean up.
3188        mem::forget(guard);
3189
3190        self.table.items = source.table.items;
3191        self.table.growth_left = source.table.growth_left;
3192        Ok(())
3193    }
3194
3195    /// Variant of `clone_from` to use when a hasher is available.
3196    pub fn clone_from_with_hasher<C: ?Sized, E>(
3197        &mut self,
3198        cx: &mut C,
3199        source: &Self,
3200        hasher: impl HasherFn<C, T, E>,
3201    ) -> Result<(), CustomError<E>> {
3202        // If we have enough capacity in the table, just clear it and insert
3203        // elements one by one. We don't do this if we have the same number of
3204        // buckets as the source since we can just copy the contents directly
3205        // in that case.
3206        if self.table.buckets() != source.table.buckets()
3207            && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len()
3208        {
3209            self.clear();
3210
3211            let mut guard_self = guard(&mut *self, |self_| {
3212                // Clear the partially copied table if a panic occurs, otherwise
3213                // items and growth_left will be out of sync with the contents
3214                // of the table.
3215                self_.clear();
3216            });
3217
3218            unsafe {
3219                for item in source.iter() {
3220                    // This may panic.
3221                    let item = item.as_ref().try_clone()?;
3222                    let hash = hasher.hash(cx, &item).map_err(CustomError::Custom)?;
3223
3224                    // We can use a simpler version of insert() here since:
3225                    // - there are no DELETED entries.
3226                    // - we know there is enough space in the table.
3227                    // - all elements are unique.
3228                    let (index, _) = guard_self.table.prepare_insert_slot(hash);
3229                    guard_self.bucket(index).write(item);
3230                }
3231            }
3232
3233            // Successfully cloned all items, no need to clean up.
3234            mem::forget(guard_self);
3235
3236            self.table.items = source.table.items;
3237            self.table.growth_left -= source.table.items;
3238        } else {
3239            self.try_clone_from(source)?;
3240        }
3241
3242        Ok(())
3243    }
3244}
3245
3246impl<T, A: Allocator + Default> Default for RawTable<T, A> {
3247    #[inline]
3248    fn default() -> Self {
3249        Self::new_in(Default::default())
3250    }
3251}
3252
3253#[cfg(rune_nightly)]
3254unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
3255    #[cfg_attr(feature = "inline-more", inline)]
3256    fn drop(&mut self) {
3257        unsafe {
3258            // SAFETY:
3259            // 1. We call the function only once;
3260            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3261            //    and [`TableLayout`] that were used to allocate this table.
3262            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3263            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3264            //    so there won't be any table left in an inconsistent state.
3265            self.table
3266                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3267        }
3268    }
3269}
3270#[cfg(not(rune_nightly))]
3271impl<T, A: Allocator> Drop for RawTable<T, A> {
3272    #[cfg_attr(feature = "inline-more", inline)]
3273    fn drop(&mut self) {
3274        unsafe {
3275            // SAFETY:
3276            // 1. We call the function only once;
3277            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3278            //    and [`TableLayout`] that were used to allocate this table.
3279            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3280            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3281            //    so there won't be any table left in an inconsistent state.
3282            self.table
3283                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3284        }
3285    }
3286}
3287
3288impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
3289    type Item = T;
3290    type IntoIter = RawIntoIter<T, A>;
3291
3292    #[cfg_attr(feature = "inline-more", inline)]
3293    fn into_iter(self) -> RawIntoIter<T, A> {
3294        unsafe {
3295            let iter = self.iter();
3296            self.into_iter_from(iter)
3297        }
3298    }
3299}
3300
3301/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
3302/// not track an item count.
3303pub(crate) struct RawIterRange<T> {
3304    // Mask of full buckets in the current group. Bits are cleared from this
3305    // mask as each element is processed.
3306    current_group: BitMaskIter,
3307
3308    // Pointer to the buckets for the current group.
3309    data: Bucket<T>,
3310
3311    // Pointer to the next group of control bytes.
3312    // Must be aligned to the group size.
3313    next_ctrl: *const u8,
3314
3315    // Pointer one past the last control byte of this range.
3316    end: *const u8,
3317}
3318
3319impl<T> RawIterRange<T> {
3320    /// Returns a `RawIterRange` covering a subset of a table.
3321    ///
3322    /// # Safety
3323    ///
3324    /// If any of the following conditions are violated, the result is
3325    /// [`undefined behavior`]:
3326    ///
3327    /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`;
3328    ///
3329    /// * `ctrl` must be properly aligned to the group size (Group::WIDTH);
3330    ///
3331    /// * `ctrl` must point to the array of properly initialized control bytes;
3332    ///
3333    /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
3334    ///
3335    /// * the value of `len` must be less than or equal to the number of table buckets,
3336    ///   and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
3337    ///   must be positive.
3338    ///
3339    /// * The `ctrl.add(len)` pointer must be either in bounds or one
3340    ///   byte past the end of the same [allocated table].
3341    ///
3342    /// * The `len` must be a power of two.
3343    ///
3344    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3345    #[cfg_attr(feature = "inline-more", inline)]
3346    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3347        debug_assert_ne!(len, 0);
3348        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3349        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3350        let end = ctrl.add(len);
3351
3352        // Load the first group and advance ctrl to point to the next group
3353        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3354        let current_group = Group::load_aligned(ctrl).match_full();
3355        let next_ctrl = ctrl.add(Group::WIDTH);
3356
3357        Self {
3358            current_group: current_group.into_iter(),
3359            data,
3360            next_ctrl,
3361            end,
3362        }
3363    }
3364
3365    /// # Safety
3366    /// If DO_CHECK_PTR_RANGE is false, caller must ensure that we never try to iterate
3367    /// after yielding all elements.
3368    #[cfg_attr(feature = "inline-more", inline)]
3369    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3370        loop {
3371            if let Some(index) = self.current_group.next() {
3372                return Some(self.data.next_n(index));
3373            }
3374
3375            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3376                return None;
3377            }
3378
3379            // We might read past self.end up to the next group boundary,
3380            // but this is fine because it only occurs on tables smaller
3381            // than the group size where the trailing control bytes are all
3382            // EMPTY. On larger tables self.end is guaranteed to be aligned
3383            // to the group size (since tables are power-of-two sized).
3384            self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
3385            self.data = self.data.next_n(Group::WIDTH);
3386            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3387        }
3388    }
3389}
3390
3391// We make raw iterators unconditionally Send and Sync, and let the PhantomData
3392// in the actual iterator implementations determine the real Send/Sync bounds.
3393unsafe impl<T> Send for RawIterRange<T> {}
3394unsafe impl<T> Sync for RawIterRange<T> {}
3395
3396impl<T> Clone for RawIterRange<T> {
3397    #[cfg_attr(feature = "inline-more", inline)]
3398    fn clone(&self) -> Self {
3399        Self {
3400            data: self.data.clone(),
3401            next_ctrl: self.next_ctrl,
3402            current_group: self.current_group,
3403            end: self.end,
3404        }
3405    }
3406}
3407
3408impl<T> Iterator for RawIterRange<T> {
3409    type Item = Bucket<T>;
3410
3411    #[cfg_attr(feature = "inline-more", inline)]
3412    fn next(&mut self) -> Option<Bucket<T>> {
3413        unsafe {
3414            // SAFETY: We set the `DO_CHECK_PTR_RANGE` flag to true.
3415            self.next_impl::<true>()
3416        }
3417    }
3418
3419    #[inline]
3420    fn size_hint(&self) -> (usize, Option<usize>) {
3421        // We don't have an item count, so just guess based on the range size.
3422        let remaining_buckets = if self.end > self.next_ctrl {
3423            unsafe { offset_from(self.end, self.next_ctrl) }
3424        } else {
3425            0
3426        };
3427
3428        // Add a group width to include the group we are currently processing.
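        // The lower bound is 0 because the remaining range may contain no full
        // buckets at all; the upper bound assumes every remaining slot is full.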
3429        (0, Some(Group::WIDTH + remaining_buckets))
3430    }
3431}
3432
3433impl<T> FusedIterator for RawIterRange<T> {}
3434
3435/// Iterator which returns a raw pointer to every full bucket in the table.
3436///
3437/// For maximum flexibility this iterator is not bound by a lifetime, but you
3438/// must observe several rules when using it:
3439/// - You must not free the hash table while iterating (including via growing/shrinking).
3440/// - It is fine to erase a bucket that has been yielded by the iterator.
3441/// - Erasing a bucket that has not yet been yielded by the iterator may still
3442///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
3443/// - It is unspecified whether an element inserted after the iterator was
3444///   created will be yielded by that iterator (unless `reflect_insert` is called).
3445/// - The order in which the iterator yields buckets is unspecified and may
3446///   change in the future.
3447pub struct RawIter<T> {
3448    pub(crate) iter: RawIterRange<T>,
3449    items: usize,
3450}
3451
3452impl<T> RawIter<T> {
3453    /// Refresh the iterator so that it reflects a removal from the given bucket.
3454    ///
3455    /// For the iterator to remain valid, this method must be called once
3456    /// for each removed bucket before `next` is called again.
3457    ///
3458    /// This method should be called _before_ the removal is made. It is not necessary to call this
3459    /// method if you are removing an item that this iterator yielded in the past.
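    ///
    /// A minimal sketch of the intended call order (hypothetical setup; assumes
    /// `iter` came from this table's `iter()`, `bucket` was found by a lookup on
    /// the same table, and `erase` is the table's bucket-erasing method):
    ///
    /// ```ignore
    /// unsafe {
    ///     // Tell the iterator about the removal first...
    ///     iter.reflect_remove(&bucket);
    ///     // ...then actually remove the element from the table.
    ///     table.erase(bucket);
    /// }
    /// ```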
3460    pub unsafe fn reflect_remove(&mut self, b: &Bucket<T>) {
3461        self.reflect_toggle_full(b, false);
3462    }
3463
3464    /// Refresh the iterator so that it reflects an insertion into the given bucket.
3465    ///
3466    /// For the iterator to remain valid, this method must be called once
3467    /// for each insert before `next` is called again.
3468    ///
3469    /// This method does not guarantee that an insertion of a bucket with a greater
3470    /// index than the last one yielded will be reflected in the iterator.
3471    ///
3472    /// This method should be called _after_ the given insert is made.
3473    pub unsafe fn reflect_insert(&mut self, b: &Bucket<T>) {
3474        self.reflect_toggle_full(b, true);
3475    }
3476
3477    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
3478    unsafe fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
3479        if b.as_ptr() > self.iter.data.as_ptr() {
3480            // The iterator has already passed the bucket's group.
3481            // So the toggle isn't relevant to this iterator.
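            // (Data buckets are laid out in reverse order before the control
            // bytes, so a larger data address corresponds to a smaller bucket
            // index, which the iterator has already moved past.)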
3482            return;
3483        }
3484
3485        if self.iter.next_ctrl < self.iter.end
3486            && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
3487        {
3488            // The iterator has not yet reached the bucket's group.
3489            // We don't need to reload anything, but we do need to adjust the item count.
3490
3491            if cfg!(debug_assertions) {
3492                // Double-check that the user isn't lying to us by checking the bucket state.
3493                // To do that, we need to find its control byte. We know that self.iter.data is
3494                // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
3495                let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
3496                let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
3497                // This method should be called _before_ a removal, or _after_ an insert,
3498                // so in both cases the ctrl byte should indicate that the bucket is full.
3499                assert!(is_full(*ctrl));
3500            }
3501
3502            if is_insert {
3503                self.items += 1;
3504            } else {
3505                self.items -= 1;
3506            }
3507
3508            return;
3509        }
3510
3511        // The iterator is at the bucket group that the toggled bucket is in.
3512        // We need to do two things:
3513        //
3514        //  - Determine if the iterator already yielded the toggled bucket.
3515        //    If it did, we're done.
3516        //  - Otherwise, update the iterator cached group so that it won't
3517        //    yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
3518        //    We'll also need to update the item count accordingly.
3519        if let Some(index) = self.iter.current_group.0.lowest_set_bit() {
3520            let next_bucket = self.iter.data.next_n(index);
3521            if b.as_ptr() > next_bucket.as_ptr() {
3522                // The toggled bucket is "before" the bucket the iterator would yield next. We
3523                // therefore don't need to do anything --- the iterator has already passed the
3524                // bucket in question.
3525                //
3526                // The item count must already be correct, since a removal or insert "prior" to
3527                // the iterator's position wouldn't affect the item count.
3528            } else {
3529                // The toggled bucket is an upcoming bucket. For a removal we must ensure it does _not_
3530                // get yielded and is no longer counted; for an insert, the opposite.
3531                //
3532                // NOTE: We can't just reload the group here, both since that might reflect
3533                // inserts we've already passed, and because that might inadvertently unset the
3534                // bits for _other_ removals. If we do that, we'd have to also decrement the
3535                // item count for those other bits that we unset. But the presumably subsequent
3536                // call to reflect for those buckets might _also_ decrement the item count.
3537                // Instead, we _just_ flip the bit for the particular bucket the caller asked
3538                // us to reflect.
3539                let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
3540                let was_full = self.iter.current_group.flip(our_bit);
3541                debug_assert_ne!(was_full, is_insert);
3542
3543                if is_insert {
3544                    self.items += 1;
3545                } else {
3546                    self.items -= 1;
3547                }
3548
3549                if cfg!(debug_assertions) {
3550                    if b.as_ptr() == next_bucket.as_ptr() {
3551                        // The removed bucket should no longer be next
3552                        debug_assert_ne!(self.iter.current_group.0.lowest_set_bit(), Some(index));
3553                    } else {
3554                        // We should not have changed what bucket comes next.
3555                        debug_assert_eq!(self.iter.current_group.0.lowest_set_bit(), Some(index));
3556                    }
3557                }
3558            }
3559        } else {
3560            // We must have already iterated past the removed item.
3561        }
3562    }
3563
3564    unsafe fn drop_elements(&mut self) {
3565        if T::NEEDS_DROP && self.items != 0 {
3566            for item in self {
3567                item.drop();
3568            }
3569        }
3570    }
3571}
3572
3573impl<T> Clone for RawIter<T> {
3574    #[cfg_attr(feature = "inline-more", inline)]
3575    fn clone(&self) -> Self {
3576        Self {
3577            iter: self.iter.clone(),
3578            items: self.items,
3579        }
3580    }
3581}
3582
3583impl<T> Iterator for RawIter<T> {
3584    type Item = Bucket<T>;
3585
3586    #[cfg_attr(feature = "inline-more", inline)]
3587    fn next(&mut self) -> Option<Bucket<T>> {
3588        // The inner iterator iterates over buckets, so it can do unnecessary
3589        // work if we have already yielded all items.
3590        if self.items == 0 {
3591            return None;
3592        }
3593
3594        let nxt = unsafe {
3595            // SAFETY: We check number of items to yield using `items` field.
3596            self.iter.next_impl::<false>()
3597        };
3598
3599        debug_assert!(nxt.is_some());
3600        self.items -= 1;
3601
3602        nxt
3603    }
3604
3605    #[inline]
3606    fn size_hint(&self) -> (usize, Option<usize>) {
3607        (self.items, Some(self.items))
3608    }
3609}
3610
3611impl<T> ExactSizeIterator for RawIter<T> {}
3612impl<T> FusedIterator for RawIter<T> {}
3613
3614/// Iterator which returns an index of every full bucket in the table.
3615///
3616/// For maximum flexibility this iterator is not bound by a lifetime, but you
3617/// must observe several rules when using it:
3618/// - You must not free the hash table while iterating (including via growing/shrinking).
3619/// - It is fine to erase a bucket that has been yielded by the iterator.
3620/// - Erasing a bucket that has not yet been yielded by the iterator may still
3621///   result in the iterator yielding the index of that bucket.
3622/// - It is unspecified whether an element inserted after the iterator was
3623///   created will be yielded by that iterator.
3624/// - The order in which the iterator yields indices of the buckets is unspecified
3625///   and may change in the future.
3626pub(crate) struct FullBucketsIndices {
3627    // Mask of full buckets in the current group. Bits are cleared from this
3628    // mask as each element is processed.
3629    current_group: BitMaskIter,
3630
3631    // Index of the first control byte in the current group (relative
3632    // to the start of the control bytes).
3633    group_first_index: usize,
3634
3635    // Pointer to the current group of control bytes.
3636    // Must be aligned to the group size (Group::WIDTH).
3637    ctrl: NonNull<u8>,
3638
3639    // Number of elements in the table.
3640    items: usize,
3641}
3642
3643impl FullBucketsIndices {
3644    /// Advances the iterator and returns the next value.
3645    ///
3646    /// # Safety
3647    ///
3648    /// If any of the following conditions are violated, the result is
3649    /// [`Undefined Behavior`]:
3650    ///
3651    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3652    ///   i.e. table outlives the `FullBucketsIndices`;
3653    ///
3654    /// * It never tries to iterate after getting all elements.
3655    ///
3656    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3657    #[inline(always)]
3658    unsafe fn next_impl(&mut self) -> Option<usize> {
3659        loop {
3660            if let Some(index) = self.current_group.next() {
3661                // The returned `self.group_first_index + index` will always
3662                // be in the range `0..self.buckets()`. See explanation below.
3663                return Some(self.group_first_index + index);
3664            }
3665
3666            // SAFETY: The caller of this function ensures that:
3667            //
3668            // 1. It never tries to iterate after getting all the elements;
3669            // 2. The table is alive and has not been moved;
3670            // 3. The initial `self.ctrl` pointed to the start of the array of control bytes.
3671            //
3672            // Taking the above into account, we always stay within the bounds, because:
3673            //
3674            // 1. For tables no larger than the group width (self.buckets() <= Group::WIDTH),
3675            //    we will never end up in this branch, since we should have already
3676            //    yielded all the elements of the table.
3677            //
3678            // 2. For tables larger than the group width, the number of buckets is a
3679            //    power of two (2 ^ n) and Group::WIDTH is also a power of two (2 ^ k). Since
3680            //    `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`. As we start from
3681            //    the start of the array of control bytes and never try to iterate after
3682            //    getting all the elements, the last `self.ctrl` will point to the group
3683            //    starting at index `self.buckets() - Group::WIDTH`, so `self.current_group.next()`
3684            //    will always contain indices within the range `0..Group::WIDTH`,
3685            //    and the subsequent `self.group_first_index + index` will always return a
3686            //    number less than `self.buckets()`.
3687            self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
3688
3689            // SAFETY: See explanation above.
3690            self.current_group = Group::load_aligned(self.ctrl.as_ptr())
3691                .match_full()
3692                .into_iter();
3693            self.group_first_index += Group::WIDTH;
3694        }
3695    }
3696}
3697
3698impl Iterator for FullBucketsIndices {
3699    type Item = usize;
3700
3701    /// Advances the iterator and returns the next value. It is up to
3702    /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
3703    /// because we cannot make the `next` method unsafe.
3704    #[inline(always)]
3705    fn next(&mut self) -> Option<usize> {
3706        // Return if we already yielded all items.
3707        if self.items == 0 {
3708            return None;
3709        }
3710
3711        let nxt = unsafe {
3712            // SAFETY:
3713            // 1. We check number of items to yield using `items` field.
3714            // 2. The caller ensures that the table is alive and has not moved.
3715            self.next_impl()
3716        };
3717
3718        debug_assert!(nxt.is_some());
3719        self.items -= 1;
3720
3721        nxt
3722    }
3723
3724    #[inline(always)]
3725    fn size_hint(&self) -> (usize, Option<usize>) {
3726        (self.items, Some(self.items))
3727    }
3728}
3729
3730impl ExactSizeIterator for FullBucketsIndices {}
3731impl FusedIterator for FullBucketsIndices {}
3732
3733/// Iterator which consumes a table and returns elements.
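///
/// A minimal usage sketch (not compiled as a doctest), assuming this table type
/// is converted into a `RawIntoIter` through `IntoIterator`, as in upstream
/// `hashbrown`:
///
/// ```ignore
/// let mut table = RawTable::new();
/// let hasher = |_: &mut (), v: &u64| Ok::<_, core::convert::Infallible>(*v);
/// for i in 0..3u64 {
///     table.insert(&mut (), i, i, hasher).abort();
/// }
/// // Consuming iteration: each value is moved out of the table, and the
/// // backing allocation is freed when the iterator is dropped.
/// let mut sum = 0;
/// for value in table.into_iter() {
///     sum += value;
/// }
/// assert_eq!(sum, 3);
/// ```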
3734pub struct RawIntoIter<T, A: Allocator = Global> {
3735    iter: RawIter<T>,
3736    allocation: Option<(NonNull<u8>, Layout, A)>,
3737    marker: PhantomData<T>,
3738}
3739
3740impl<T, A: Allocator> RawIntoIter<T, A> {
3741    #[cfg_attr(feature = "inline-more", inline)]
3742    pub fn iter(&self) -> RawIter<T> {
3743        self.iter.clone()
3744    }
3745}
3746
3747unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
3748where
3749    T: Send,
3750    A: Send,
3751{
3752}
3753unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
3754where
3755    T: Sync,
3756    A: Sync,
3757{
3758}
3759
3760#[cfg(rune_nightly)]
3761unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
3762    #[cfg_attr(feature = "inline-more", inline)]
3763    fn drop(&mut self) {
3764        unsafe {
3765            // Drop all remaining elements
3766            self.iter.drop_elements();
3767
3768            // Free the table
3769            if let Some((ptr, layout, ref alloc)) = self.allocation {
3770                alloc.deallocate(ptr, layout);
3771            }
3772        }
3773    }
3774}
3775#[cfg(not(rune_nightly))]
3776impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
3777    #[cfg_attr(feature = "inline-more", inline)]
3778    fn drop(&mut self) {
3779        unsafe {
3780            // Drop all remaining elements
3781            self.iter.drop_elements();
3782
3783            // Free the table
3784            if let Some((ptr, layout, ref alloc)) = self.allocation {
3785                alloc.deallocate(ptr, layout);
3786            }
3787        }
3788    }
3789}
3790
3791impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
3792    type Item = T;
3793
3794    #[cfg_attr(feature = "inline-more", inline)]
3795    fn next(&mut self) -> Option<T> {
3796        unsafe { Some(self.iter.next()?.read()) }
3797    }
3798
3799    #[inline]
3800    fn size_hint(&self) -> (usize, Option<usize>) {
3801        self.iter.size_hint()
3802    }
3803}
3804
3805impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
3806impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
3807
3808/// Iterator which consumes elements without freeing the table storage.
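///
/// A minimal sketch of the intended behavior (not compiled as a doctest),
/// assuming the usual `RawTable::drain` constructor from upstream `hashbrown`:
///
/// ```ignore
/// // Draining moves every element out but keeps the allocation, so the table
/// // can be refilled without reallocating.
/// let drained = table.drain().count();
/// assert_eq!(table.len(), 0);
/// assert!(table.capacity() >= drained);
/// ```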
3809pub struct RawDrain<'a, T, A: Allocator = Global> {
3810    iter: RawIter<T>,
3811
3812    // The table is moved into the iterator for the duration of the drain. This
3813    // ensures that an empty table is left if the drain iterator is leaked
3814    // without dropping.
3815    table: RawTableInner,
3816    orig_table: NonNull<RawTableInner>,
3817
3818    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
3819    // covariant over T.
3820    marker: PhantomData<&'a RawTable<T, A>>,
3821}
3822
3823impl<T, A: Allocator> RawDrain<'_, T, A> {
3824    #[cfg_attr(feature = "inline-more", inline)]
3825    pub fn iter(&self) -> RawIter<T> {
3826        self.iter.clone()
3827    }
3828}
3829
3830unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
3831where
3832    T: Send,
3833    A: Send,
3834{
3835}
3836unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
3837where
3838    T: Sync,
3839    A: Sync,
3840{
3841}
3842
3843impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
3844    #[cfg_attr(feature = "inline-more", inline)]
3845    fn drop(&mut self) {
3846        unsafe {
3847            // Drop all remaining elements. Note that this may panic.
3848            self.iter.drop_elements();
3849
3850            // Reset the contents of the table now that all elements have been
3851            // dropped.
3852            self.table.clear_no_drop();
3853
3854            // Move the now empty table back to its original location.
3855            self.orig_table
3856                .as_ptr()
3857                .copy_from_nonoverlapping(&self.table, 1);
3858        }
3859    }
3860}
3861
3862impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
3863    type Item = T;
3864
3865    #[cfg_attr(feature = "inline-more", inline)]
3866    fn next(&mut self) -> Option<T> {
3867        unsafe {
3868            let item = self.iter.next()?;
3869            Some(item.read())
3870        }
3871    }
3872
3873    #[inline]
3874    fn size_hint(&self) -> (usize, Option<usize>) {
3875        self.iter.size_hint()
3876    }
3877}
3878
3879impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
3880impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
3881
3882/// Iterator over occupied buckets that could match a given hash.
3883///
3884/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
3885/// items that have a hash value different from the one provided. You should
3886/// always validate the returned values before using them.
3887///
3888/// For maximum flexibility this iterator is not bound by a lifetime, but you
3889/// must observe several rules when using it:
3890/// - You must not free the hash table while iterating (including via growing/shrinking).
3891/// - It is fine to erase a bucket that has been yielded by the iterator.
3892/// - Erasing a bucket that has not yet been yielded by the iterator may still
3893///   result in the iterator yielding that bucket.
3894/// - It is unspecified whether an element inserted after the iterator was
3895///   created will be yielded by that iterator.
3896/// - The order in which the iterator yields buckets is unspecified and may
3897///   change in the future.
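///
/// A hedged usage sketch (not compiled as a doctest), assuming the
/// `RawTable::iter_hash` constructor from upstream `hashbrown`:
///
/// ```ignore
/// // Buckets yielded for `hash` are only candidates: the control bytes keep
/// // just 7 bits of the hash, so every candidate must be re-checked.
/// for bucket in unsafe { table.iter_hash(hash) } {
///     let (k, _v) = unsafe { bucket.as_ref() };
///     if *k == key {
///         // `bucket` really holds the entry for `key`.
///     }
/// }
/// ```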
3898pub struct RawIterHash<T> {
3899    inner: RawIterHashInner,
3900    _marker: PhantomData<T>,
3901}
3902
3903struct RawIterHashInner {
3904    // See `RawTableInner`'s corresponding fields for details.
3905    // We can't store a `*const RawTableInner` as it would get
3906    // invalidated by the user calling `&mut` methods on `RawTable`.
3907    bucket_mask: usize,
3908    ctrl: NonNull<u8>,
3909
3910    // The top 7 bits of the hash.
3911    h2_hash: u8,
3912
3913    // The sequence of groups to probe in the search.
3914    probe_seq: ProbeSeq,
3915
3916    group: Group,
3917
3918    // The elements within the group with a matching h2-hash.
3919    bitmask: BitMaskIter,
3920}
3921
3922impl<T> RawIterHash<T> {
3923    #[cfg_attr(feature = "inline-more", inline)]
3924    unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
3925        RawIterHash {
3926            inner: RawIterHashInner::new(&table.table, hash),
3927            _marker: PhantomData,
3928        }
3929    }
3930}
3931impl RawIterHashInner {
3932    #[cfg_attr(feature = "inline-more", inline)]
3933    unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
3934        let h2_hash = h2(hash);
3935        let probe_seq = table.probe_seq(hash);
3936        let group = Group::load(table.ctrl(probe_seq.pos));
3937        let bitmask = group.match_byte(h2_hash).into_iter();
3938
3939        RawIterHashInner {
3940            bucket_mask: table.bucket_mask,
3941            ctrl: table.ctrl,
3942            h2_hash,
3943            probe_seq,
3944            group,
3945            bitmask,
3946        }
3947    }
3948}
3949
3950impl<T> Iterator for RawIterHash<T> {
3951    type Item = Bucket<T>;
3952
3953    fn next(&mut self) -> Option<Bucket<T>> {
3954        unsafe {
3955            match self.inner.next() {
3956                Some(index) => {
3957                    // Can't use `RawTable::bucket` here as we don't have
3958                    // an actual `RawTable` reference to use.
3959                    debug_assert!(index <= self.inner.bucket_mask);
3960                    let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
3961                    Some(bucket)
3962                }
3963                None => None,
3964            }
3965        }
3966    }
3967}
3968
3969impl Iterator for RawIterHashInner {
3970    type Item = usize;
3971
3972    fn next(&mut self) -> Option<Self::Item> {
3973        unsafe {
3974            loop {
3975                if let Some(bit) = self.bitmask.next() {
3976                    let index = (self.probe_seq.pos + bit) & self.bucket_mask;
3977                    return Some(index);
3978                }
3979                if likely(self.group.match_empty().any_bit_set()) {
3980                    return None;
3981                }
3982                self.probe_seq.move_next(self.bucket_mask);
3983
3984                // Can't use `RawTableInner::ctrl` here as we don't have
3985                // an actual `RawTableInner` reference to use.
3986                let index = self.probe_seq.pos;
3987                debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
3988                let group_ctrl = self.ctrl.as_ptr().add(index);
3989
3990                self.group = Group::load(group_ctrl);
3991                self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
3992            }
3993        }
3994    }
3995}
3996
3997#[cfg(test)]
3998mod test_map {
3999    use super::*;
4000
4001    use crate::alloc::into_ok;
4002    use core::convert::Infallible;
4003
4004    fn rehash_in_place<T>(
4005        table: &mut RawTable<T>,
4006        hasher: impl Fn(&mut (), &T) -> Result<u64, Infallible>,
4007    ) {
4008        unsafe {
4009            into_ok(table.table.rehash_in_place(
4010                &mut (),
4011                &move |cx, table, index| hasher(cx, table.bucket::<T>(index).as_ref()),
4012                mem::size_of::<T>(),
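                // When `T` needs dropping, pass a type-erased drop function:
                // `ptr::drop_in_place::<T>` is transmuted from `unsafe fn(*mut T)`
                // to `fn(*mut u8)` so that the untyped table internals can drop
                // elements without knowing `T`.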
4013                if mem::needs_drop::<T>() {
4014                    Some(mem::transmute::<unsafe fn(*mut T), fn(*mut u8)>(
4015                        ptr::drop_in_place::<T> as unsafe fn(*mut T),
4016                    ))
4017                } else {
4018                    None
4019                },
4020            ));
4021        }
4022    }
4023
4024    #[test]
4025    fn rehash() {
4026        let mut table = RawTable::new();
4027        let hasher = |_: &mut (), i: &u64| Ok(*i);
4028        for i in 0..100 {
4029            table.insert(&mut (), i, i, hasher).abort();
4030        }
4031
4032        for i in 0..100 {
4033            unsafe {
4034                assert_eq!(
4035                    into_ok(table.find(&mut (), i, |_: &mut (), x: &u64| Ok(*x == i)))
4036                        .map(|b| b.read()),
4037                    Some(i)
4038                );
4039            }
4040            assert!(
4041                into_ok(table.find(&mut (), i + 100, |_: &mut (), x: &u64| Ok(*x == i + 100)))
4042                    .is_none()
4043            );
4044        }
4045
4046        rehash_in_place(&mut table, hasher);
4047
4048        for i in 0..100 {
4049            unsafe {
4050                assert_eq!(
4051                    into_ok(table.find(&mut (), i, |_: &mut (), x: &u64| Ok(*x == i)))
4052                        .map(|b| b.read()),
4053                    Some(i)
4054                );
4055            }
4056            assert!(
4057                into_ok(table.find(&mut (), i + 100, |_: &mut (), x: &u64| Ok(*x == i + 100)))
4058                    .is_none()
4059            );
4060        }
4061    }
4062
4063    /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
4064    /// AN UNINITIALIZED TABLE DURING THE DROP
4065    #[test]
4066    fn test_drop_uninitialized() {
4067        use ::rust_alloc::vec::Vec;
4068
4069        let table = unsafe {
4070            // SAFETY: `buckets` is a power of two and we're not
4071            // trying to actually use the returned RawTable.
4072            RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8).unwrap()
4073        };
4074        drop(table);
4075    }
4076
4077    /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
4078    /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
4079    #[test]
4080    fn test_drop_zero_items() {
4081        use ::rust_alloc::vec::Vec;
4082        unsafe {
4083            // SAFETY: `buckets` is a power of two and we're not
4084            // trying to actually use the returned RawTable.
4085            let table = RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8).unwrap();
4086
4087            // WE SIMULATE, AS IT WERE, A FULL TABLE.
4088
4089            // SAFETY: We checked that the table is allocated, and therefore it already has
4090            // `self.bucket_mask + 1 + Group::WIDTH` control bytes (see TableLayout::calculate_layout_for),
4091            // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
4092            table
4093                .table
4094                .ctrl(0)
4095                .write_bytes(EMPTY, table.table.num_ctrl_bytes());
4096
4097            // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
4098            table.table.ctrl(0).write_bytes(0, table.capacity());
4099
4100            // Fix up the trailing control bytes. See the comments in set_ctrl
4101            // for the handling of tables smaller than the group width.
4102            if table.buckets() < Group::WIDTH {
4103                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control bytes,
4104                // so copying `self.buckets() == self.bucket_mask + 1` bytes with an
4105                // offset equal to `Group::WIDTH` is safe.
4106                table
4107                    .table
4108                    .ctrl(0)
4109                    .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
4110            } else {
4111                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control bytes,
4112                // so copying `Group::WIDTH` bytes with an offset equal to
4113                // `self.buckets() == self.bucket_mask + 1` is safe.
4114                table
4115                    .table
4116                    .ctrl(0)
4117                    .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
4118            }
4119            drop(table);
4120        }
4121    }
4122
4123    /// CHECKING THAT A PANIC DURING `clone_from` DOES NOT LEAK ELEMENTS OR
4124    /// ALLOCATORS, AND LEAVES THE DESTINATION TABLE EMPTY.
4125    #[test]
4126    fn test_catch_panic_clone_from() {
4127        use crate::alloc::{AllocError, Allocator};
4128        use ::rust_alloc::sync::Arc;
4129        use ::rust_alloc::vec::Vec;
4130        use core::sync::atomic::{AtomicI8, Ordering};
4131        use std::thread;
4132
4133        struct MyAllocInner {
4134            drop_count: Arc<AtomicI8>,
4135        }
4136
4137        #[derive(Clone)]
4138        struct MyAlloc {
4139            _inner: Arc<MyAllocInner>,
4140        }
4141
4142        impl Drop for MyAllocInner {
4143            fn drop(&mut self) {
4144                std::println!("MyAlloc freed.");
4145                self.drop_count.fetch_sub(1, Ordering::SeqCst);
4146            }
4147        }
4148
4149        unsafe impl Allocator for MyAlloc {
4150            fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
4151                let g = Global;
4152                g.allocate(layout)
4153            }
4154
4155            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
4156                let g = Global;
4157                g.deallocate(ptr, layout)
4158            }
4159        }
4160
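        // `ARMED` marks an element whose `try_clone` will panic (see
        // `CheckedCloneDrop::panic_in_clone`); `DISARMED` elements clone normally.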
4161        const DISARMED: bool = false;
4162        const ARMED: bool = true;
4163
4164        struct CheckedCloneDrop {
4165            panic_in_clone: bool,
4166            dropped: bool,
4167            need_drop: Vec<u64>,
4168        }
4169
4170        impl TryClone for CheckedCloneDrop {
4171            fn try_clone(&self) -> Result<Self, Error> {
4172                if self.panic_in_clone {
4173                    panic!("panic in clone")
4174                }
4175                Ok(Self {
4176                    panic_in_clone: self.panic_in_clone,
4177                    dropped: self.dropped,
4178                    need_drop: self.need_drop.clone(),
4179                })
4180            }
4181        }
4182
4183        impl Drop for CheckedCloneDrop {
4184            fn drop(&mut self) {
4185                if self.dropped {
4186                    panic!("double drop");
4187                }
4188                self.dropped = true;
4189            }
4190        }
4191
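        // The counter starts at 2: one for the allocator of `table` created below
        // and one for the allocator of `scope_table` created in the spawned
        // thread. Each `MyAllocInner::drop` decrements it by one.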
4192        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
4193
4194        let mut table = RawTable::new_in(MyAlloc {
4195            _inner: Arc::new(MyAllocInner {
4196                drop_count: dropped.clone(),
4197            }),
4198        });
4199
4200        for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
4201            let idx = idx as u64;
4202            table
4203                .insert(
4204                    &mut (),
4205                    idx,
4206                    (
4207                        idx,
4208                        CheckedCloneDrop {
4209                            panic_in_clone,
4210                            dropped: false,
4211                            need_drop: ::rust_alloc::vec![idx],
4212                        },
4213                    ),
4214                    |_: &mut (), (k, _): &(u64, _)| Ok::<_, Infallible>(*k),
4215                )
4216                .abort();
4217        }
4218
4219        assert_eq!(table.len(), 7);
4220
4221        thread::scope(|s| {
4222            let result = s.spawn(|| {
4223                let armed_flags = [
4224                    DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
4225                ];
4226                let mut scope_table = RawTable::new_in(MyAlloc {
4227                    _inner: Arc::new(MyAllocInner {
4228                        drop_count: dropped.clone(),
4229                    }),
4230                });
4231                for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
4232                    let idx = idx as u64;
4233                    scope_table
4234                        .insert(
4235                            &mut (),
4236                            idx,
4237                            (
4238                                idx,
4239                                CheckedCloneDrop {
4240                                    panic_in_clone,
4241                                    dropped: false,
4242                                    need_drop: ::rust_alloc::vec![idx + 100],
4243                                },
4244                            ),
4245                            |_: &mut (), (k, _): &(u64, _)| Ok::<_, Infallible>(*k),
4246                        )
4247                        .abort();
4248                }
4249                table.clone_from(&scope_table);
4250            });
4251            assert!(result.join().is_err());
4252        });
4253
4254        // Let's check that all iterators work fine and do not return elements
4255        // (especially `RawIterRange`, which does not depend on the number of
4256        // elements in the table, but looks directly at the control bytes)
4257        //
4258        // SAFETY: We know for sure that `RawTable` will outlive
4259        // the returned `RawIter / RawIterRange` iterator.
4260        assert_eq!(table.len(), 0);
4261        assert_eq!(unsafe { table.iter().count() }, 0);
4262        assert_eq!(unsafe { table.iter().iter.count() }, 0);
4263
4264        for idx in 0..table.buckets() {
4265            let idx = idx as u64;
4266            assert!(
4267                into_ok(table.find(&mut (), idx, |_: &mut (), (k, _): &(u64, _)| Ok(*k == idx)))
4268                    .is_none(),
4269                "Index: {idx}"
4270            );
4271        }
4272
4273        // The scoped table's allocator clone should already be dropped; only the original `table`'s allocator remains alive.
4274        assert_eq!(dropped.load(Ordering::SeqCst), 1);
4275    }
4276}