// indexmap/inner.rs

1//! This is the core implementation that doesn't depend on the hasher at all.
2//!
3//! The methods of `Core` don't use any Hash properties of K.
4//!
5//! It's cleaner to separate them out, then the compiler checks that we are not
6//! using Hash at all in these methods.
7//!
8//! However, we should probably not let this show in the public API or docs.
9
10mod entry;
11mod extract;
12
13use alloc::vec::{self, Vec};
14use core::mem;
15use core::ops::RangeBounds;
16use hashbrown::hash_table;
17
18use crate::util::simplify_range;
19use crate::{Bucket, Equivalent, HashValue, TryReserveError};
20
/// Hash table storing positions into `entries`, keyed by the entry's hash.
type Indices = hash_table::HashTable<usize>;
/// Dense vector of buckets holding the entries in insertion order.
type Entries<K, V> = Vec<Bucket<K, V>>;
23
24pub use entry::{OccupiedEntry, VacantEntry};
25pub(crate) use extract::ExtractCore;
26
/// Core of the map that does not depend on S
///
/// Invariant: `indices` and `entries` always have equal lengths (checked by
/// `len`'s debug assertion), and every value stored in `indices` is a valid
/// position into `entries`.
#[cfg_attr(feature = "test_debug", derive(Debug))]
pub(crate) struct Core<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: Indices,
    /// entries is a dense vec maintaining entry order.
    entries: Entries<K, V>,
}
35
36#[inline(always)]
37fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + use<'_, K, V> {
38    move |&i| entries[i].hash.get()
39}
40
41#[inline]
42fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
43    key: &'a Q,
44    entries: &'a [Bucket<K, V>],
45) -> impl Fn(&usize) -> bool + use<'a, K, V, Q> {
46    move |&i| Q::equivalent(key, &entries[i].key)
47}
48
49#[inline]
50fn erase_index(table: &mut Indices, hash: HashValue, index: usize) {
51    if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) {
52        entry.remove();
53    } else if cfg!(debug_assertions) {
54        panic!("index not found");
55    }
56}
57
58#[inline]
59fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) {
60    let index = table
61        .find_mut(hash.get(), move |&i| i == old)
62        .expect("index not found");
63    *index = new;
64}
65
66/// Inserts many entries into the indices table without reallocating,
67/// and without regard for duplication.
68///
69/// ***Panics*** if there is not sufficient capacity already.
70fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) {
71    assert!(indices.capacity() - indices.len() >= entries.len());
72    for entry in entries {
73        indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!());
74    }
75}
76
77impl<K, V> Clone for Core<K, V>
78where
79    K: Clone,
80    V: Clone,
81{
82    fn clone(&self) -> Self {
83        let mut new = Self::new();
84        new.clone_from(self);
85        new
86    }
87
88    fn clone_from(&mut self, other: &Self) {
89        self.indices.clone_from(&other.indices);
90        if self.entries.capacity() < other.entries.len() {
91            // If we must resize, match the indices capacity.
92            let additional = other.entries.len() - self.entries.len();
93            self.reserve_entries(additional);
94        }
95        self.entries.clone_from(&other.entries);
96    }
97}
98
99impl<K, V> Core<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
    ///
    /// Used as a soft limit when rounding entry capacity up to match
    /// `indices` (see `reserve_entries` / `try_reserve_entries`).
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / size_of::<Bucket<K, V>>();
102
103    #[inline]
104    pub(crate) const fn new() -> Self {
105        Core {
106            indices: Indices::new(),
107            entries: Vec::new(),
108        }
109    }
110
111    #[inline]
112    pub(crate) fn with_capacity(n: usize) -> Self {
113        Core {
114            indices: Indices::with_capacity(n),
115            entries: Vec::with_capacity(n),
116        }
117    }
118
119    #[inline]
120    pub(crate) fn into_entries(self) -> Entries<K, V> {
121        self.entries
122    }
123
124    #[inline]
125    pub(crate) fn as_entries(&self) -> &[Bucket<K, V>] {
126        &self.entries
127    }
128
129    #[inline]
130    pub(crate) fn as_entries_mut(&mut self) -> &mut [Bucket<K, V>] {
131        &mut self.entries
132    }
133
134    pub(crate) fn with_entries<F>(&mut self, f: F)
135    where
136        F: FnOnce(&mut [Bucket<K, V>]),
137    {
138        f(&mut self.entries);
139        self.rebuild_hash_table();
140    }
141
142    #[inline]
143    pub(crate) fn len(&self) -> usize {
144        debug_assert_eq!(self.entries.len(), self.indices.len());
145        self.indices.len()
146    }
147
148    #[inline]
149    pub(crate) fn capacity(&self) -> usize {
150        Ord::min(self.indices.capacity(), self.entries.capacity())
151    }
152
153    pub(crate) fn clear(&mut self) {
154        self.indices.clear();
155        self.entries.clear();
156    }
157
158    pub(crate) fn truncate(&mut self, len: usize) {
159        if len < self.len() {
160            self.erase_indices(len, self.entries.len());
161            self.entries.truncate(len);
162        }
163    }
164
    /// Removes the entries in `range`, returning a draining iterator
    /// over them in order.
    ///
    /// ***Panics*** if the range is out of bounds (via `simplify_range`).
    #[track_caller]
    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        // Fix up `indices` first; `Vec::drain` then removes the entries.
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }
174
    /// Parallel version of `drain`, yielding rayon's draining iterator
    /// over the removed entries.
    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        // Same index fix-up as `drain`; only the entry removal is parallel.
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }
187
    /// Splits the map in two at `at`: `self` keeps `..at` and the returned
    /// map holds `at..`, preserving order in both halves.
    ///
    /// ***Panics*** if `at > len`.
    #[track_caller]
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        let len = self.entries.len();
        assert!(
            at <= len,
            "index out of bounds: the len is {len} but the index is {at}. Expected index <= len"
        );

        // Drop the tail's indices from our table before moving the entries out.
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        // Build a fresh index table for the split-off tail.
        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }
203
    /// Splits into three parts: `self` keeps `..range.start`, the entries in
    /// `range` are returned as a plain iterator, and `range.end..` becomes a
    /// new `Self`.
    ///
    /// ***Panics*** if the range is out of bounds (via `simplify_range`).
    #[track_caller]
    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        // Everything from `range.start` onward leaves this map's indices.
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        // Only the tail remains a map and needs its indices rebuilt;
        // the drained middle is handed back as raw entries.
        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }
218
    /// Append from another map without checking whether items already exist.
    ///
    /// Leaves `other` empty. Duplicate keys would corrupt lookup invariants,
    /// so callers must guarantee disjointness.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        // Reserve first so `insert_bulk_no_grow` has guaranteed capacity.
        self.reserve(other.len());
        insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
        // `other.entries` is now empty; clear its indices to match.
        other.indices.clear();
    }
226
    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.reserve_entries(additional);
        }
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    ///
    /// Note: `indices.reserve` may still round up internally; only the
    /// entries vector is reserved exactly.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }
241
242    /// Try to reserve capacity for `additional` more key-value pairs.
243    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
244        self.indices
245            .try_reserve(additional, get_hash(&self.entries))
246            .map_err(TryReserveError::from_hashbrown)?;
247        // Only grow entries if necessary, since we also round up capacity.
248        if additional > self.entries.capacity() - self.entries.len() {
249            self.try_reserve_entries(additional)
250        } else {
251            Ok(())
252        }
253    }
254
255    /// Try to reserve entries capacity, rounded up to match the indices
256    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
257        // Use a soft-limit on the maximum capacity, but if the caller explicitly
258        // requested more, do it and let them have the resulting error.
259        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
260        let try_add = new_capacity - self.entries.len();
261        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
262            return Ok(());
263        }
264        self.entries
265            .try_reserve_exact(additional)
266            .map_err(TryReserveError::from_alloc)
267    }
268
269    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
270    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
271        self.indices
272            .try_reserve(additional, get_hash(&self.entries))
273            .map_err(TryReserveError::from_hashbrown)?;
274        self.entries
275            .try_reserve_exact(additional)
276            .map_err(TryReserveError::from_alloc)
277    }
278
279    /// Shrink the capacity of the map with a lower bound
280    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
281        self.indices
282            .shrink_to(min_capacity, get_hash(&self.entries));
283        self.entries.shrink_to(min_capacity);
284    }
285
286    /// Remove the last key-value pair
287    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
288        if let Some(entry) = self.entries.pop() {
289            let last = self.entries.len();
290            erase_index(&mut self.indices, entry.hash, last);
291            Some((entry.key, entry.value))
292        } else {
293            None
294        }
295    }
296
297    /// Return the index in `entries` where an equivalent key can be found
298    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
299    where
300        Q: ?Sized + Equivalent<K>,
301    {
302        let eq = equivalent(key, &self.entries);
303        self.indices.find(hash.get(), eq).copied()
304    }
305
306    /// Return the index in `entries` where an equivalent key can be found
307    pub(crate) fn get_index_of_raw<F>(&self, hash: HashValue, mut is_match: F) -> Option<usize>
308    where
309        F: FnMut(&K) -> bool,
310    {
311        let eq = move |&i: &usize| is_match(&self.entries[i].key);
312        self.indices.find(hash.get(), eq).copied()
313    }
314
    /// Append a key-value pair to `entries`,
    /// *without* checking whether it already exists.
    ///
    /// The caller is responsible for inserting the matching index into
    /// `indices` (see `insert_full` / `insert_unique`).
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }
325
    /// Insert a key-value pair, returning the entry's index and, when the
    /// key already existed, the previous value.
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                // Existing key: keep its index and position, swap in the value.
                let i = *entry.get();
                (i, Some(mem::replace(&mut self.entries[i].value, value)))
            }
            hash_table::Entry::Vacant(entry) => {
                // New key: record the next entry index, then append the bucket.
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }
346
    /// Same as `insert_full`, except it also replaces the key
    ///
    /// Returns the entry's index and, when an equivalent key already
    /// existed, the previous key-value pair.
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                // Existing key: swap out both halves of the bucket in place.
                let i = *entry.get();
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            hash_table::Entry::Vacant(entry) => {
                // New key: same append path as `insert_full`.
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }
378
    /// Remove an entry by shifting all entries that follow it
    ///
    /// Returns the removed entry's index, key, and value, if found.
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        // Remove the slot from the table first; `shift_remove_finish`
        // expects the index to already be gone from `indices`.
        let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove();
        let (key, value) = self.shift_remove_finish(index);
        Some((index, key, value))
    }

    /// Remove an entry by swapping it with the last
    ///
    /// Returns the removed entry's index, key, and value, if found.
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        // Same table-first removal as above, then an O(1) swap removal.
        let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove();
        let (key, value) = self.swap_remove_finish(index);
        Some((index, key, value))
    }
400
    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.capacity() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices, then shifted indices
            // (the shifted block lands at `start..`, so insertion order
            // alone produces the correct new index values).
            insert_bulk_no_grow(&mut self.indices, start_entries);
            insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            let offset = end - start;
            self.indices.retain(move |i| {
                if *i >= end {
                    // Shifted entry: move it down by the erased count.
                    *i -= offset;
                    true
                } else {
                    // Keep `..start`; drop the erased `start..end`.
                    *i < start
                }
            });
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }
450
451    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
452    where
453        F: FnMut(&mut K, &mut V) -> bool,
454    {
455        self.entries
456            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
457        if self.entries.len() < self.indices.len() {
458            self.rebuild_hash_table();
459        }
460    }
461
    /// Re-derives `indices` from scratch to match the current `entries`.
    ///
    /// Capacity must already suffice; `insert_bulk_no_grow` asserts it.
    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        insert_bulk_no_grow(&mut self.indices, &self.entries);
    }
466
467    pub(crate) fn reverse(&mut self) {
468        self.entries.reverse();
469
470        // No need to save hash indices, can easily calculate what they should
471        // be, given that this is an in-place reversal.
472        let len = self.entries.len();
473        for i in &mut self.indices {
474            *i = len - *i - 1;
475        }
476    }
477
    /// Reserve entries capacity, rounded up to match the indices
    #[inline]
    fn reserve_entries(&mut self, additional: usize) {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting panic.
        let try_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = try_capacity - self.entries.len();
        // Try the larger rounded-up reservation first; on failure fall back
        // to the exact request (which may then panic on allocation failure).
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return;
        }
        self.entries.reserve_exact(additional);
    }
490
491    /// Insert a key-value pair in `entries`,
492    /// *without* checking whether it already exists.
493    pub(super) fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> &mut Bucket<K, V> {
494        let i = self.indices.len();
495        debug_assert_eq!(i, self.entries.len());
496        self.indices
497            .insert_unique(hash.get(), i, get_hash(&self.entries));
498        self.push_entry(hash, key, value);
499        &mut self.entries[i]
500    }
501
    /// Replaces the key at the given index,
    /// *without* checking whether it already exists.
    ///
    /// Updates the stored hash to `hash` and returns the old key.
    #[track_caller]
    pub(crate) fn replace_index_unique(&mut self, index: usize, hash: HashValue, key: K) -> K {
        // NB: This removal and insertion isn't "no grow" (with unreachable hasher)
        // because hashbrown's tombstones might force a resize anyway.
        erase_index(&mut self.indices, self.entries[index].hash, index);
        self.indices
            .insert_unique(hash.get(), index, get_hash(&self.entries));

        let entry = &mut self.entries[index];
        entry.hash = hash;
        mem::replace(&mut entry.key, key)
    }
516
    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether it already exists.
    ///
    /// ***Panics*** if `index > len`.
    pub(crate) fn shift_insert_unique(
        &mut self,
        index: usize,
        hash: HashValue,
        key: K,
        value: V,
    ) -> &mut Bucket<K, V> {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert_unique(hash.get(), index, move |&i| {
            // Adjust for the incremented indices to find hashes.
            // (`entries` has not been updated yet, so any stored index at or
            // above the insertion point refers to the entry one slot lower.)
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
        &mut self.entries[index]
    }
545
546    /// Remove an entry by shifting all entries that follow it
547    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
548        match self.entries.get(index) {
549            Some(entry) => {
550                erase_index(&mut self.indices, entry.hash, index);
551                Some(self.shift_remove_finish(index))
552            }
553            None => None,
554        }
555    }
556
    /// Remove an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        // (O(n) in the tail length, like the index fix-up above.)
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }
568
569    /// Remove an entry by swapping it with the last
570    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
571        match self.entries.get(index) {
572            Some(entry) => {
573                erase_index(&mut self.indices, entry.hash, index);
574                Some(self.swap_remove_finish(index))
575            }
576            None => None,
577        }
578    }
579
    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // use swap_remove, but then we need to update the index that points
        // to the other entry that has to move
        let entry = self.entries.swap_remove(index);

        // correct index that points to the entry that had to swap places
        if let Some(entry) = self.entries.get(index) {
            // was not last element
            // examine new element in `index` and find it in indices
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }
598
    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            // (Cheaper than many hash lookups when most slots are affected.)
            for i in &mut self.indices {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }
    }
620
    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            // (Cheaper than many hash lookups when most slots are affected.)
            for i in &mut self.indices {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse so
            // we never have duplicated indices that might have a hash collision.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(&mut self.indices, entry.hash, i, i + 1);
            }
        }
    }
643
    /// Moves the entry at `from` to position `to`, shifting the entries in
    /// between.
    ///
    /// ***Panics*** if either index is out of bounds.
    #[track_caller]
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        if from != to {
            let _ = self.entries[to]; // explicit bounds check

            // Find the bucket index first so we won't lose it among other updated indices.
            let bucket = self
                .indices
                .find_bucket_index(from_hash.get(), move |&i| i == from)
                .expect("index not found");

            // Shift everything between `from` and `to`, then fix up the
            // moved entry's own slot via its remembered bucket position.
            self.move_index_inner(from, to);
            *self.indices.get_bucket_mut(bucket).unwrap() = to;
        }
    }
660
661    fn move_index_inner(&mut self, from: usize, to: usize) {
662        // Update all other indices and rotate the entry positions.
663        if from < to {
664            self.decrement_indices(from + 1, to + 1);
665            self.entries[from..=to].rotate_left(1);
666        } else if to < from {
667            self.increment_indices(to, from);
668            self.entries[to..=from].rotate_right(1);
669        }
670    }
671
    /// Swaps the entries (and their index slots) at positions `a` and `b`.
    ///
    /// ***Panics*** if either position is out of bounds.
    #[track_caller]
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        // (Equal out-of-bounds values still fall through to the bounds check.)
        if a == b && a < self.entries.len() {
            return;
        }

        // We'll get a "nice" bounds-check from indexing `entries`,
        // and then we expect to find it in the table as well.
        match self.indices.get_disjoint_mut(
            [self.entries[a].hash.get(), self.entries[b].hash.get()],
            move |i, &x| if i == 0 { x == a } else { x == b },
        ) {
            [Some(ref_a), Some(ref_b)] => {
                // Swap the table slots and the entries together so the
                // index invariant holds on both sides.
                mem::swap(ref_a, ref_b);
                self.entries.swap(a, b);
            }
            _ => panic!("indices not found"),
        }
    }
692}
693
#[test]
fn assert_send_sync() {
    // Compile-time check: `Core<K, V>` is `Send + Sync` for such `K`, `V`.
    fn is_send_sync<T: Send + Sync>() {}
    is_send_sync::<Core<i32, i32>>();
}