indexmap/map/core.rs

//! This is the core implementation that doesn't depend on the hasher at all.
//!
//! The methods of `IndexMapCore` don't use any `Hash` properties of `K`.
//!
//! It's cleaner to separate them out, so the compiler can check that we are
//! not using `Hash` at all in these methods.
//!
//! However, we should probably not let this show in the public API or docs.

mod entry;
mod extract;

pub mod raw_entry_v1;

use alloc::vec::{self, Vec};
use core::mem;
use core::ops::RangeBounds;
use hashbrown::hash_table;

use crate::util::simplify_range;
use crate::{Bucket, Equivalent, HashValue, TryReserveError};

type Indices = hash_table::HashTable<usize>;
type Entries<K, V> = Vec<Bucket<K, V>>;

pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry};
pub(crate) use extract::ExtractCore;

/// Core of the map that does not depend on the hasher type `S`
#[derive(Debug)]
pub(crate) struct IndexMapCore<K, V> {
    /// Indices that map from an entry's hash to its index in `entries`.
    indices: Indices,
    /// A dense vec of entries, kept in order.
    entries: Entries<K, V>,
}

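/// Hasher closure for the indices table: every `Bucket` caches its `HashValue`,
/// so rehashing an index (e.g. when the table grows) just reads the stored hash
/// back out of `entries` instead of touching `K: Hash`.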
#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + use<'_, K, V> {
    move |&i| entries[i].hash.get()
}

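/// Equality closure for the indices table: compares the lookup key against the
/// key stored at a candidate index, via `Equivalent` so borrowed key forms work.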
#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + use<'a, K, V, Q> {
    move |&i| Q::equivalent(key, &entries[i].key)
}

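/// Remove the table entry that stores `index` under `hash`.
/// A missing index is a bug, but it only panics in debug builds.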
#[inline]
fn erase_index(table: &mut Indices, hash: HashValue, index: usize) {
    if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) {
        entry.remove();
    } else if cfg!(debug_assertions) {
        panic!("index not found");
    }
}

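/// Rewrite the table entry under `hash` that currently stores `old` so that it
/// stores `new` instead, panicking if no such index is present.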
#[inline]
fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) {
    let index = table
        .find_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

/// Inserts many entries into the indices table without reallocating,
/// and without regard for duplication.
///
/// ***Panics*** if there is not sufficient capacity already.
fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) {
    assert!(indices.capacity() - indices.len() >= entries.len());
    for entry in entries {
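        // The capacity assertion above guarantees that `insert_unique` never
        // needs to grow, so its rehashing closure is never called.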
        indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!());
    }
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        self.indices.clone_from(&other.indices);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.reserve_entries(additional);
        }
        self.entries.clone_from(&other.entries);
    }
}

impl<K, V> IndexMapCore<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / size_of::<Bucket<K, V>>();

    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: Indices::new(),
            entries: Vec::new(),
        }
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: Indices::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn into_entries(self) -> Entries<K, V> {
        self.entries
    }

    #[inline]
    pub(crate) fn as_entries(&self) -> &[Bucket<K, V>] {
        &self.entries
    }

    #[inline]
    pub(crate) fn as_entries_mut(&mut self) -> &mut [Bucket<K, V>] {
        &mut self.entries
    }

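    /// Run `f` on the entries slice (e.g. to sort or shuffle it in place),
    /// then rebuild the hash indices to match the new entry order.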
    pub(crate) fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Bucket<K, V>]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        debug_assert_eq!(self.entries.len(), self.indices.len());
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    #[track_caller]
    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    #[track_caller]
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        let len = self.entries.len();
        assert!(
            at <= len,
            "index out of bounds: the len is {len} but the index is {at}. Expected index <= len"
        );

        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

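    /// Split the map in three: `self` keeps the entries before `range`, the
    /// returned map receives the entries after `range`, and the entries within
    /// `range` are returned as a drained iterator.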
    #[track_caller]
    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

    /// Append from another map without checking whether items already exist.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
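        // `Vec::append` leaves `other.entries` empty, so clear its indices too,
        // keeping `other` in a consistent (empty) state.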
        other.indices.clear();
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.reserve_entries(additional);
        }
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Try to reserve capacity for `additional` more key-value pairs.
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

    /// Try to reserve entries capacity, rounded up to match the indices
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrink the capacity of the map with a lower bound
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Return the index in `entries` where an equivalent key can be found
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.find(hash.get(), eq).copied()
    }

    /// Append a key-value pair to `entries`,
    /// *without* checking whether it already exists.
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
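        // On an existing key, overwrite the value in place so the entry keeps its
        // index; otherwise append a new entry at the end of `entries`.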
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                (i, Some(mem::replace(&mut self.entries[i].value, value)))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Same as `insert_full`, except it also replaces the key
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove();
        let (key, value) = self.shift_remove_finish(index);
        Some((index, key, value))
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove();
        let (key, value) = self.swap_remove_finish(index);
        Some((index, key, value))
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
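    ///
    /// For example, with entries `[a, b, c, d, e]`, `erase_indices(1, 3)` removes
    /// the index entries for `b` and `c` and rewrites the indices of `d` and `e`
    /// from 3 and 4 down to 1 and 2.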
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.capacity() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices, then shifted indices
            insert_bulk_no_grow(&mut self.indices, start_entries);
            insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            let offset = end - start;
            self.indices.retain(move |i| {
                if *i >= end {
                    *i -= offset;
                    true
                } else {
                    *i < start
                }
            });
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save the hash indices; we can easily calculate what they
        // should be, given that this is an in-place reversal.
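        // For example, with len == 5, old index 0 maps to 4, 1 to 3, ..., 4 to 0.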
        let len = self.entries.len();
        for i in &mut self.indices {
            *i = len - *i - 1;
        }
    }

    /// Reserve entries capacity, rounded up to match the indices
    #[inline]
    fn reserve_entries(&mut self, additional: usize) {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting panic.
        let try_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = try_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return;
        }
        self.entries.reserve_exact(additional);
    }

    /// Insert a key-value pair in `entries`,
    /// *without* checking whether it already exists.
    pub(super) fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> &mut Bucket<K, V> {
        let i = self.indices.len();
        debug_assert_eq!(i, self.entries.len());
        self.indices
            .insert_unique(hash.get(), i, get_hash(&self.entries));
        self.push_entry(hash, key, value);
        &mut self.entries[i]
    }

    /// Replaces the key at the given index,
    /// *without* checking whether it already exists.
    #[track_caller]
    pub(crate) fn replace_index_unique(&mut self, index: usize, hash: HashValue, key: K) -> K {
        // NB: This removal and insertion isn't "no grow" (with unreachable hasher)
        // because hashbrown's tombstones might force a resize anyway.
        erase_index(&mut self.indices, self.entries[index].hash, index);
        self.indices
            .insert_unique(hash.get(), index, get_hash(&self.entries));

        let entry = &mut self.entries[index];
        entry.hash = hash;
        mem::replace(&mut entry.key, key)
    }

    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether it already exists.
    fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert_unique(hash.get(), index, move |&i| {
            // Adjust for the incremented indices to find hashes.
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // use swap_remove, but then we need to update the index that points
        // to the other entry that has to move
        let entry = self.entries.swap_remove(index);

        // correct index that points to the entry that had to swap places
        if let Some(entry) = self.entries.get(index) {
            // was not last element
            // examine new element in `index` and find it in indices
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut self.indices {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut self.indices {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse so
            // we never have duplicated indices that might have a hash collision.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(&mut self.indices, entry.hash, i, i + 1);
            }
        }
    }

    #[track_caller]
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        if from != to {
            let _ = self.entries[to]; // explicit bounds check

            // Find the bucket index first so we won't lose it among other updated indices.
            let bucket = self
                .indices
                .find_bucket_index(from_hash.get(), move |&i| i == from)
                .expect("index not found");

            self.move_index_inner(from, to);
            *self.indices.get_bucket_mut(bucket).unwrap() = to;
        }
    }

    fn move_index_inner(&mut self, from: usize, to: usize) {
        // Update all other indices and rotate the entry positions.
        if from < to {
            self.decrement_indices(from + 1, to + 1);
            self.entries[from..=to].rotate_left(1);
        } else if to < from {
            self.increment_indices(to, from);
            self.entries[to..=from].rotate_right(1);
        }
    }

    #[track_caller]
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        if a == b && a < self.entries.len() {
            return;
        }

        // We'll get a "nice" bounds-check from indexing `entries`,
        // and then we expect to find it in the table as well.
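        // `get_disjoint_mut` calls the eq closure with the slot position (0 or 1)
        // and a candidate index, so each slot only matches its own target.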
        match self.indices.get_disjoint_mut(
            [self.entries[a].hash.get(), self.entries[b].hash.get()],
            move |i, &x| if i == 0 { x == a } else { x == b },
        ) {
            [Some(ref_a), Some(ref_b)] => {
                mem::swap(ref_a, ref_b);
                self.entries.swap(a, b);
            }
            _ => panic!("indices not found"),
        }
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
    assert_send_sync::<IndexedEntry<'_, i32, i32>>();
    assert_send_sync::<raw_entry_v1::RawEntryMut<'_, i32, i32, ()>>();
}
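
// A quick sanity check of the core insert/replace/remove plumbing. This is an
// illustrative sketch rather than part of the upstream test suite; it assumes
// `HashValue`'s crate-internal tuple constructor is usable from this module, and
// it deliberately gives every key the same dummy hash, which is fine for
// correctness because lookups always confirm equality through `Equivalent`.
#[test]
fn insert_replace_and_swap_remove() {
    let hash = HashValue(0);
    let mut map = IndexMapCore::new();

    // New keys are appended in order and receive increasing indices.
    assert_eq!(map.insert_full(hash, "a", 1), (0, None));
    assert_eq!(map.insert_full(hash, "b", 2), (1, None));

    // Re-inserting an existing key keeps its index and returns the old value.
    assert_eq!(map.insert_full(hash, "a", 3), (0, Some(1)));

    // Swap-removal moves the last entry ("b") into the vacated index 0.
    assert_eq!(map.swap_remove_full(hash, &"a"), Some((0, "a", 3)));
    assert_eq!(map.get_index_of(hash, &"b"), Some(0));
    assert_eq!(map.len(), 1);
}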