1mod entry;
11mod extract;
12
13use alloc::vec::{self, Vec};
14use core::mem;
15use core::ops::RangeBounds;
16use hashbrown::hash_table;
17
18use crate::util::simplify_range;
19use crate::{Bucket, Equivalent, HashValue, TryReserveError};
20
/// Hash table whose stored values are positions into `Entries`.
type Indices = hash_table::HashTable<usize>;
/// Dense storage of the key-value buckets, in iteration order.
type Entries<K, V> = Vec<Bucket<K, V>>;

pub use entry::{OccupiedEntry, VacantEntry};
pub(crate) use extract::ExtractCore;
26
/// Core of the map: the two synced halves that don't depend on the hasher state.
#[cfg_attr(feature = "test_debug", derive(Debug))]
pub(crate) struct Core<K, V> {
    /// Hash table mapping each entry's hash to its position in `entries`.
    indices: Indices,
    /// Dense vec of the key-value buckets, in iteration order.
    entries: Entries<K, V>,
}
35
36#[inline(always)]
37fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + use<'_, K, V> {
38 move |&i| entries[i].hash.get()
39}
40
41#[inline]
42fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
43 key: &'a Q,
44 entries: &'a [Bucket<K, V>],
45) -> impl Fn(&usize) -> bool + use<'a, K, V, Q> {
46 move |&i| Q::equivalent(key, &entries[i].key)
47}
48
49#[inline]
50fn erase_index(table: &mut Indices, hash: HashValue, index: usize) {
51 if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) {
52 entry.remove();
53 } else if cfg!(debug_assertions) {
54 panic!("index not found");
55 }
56}
57
58#[inline]
59fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) {
60 let index = table
61 .find_mut(hash.get(), move |&i| i == old)
62 .expect("index not found");
63 *index = new;
64}
65
66fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) {
71 assert!(indices.capacity() - indices.len() >= entries.len());
72 for entry in entries {
73 indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!());
74 }
75}
76
77impl<K, V> Clone for Core<K, V>
78where
79 K: Clone,
80 V: Clone,
81{
82 fn clone(&self) -> Self {
83 let mut new = Self::new();
84 new.clone_from(self);
85 new
86 }
87
88 fn clone_from(&mut self, other: &Self) {
89 self.indices.clone_from(&other.indices);
90 if self.entries.capacity() < other.entries.len() {
91 let additional = other.entries.len() - self.entries.len();
93 self.reserve_entries(additional);
94 }
95 self.entries.clone_from(&other.entries);
96 }
97}
98
impl<K, V> Core<K, V> {
    /// Soft cap on `entries` capacity: never ask for more than `isize::MAX`
    /// bytes of buckets, which is the allocator's hard limit anyway.
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / size_of::<Bucket<K, V>>();

    /// Creates an empty core without allocating.
    #[inline]
    pub(crate) const fn new() -> Self {
        Core {
            indices: Indices::new(),
            entries: Vec::new(),
        }
    }

    /// Creates an empty core with room for `n` entries in both halves.
    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        Core {
            indices: Indices::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    /// Consumes the core, returning just the ordered entries.
    #[inline]
    pub(crate) fn into_entries(self) -> Entries<K, V> {
        self.entries
    }

    #[inline]
    pub(crate) fn as_entries(&self) -> &[Bucket<K, V>] {
        &self.entries
    }

    #[inline]
    pub(crate) fn as_entries_mut(&mut self) -> &mut [Bucket<K, V>] {
        &mut self.entries
    }

    /// Runs `f` over the entries (e.g. to sort or reorder them), then rebuilds
    /// the hash table, since any reordering invalidates the stored indices.
    pub(crate) fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Bucket<K, V>]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        // Invariant: both halves always agree on the number of entries.
        debug_assert_eq!(self.entries.len(), self.indices.len());
        self.indices.len()
    }

    /// Usable capacity, limited by whichever half can hold fewer entries.
    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    /// Shortens the map to `len` entries, dropping the tail.
    /// No-op when `len` is not less than the current length.
    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    /// Removes `range` of entries, returning a draining iterator.
    ///
    /// The matching indices are erased up front, so the map stays consistent
    /// even if the returned iterator is leaked without being exhausted.
    #[track_caller]
    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    /// Parallel counterpart of [`drain`][Self::drain].
    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    /// Splits the map at `at`, returning a new core holding entries `at..len`.
    /// Panics if `at > len`.
    #[track_caller]
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        let len = self.entries.len();
        assert!(
            at <= len,
            "index out of bounds: the len is {len} but the index is {at}. Expected index <= len"
        );

        // Drop the tail's indices from our table, move the tail entries out,
        // and build a fresh table for the new map.
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

    /// Splits out `range` for a splice: returns a core holding the entries
    /// *after* the range, plus an iterator over the drained range itself.
    #[track_caller]
    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        // Everything from `range.start` onward leaves `self`, so erase it all.
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

    /// Appends all of `other`'s entries to `self`, leaving `other` empty.
    ///
    /// "Unchecked": no key comparisons are made, so the caller must ensure
    /// there are no duplicate keys between the two maps.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
        other.indices.clear();
    }

    /// Reserves capacity for `additional` more entries.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow the entries if necessary, since `reserve_entries`
        // also rounds up to match the indices capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.reserve_entries(additional);
        }
    }

    /// Reserves capacity for `additional` more entries, without rounding up
    /// the entries allocation.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Fallible version of [`reserve`][Self::reserve].
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow the entries if necessary.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

    /// Reserves entries capacity, rounded up to match the indices capacity
    /// (within the `MAX_ENTRIES_CAPACITY` cap), falling back to the exact
    /// request if the rounded-up allocation fails or isn't larger.
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        // If the caller's exact request also fails, let them see that error.
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Fallible version of [`reserve_exact`][Self::reserve_exact].
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrinks both halves as close to `min_capacity` as possible.
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Removes and returns the last key-value pair, if any.
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            // After popping, the removed entry's old position equals the
            // new length — that's the stored index to erase.
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Returns the position of the entry matching `hash` and `key`, if any.
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.find(hash.get(), eq).copied()
    }

    /// Like [`get_index_of`][Self::get_index_of], but matching keys with a
    /// raw predicate instead of an `Equivalent` lookup key.
    pub(crate) fn get_index_of_raw<F>(&self, hash: HashValue, mut is_match: F) -> Option<usize>
    where
        F: FnMut(&K) -> bool,
    {
        let eq = move |&i: &usize| is_match(&self.entries[i].key);
        self.indices.find(hash.get(), eq).copied()
    }

    /// Appends a key-value pair to `entries`.
    ///
    /// When full, reserve our own capacity synced to the indices,
    /// rather than letting `Vec::push` just double it.
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            self.reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

    /// Inserts the pair, returning its index and — if the key was already
    /// present — the old value. An existing entry keeps its key and position.
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                // Existing key: replace only the value.
                let i = *entry.get();
                (i, Some(mem::replace(&mut self.entries[i].value, value)))
            }
            hash_table::Entry::Vacant(entry) => {
                // New key: append at the end.
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Like [`insert_full`][Self::insert_full], but an existing entry has
    /// *both* its key and value replaced, and the old pair is returned.
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Removes the entry matching `hash`/`key`, preserving the order of the
    /// remaining entries (everything after it shifts down by one).
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove();
        let (key, value) = self.shift_remove_finish(index);
        Some((index, key, value))
    }

    /// Removes the entry matching `hash`/`key` by swapping the last entry
    /// into its place (O(1), but disturbs the order).
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove();
        let (key, value) = self.swap_remove_finish(index);
        Some((index, key, value))
    }

    /// Erases the table indices for `start..end` and shifts indices `>= end`
    /// down by the erased amount. The entries themselves are NOT touched;
    /// the caller removes `entries[start..end]` separately.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.capacity() / 2;

        // Use a heuristic to pick between strategies.
        if erased == 0 {
            // Degenerate case: nothing to do.
        } else if start + shifted < half_capacity && start < erased {
            // Few kept indices: cheaper to rebuild from scratch.
            self.indices.clear();

            // Reinsert the stable prefix, then the shifted tail (which gets
            // its new, smaller indices from the table length as it grows).
            insert_bulk_no_grow(&mut self.indices, start_entries);
            insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Few affected indices: find and fix each one individually.
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Shift the tail indices down, `old` -> `new`.
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Otherwise, sweep the whole table once: drop erased indices,
            // shift tail indices down by the erased count.
            let offset = end - start;
            self.indices.retain(move |i| {
                if *i >= end {
                    *i -= offset;
                    true
                } else {
                    *i < start
                }
            });
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    /// Keeps only the entries for which `keep` returns true, in order,
    /// rebuilding the hash table if anything was removed.
    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

    /// Clears and refills the index table from scratch to match `entries`.
    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    /// Reverses the order of the entries in place.
    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No per-entry lookups needed: for an in-place reversal, every
        // stored index `i` simply becomes `len - i - 1`.
        let len = self.entries.len();
        for i in &mut self.indices {
            *i = len - *i - 1;
        }
    }

    /// Reserves entries capacity, rounded up to match the indices capacity
    /// (but no more than the `MAX_ENTRIES_CAPACITY` soft cap).
    #[inline]
    fn reserve_entries(&mut self, additional: usize) {
        let try_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = try_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return;
        }
        // Rounding up failed or wasn't enough; make the exact request and
        // let `Vec` handle any allocation failure as usual.
        self.entries.reserve_exact(additional);
    }

    /// Appends a pair without checking whether the key is already present.
    /// No equality comparison is made, so the caller must guarantee the key
    /// is unique; returns a reference to the new bucket.
    pub(super) fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> &mut Bucket<K, V> {
        let i = self.indices.len();
        debug_assert_eq!(i, self.entries.len());
        self.indices
            .insert_unique(hash.get(), i, get_hash(&self.entries));
        self.push_entry(hash, key, value);
        &mut self.entries[i]
    }

    /// Replaces the key (and hash) at `index`, returning the old key.
    /// No equality check is made: the caller must guarantee the new key does
    /// not duplicate any other entry. Panics if `index` is out of bounds.
    #[track_caller]
    pub(crate) fn replace_index_unique(&mut self, index: usize, hash: HashValue, key: K) -> K {
        // Drop the table slot under the old hash, then re-insert the same
        // position under the new hash.
        erase_index(&mut self.indices, self.entries[index].hash, index);
        self.indices
            .insert_unique(hash.get(), index, get_hash(&self.entries));

        let entry = &mut self.entries[index];
        entry.hash = hash;
        mem::replace(&mut entry.key, key)
    }

    /// Inserts a pair at position `index`, shifting later entries up by one.
    /// No equality check is made, so the caller must guarantee the key is not
    /// already present. Panics if `index > len`.
    pub(crate) fn shift_insert_unique(
        &mut self,
        index: usize,
        hash: HashValue,
        key: K,
        value: V,
    ) -> &mut Bucket<K, V> {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment the others first so we never hold duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert_unique(hash.get(), index, move |&i| {
            // This hasher may run if the table grows during insertion.
            // `entries` hasn't been shifted yet, so a stored index past the
            // insertion point still refers to `entries[i - 1]`.
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
        &mut self.entries[index]
    }

    /// Removes the entry at `index` (if any), preserving the order of the
    /// remaining entries.
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finishes a shift-removal whose own table index was already erased:
    /// shift every later index down by one, then remove the entry (O(n)).
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        self.decrement_indices(index + 1, self.entries.len());

        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Removes the entry at `index` (if any) by swapping the last entry into
    /// its place.
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finishes a swap-removal whose own table index was already erased.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // Swap the last entry into this slot...
        let entry = self.entries.swap_remove(index);

        // ...and if we didn't just remove the last entry itself, repoint the
        // swapped-in entry's table index from its old last position.
        if let Some(entry) = self.entries.get(index) {
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Decrements all stored indices in the range `start..end` by one.
    /// The caller is responsible for shifting `entries` to match.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Heuristic: full table sweep vs. a `find` per shifted entry.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut self.indices {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increments all stored indices in the range `start..end` by one.
    /// The caller is responsible for shifting `entries` to match.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Heuristic: full table sweep vs. a `find` per shifted entry.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut self.indices {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse
            // so the table never transiently holds duplicate index values
            // (which could confuse `find` under hash collisions).
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(&mut self.indices, entry.hash, i, i + 1);
            }
        }
    }

    /// Moves the entry at `from` to position `to`, shifting everything in
    /// between by one. Panics if either position is out of bounds.
    #[track_caller]
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        if from != to {
            let _ = self.entries[to]; // explicit bounds check before mutating
            // Remember the moved entry's table bucket now, while `from` is
            // still its stored index.
            let bucket = self
                .indices
                .find_bucket_index(from_hash.get(), move |&i| i == from)
                .expect("index not found");

            // Update all the other indices and rotate the entry positions...
            self.move_index_inner(from, to);
            // ...then point the moved entry's bucket at its final position.
            *self.indices.get_bucket_mut(bucket).unwrap() = to;
        }
    }

    /// Shifts the in-between indices and rotates the entries; the moved
    /// entry's own table index is fixed up by the caller.
    fn move_index_inner(&mut self, from: usize, to: usize) {
        if from < to {
            self.decrement_indices(from + 1, to + 1);
            self.entries[from..=to].rotate_left(1);
        } else if to < from {
            self.increment_indices(to, from);
            self.entries[to..=from].rotate_right(1);
        }
    }

    /// Swaps the positions of the entries at `a` and `b`.
    /// Panics if either index is out of bounds.
    #[track_caller]
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        // Equal and in-bounds: nothing to do. (An out-of-bounds `a == b`
        // still panics on the `entries` lookup below.)
        if a == b && a < self.entries.len() {
            return;
        }

        // Find both table slots at once; the closure's first argument says
        // which of the two hashes (and thus which index) is being matched.
        match self.indices.get_disjoint_mut(
            [self.entries[a].hash.get(), self.entries[b].hash.get()],
            move |i, &x| if i == 0 { x == a } else { x == b },
        ) {
            [Some(ref_a), Some(ref_b)] => {
                mem::swap(ref_a, ref_b);
                self.entries.swap(a, b);
            }
            _ => panic!("indices not found"),
        }
    }
}
693
/// Compile-time check that `Core` is `Send + Sync` when `K`/`V` are.
#[test]
fn assert_send_sync() {
    fn check<T: Send + Sync>() {}
    check::<Core<i32, i32>>();
}