//! An unbounded set of futures.
//!
//! This module is only available when the `std` or `alloc` feature of this
//! library is activated, and it is activated by default.

use crate::task::AtomicWaker;
use alloc::sync::{Arc, Weak};
use core::cell::UnsafeCell;
use core::fmt::{self, Debug};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem;
use core::pin::Pin;
use core::ptr;
use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst};
use core::sync::atomic::{AtomicBool, AtomicPtr};
use futures_core::future::Future;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError};

mod abort;

mod iter;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352
pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef};

mod task;
use self::task::Task;

mod ready_to_run_queue;
use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue};

/// A set of futures which may complete in any order.
///
/// See [`FuturesOrdered`](crate::stream::FuturesOrdered) for a version of this
/// type that preserves a FIFO order.
///
/// This structure is optimized to manage a large number of futures.
/// Futures managed by [`FuturesUnordered`] will only be polled when they
/// generate wake-up notifications. This reduces the amount of work needed
/// to poll large numbers of futures.
///
/// [`FuturesUnordered`] can be filled by [`collect`](Iterator::collect)ing an
/// iterator of futures into a [`FuturesUnordered`], or by
/// [`push`](FuturesUnordered::push)ing futures onto an existing
/// [`FuturesUnordered`]. When new futures are added,
/// [`poll_next`](Stream::poll_next) must be called in order to begin receiving
/// wake-ups for new futures.
///
/// Note that you can create a ready-made [`FuturesUnordered`] via the
/// [`collect`](Iterator::collect) method, or you can start with an empty set
/// with the [`FuturesUnordered::new`] constructor.
///
/// This type is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
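///
/// # Examples
///
/// A minimal usage sketch (illustrative; it assumes the `futures` facade
/// crate for `block_on` and `StreamExt`):
///
/// ```
/// use futures::executor::block_on;
/// use futures::stream::{FuturesUnordered, StreamExt};
///
/// async fn work(n: u32) -> u32 { n }
///
/// block_on(async {
///     let mut set = FuturesUnordered::new();
///     set.push(work(1));
///     set.push(work(2));
///
///     // Items are yielded in completion order, not insertion order.
///     let mut total = 0;
///     while let Some(n) = set.next().await {
///         total += n;
///     }
///     assert_eq!(total, 3);
/// });
/// ```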
#[must_use = "streams do nothing unless polled"]
pub struct FuturesUnordered<Fut> {
    ready_to_run_queue: Arc<ReadyToRunQueue<Fut>>,
    head_all: AtomicPtr<Task<Fut>>,
    is_terminated: AtomicBool,
}

unsafe impl<Fut: Send> Send for FuturesUnordered<Fut> {}
unsafe impl<Fut: Send + Sync> Sync for FuturesUnordered<Fut> {}
impl<Fut> Unpin for FuturesUnordered<Fut> {}

impl Spawn for FuturesUnordered<FutureObj<'_, ()>> {
    fn spawn_obj(&self, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> {
        self.push(future_obj);
        Ok(())
    }
}

impl LocalSpawn for FuturesUnordered<LocalFutureObj<'_, ()>> {
    fn spawn_local_obj(&self, future_obj: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
        self.push(future_obj);
        Ok(())
    }
}

// FuturesUnordered is implemented using two linked lists. One which links all
// futures managed by a `FuturesUnordered` and one that tracks futures that have
// been scheduled for polling. The first linked list allows for thread safe
// insertion of nodes at the head as well as forward iteration, but is otherwise
// not thread safe and is only accessed by the thread that owns the
// `FuturesUnordered` value for any other operations. The second linked list is
// an implementation of the intrusive MPSC queue algorithm described by
// 1024cores.net.
//
// When a future is submitted to the set, a task is allocated and inserted in
// both linked lists. The next call to `poll_next` will (eventually) see this
// task and call `poll` on the future.
//
// Before a managed future is polled, the current context's waker is replaced
// with one that is aware of the specific future being run. This ensures that
// wake-up notifications generated by that specific future are visible to
// `FuturesUnordered`. When a wake-up notification is received, the task is
// inserted into the ready to run queue, so that its future can be polled later.
//
// Each task is wrapped in an `Arc` and thereby atomically reference counted.
// Also, each task contains an `AtomicBool` which acts as a flag that indicates
// whether the task is currently inserted in the atomic queue. When a wake-up
// notification is received, the task will only be inserted into the ready to
// run queue if it isn't inserted already.
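//
// A simplified sketch of the producer-side enqueue, following the 1024cores
// algorithm (illustrative only; the real implementation lives in the
// `ready_to_run_queue` module):
//
//     // Prepare the node; no coordination is required yet.
//     (*task).next_ready_to_run.store(ptr::null_mut(), Relaxed);
//     // Publish the node as the new head with a single swap...
//     let prev = self.head.swap(task as *mut _, AcqRel);
//     // ...then link the old head to it. A consumer that observes the new
//     // head before this store completes reports `Dequeue::Inconsistent`
//     // and retries later.
//     (*prev).next_ready_to_run.store(task as *mut _, Release);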

impl<Fut> Default for FuturesUnordered<Fut> {
    fn default() -> Self {
        Self::new()
    }
}

impl<Fut> FuturesUnordered<Fut> {
    /// Constructs a new, empty [`FuturesUnordered`].
    ///
    /// The returned [`FuturesUnordered`] does not contain any futures.
    /// In this state, [`FuturesUnordered::poll_next`](Stream::poll_next) will
    /// return [`Poll::Ready(None)`](Poll::Ready).
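    ///
    /// For example (an illustrative sketch using the `futures` facade crate):
    ///
    /// ```
    /// use futures::executor::block_on_stream;
    /// use futures::stream::FuturesUnordered;
    ///
    /// let set = FuturesUnordered::<std::future::Ready<()>>::new();
    /// assert_eq!(block_on_stream(set).next(), None);
    /// ```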
    pub fn new() -> Self {
        let stub = Arc::new(Task {
            future: UnsafeCell::new(None),
            next_all: AtomicPtr::new(ptr::null_mut()),
            prev_all: UnsafeCell::new(ptr::null()),
            len_all: UnsafeCell::new(0),
            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
            queued: AtomicBool::new(true),
            ready_to_run_queue: Weak::new(),
            woken: AtomicBool::new(false),
        });
        let stub_ptr = Arc::as_ptr(&stub);
        let ready_to_run_queue = Arc::new(ReadyToRunQueue {
            waker: AtomicWaker::new(),
            head: AtomicPtr::new(stub_ptr as *mut _),
            tail: UnsafeCell::new(stub_ptr),
            stub,
        });

        Self {
            head_all: AtomicPtr::new(ptr::null_mut()),
            ready_to_run_queue,
            is_terminated: AtomicBool::new(false),
        }
    }

    /// Returns the number of futures contained in the set.
    ///
    /// This represents the total number of in-flight futures.
    pub fn len(&self) -> usize {
        let (_, len) = self.atomic_load_head_and_len_all();
        len
    }

    /// Returns `true` if the set contains no futures.
    pub fn is_empty(&self) -> bool {
        // Relaxed ordering can be used here since we don't need to read from
        // the head pointer, only check whether it is null.
        self.head_all.load(Relaxed).is_null()
    }

    /// Push a future into the set.
    ///
    /// This method adds the given future to the set without
    /// [`poll`](core::future::Future::poll)ing it. The caller must ensure
    /// that [`FuturesUnordered::poll_next`](Stream::poll_next) is called in
    /// order to receive wake-up notifications for the given future.
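    ///
    /// A short sketch (illustrative): `push` takes `&self`, so futures can
    /// be added without exclusive access to the set.
    ///
    /// ```
    /// use futures::stream::FuturesUnordered;
    ///
    /// async fn task(name: &'static str) -> &'static str { name }
    ///
    /// let set = FuturesUnordered::new();
    /// set.push(task("a"));
    /// set.push(task("b"));
    /// assert_eq!(set.len(), 2);
    /// ```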
    pub fn push(&self, future: Fut) {
        let task = Arc::new(Task {
            future: UnsafeCell::new(Some(future)),
            next_all: AtomicPtr::new(self.pending_next_all()),
            prev_all: UnsafeCell::new(ptr::null_mut()),
            len_all: UnsafeCell::new(0),
            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
            queued: AtomicBool::new(true),
            ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue),
            woken: AtomicBool::new(false),
        });

        // Reset the `is_terminated` flag if we've previously marked ourselves
        // as terminated.
        self.is_terminated.store(false, Relaxed);

        // Right now our task has a strong reference count of 1. We transfer
        // ownership of this reference count to our internal linked list
        // and we'll reclaim ownership through the `unlink` method below.
        let ptr = self.link(task);

        // We'll need to get the future "into the system" to start tracking
        // it, i.e. route its wake-up notifications to us so that we know
        // which futures are ready. To do that we unconditionally enqueue it
        // for polling here.
        self.ready_to_run_queue.enqueue(ptr);
    }

    /// Returns an iterator that allows inspecting each future in the set.
    pub fn iter(&self) -> Iter<'_, Fut>
    where
        Fut: Unpin,
    {
        Iter(Pin::new(self).iter_pin_ref())
    }

    /// Returns an iterator that allows inspecting each future in the set.
    pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, Fut> {
        let (task, len) = self.atomic_load_head_and_len_all();
        let pending_next_all = self.pending_next_all();

        IterPinRef { task, len, pending_next_all, _marker: PhantomData }
    }

    /// Returns an iterator that allows modifying each future in the set.
    pub fn iter_mut(&mut self) -> IterMut<'_, Fut>
    where
        Fut: Unpin,
    {
        IterMut(Pin::new(self).iter_pin_mut())
    }

    /// Returns an iterator that allows modifying each future in the set.
    pub fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, Fut> {
        // `head_all` can be accessed directly and we don't need to spin on
        // `Task::next_all` since we have exclusive access to the set.
        let task = *self.head_all.get_mut();
        let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } };

        IterPinMut { task, len, _marker: PhantomData }
    }

    /// Returns the current head node and the number of futures in the list
    /// of all futures, in a context where access may be shared with other
    /// threads (mostly for use by the `len` and `iter_pin_ref` methods).
    fn atomic_load_head_and_len_all(&self) -> (*const Task<Fut>, usize) {
        let task = self.head_all.load(Acquire);
        let len = if task.is_null() {
            0
        } else {
            unsafe {
                (*task).spin_next_all(self.pending_next_all(), Acquire);
                *(*task).len_all.get()
            }
        };

        (task, len)
    }

    /// Releases the task. It destroys the future inside and either drops
    /// the `Arc<Task>` or transfers ownership to the ready to run queue.
    /// The task this method is called on must already have been unlinked.
    fn release_task(&mut self, task: Arc<Task<Fut>>) {
        // `release_task` must only be called on unlinked tasks
        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
        unsafe {
            debug_assert!((*task.prev_all.get()).is_null());
        }

        // The future is done, so set the `queued` flag. This will prevent
        // `wake` from doing any work from now on.
        let prev = task.queued.swap(true, SeqCst);

        // If the queued flag was previously set, then it means that this task
        // is still in our internal ready to run queue. We then transfer
        // ownership of our reference count to the ready to run queue, and it'll
        // come along and free it later, noticing that the future is `None`.
        //
        // If, however, the queued flag was *not* set then we're safe to
        // release our reference count on the task. The queued flag was set
        // above so all future `enqueue` operations will not actually
        // enqueue the task, so our task will never see the ready to run queue
        // again. The task itself will be deallocated once all reference counts
        // have been dropped elsewhere by the various wakers that contain it.
        //
        // Use ManuallyDrop to transfer the reference count ownership before
        // dropping the future so unwinding won't release the reference count.
        let md_slot;
        let task = if prev {
            md_slot = mem::ManuallyDrop::new(task);
            &*md_slot
        } else {
            &task
        };

        // Drop the future, even if it hasn't finished yet. This is safe
        // because we're dropping the future on the thread that owns
        // `FuturesUnordered`, which correctly tracks `Fut`'s lifetimes and
        // such.
        unsafe {
            // Set to `None` rather than `take()`ing to prevent moving the
            // future.
            *task.future.get() = None;
        }
    }

    /// Insert a new task into the internal linked list.
    fn link(&self, task: Arc<Task<Fut>>) -> *const Task<Fut> {
        // `next_all` should already be reset to the pending state before this
        // function is called.
        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
        let ptr = Arc::into_raw(task);

        // Atomically swap out the old head node to get the node that should be
        // assigned to `next_all`.
        let next = self.head_all.swap(ptr as *mut _, AcqRel);

        unsafe {
            // Store the new list length in the new node.
            let new_len = if next.is_null() {
                1
            } else {
                // Make sure `next_all` has been written to signal that it is
                // safe to read `len_all`.
                (*next).spin_next_all(self.pending_next_all(), Acquire);
                *(*next).len_all.get() + 1
            };
            *(*ptr).len_all.get() = new_len;

            // Write the old head as the next node pointer, signaling to other
            // threads that `len_all` and `next_all` are ready to read.
            (*ptr).next_all.store(next, Release);

            // `prev_all` updates don't need to be synchronized, as the field is
            // only ever used after exclusive access has been acquired.
            if !next.is_null() {
                *(*next).prev_all.get() = ptr;
            }
        }

        ptr
    }

    /// Remove the task from the linked list tracking all tasks currently
    /// managed by `FuturesUnordered`.
    /// This method is unsafe because the caller must guarantee that `task`
    /// is a valid pointer.
    unsafe fn unlink(&mut self, task: *const Task<Fut>) -> Arc<Task<Fut>> {
        unsafe {
            // Compute the new list length now in case we're removing the head node
            // and won't be able to retrieve the correct length later.
            let head = *self.head_all.get_mut();
            debug_assert!(!head.is_null());
            let new_len = *(*head).len_all.get() - 1;

            let task = Arc::from_raw(task);
            let next = task.next_all.load(Relaxed);
            let prev = *task.prev_all.get();
            task.next_all.store(self.pending_next_all(), Relaxed);
            *task.prev_all.get() = ptr::null_mut();

            if !next.is_null() {
                *(*next).prev_all.get() = prev;
            }

            if !prev.is_null() {
                (*prev).next_all.store(next, Relaxed);
            } else {
                *self.head_all.get_mut() = next;
            }

            // Store the new list length in the head node.
            let head = *self.head_all.get_mut();
            if !head.is_null() {
                *(*head).len_all.get() = new_len;
            }

            task
        }
    }

    /// Returns the reserved value for `Task::next_all` to indicate a pending
    /// assignment from the thread that inserted the task.
    ///
    /// `FuturesUnordered::link` needs to update `Task` pointers in an order
    /// that ensures any iterators created on other threads can correctly
    /// traverse the entire `Task` list using the chain of `next_all` pointers.
    /// This could be solved with a compare-exchange loop that stores the
    /// current `head_all` in `next_all` and swaps out `head_all` with the new
    /// `Task` pointer if the head hasn't already changed. Under heavy thread
    /// contention, this compare-exchange loop could become costly.
    ///
    /// An alternative is to initialize `next_all` to a reserved pending state
    /// first, perform an atomic swap on `head_all`, and finally update
    /// `next_all` with the old head node. Iterators will then either see the
    /// pending state value or the correct next node pointer, and can reload
    /// `next_all` as needed until the correct value is loaded. The number of
    /// retries needed (if any) would be small and will always be finite, so
    /// this should generally perform better than the compare-exchange loop.
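    ///
    /// For contrast, the avoided compare-exchange loop would look roughly
    /// like this (hypothetical, not code in this crate):
    ///
    /// ```ignore
    /// loop {
    ///     let head = self.head_all.load(Acquire);
    ///     task.next_all.store(head, Relaxed);
    ///     // Under contention this retries, repeatedly re-reading `head_all`.
    ///     if self.head_all.compare_exchange(head, ptr, AcqRel, Acquire).is_ok() {
    ///         break;
    ///     }
    /// }
    /// ```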
    ///
    /// A valid `Task` pointer in the `head_all` list is guaranteed to never be
    /// this value, so it is safe to use as a reserved value until the correct
    /// value can be written.
    fn pending_next_all(&self) -> *mut Task<Fut> {
        // The `ReadyToRunQueue` stub is never inserted into the `head_all`
        // list, and its pointer value will remain valid for the lifetime of
        // this `FuturesUnordered`, so we can make use of its value here.
        Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _
    }
}

impl<Fut: Future> Stream for FuturesUnordered<Fut> {
    type Item = Fut::Output;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let len = self.len();

        // Keep track of how many child futures we have polled,
        // in case we want to forcibly yield.
        let mut polled = 0;
        let mut yielded = 0;

        // Ensure `parent` is correctly set.
        self.ready_to_run_queue.waker.register(cx.waker());

        loop {
            // Safety: &mut self guarantees the mutual exclusion `dequeue`
            // expects
            let task = match unsafe { self.ready_to_run_queue.dequeue() } {
                Dequeue::Empty => {
                    if self.is_empty() {
                        // We can only consider ourselves terminated once we
                        // have yielded a `None`
                        *self.is_terminated.get_mut() = true;
                        return Poll::Ready(None);
                    } else {
                        return Poll::Pending;
                    }
                }
                Dequeue::Inconsistent => {
                    // At this point, it may be worth yielding the thread &
                    // spinning a few times... but for now, just yield using the
                    // task system.
                    cx.waker().wake_by_ref();
                    return Poll::Pending;
                }
                Dequeue::Data(task) => task,
            };

            debug_assert!(task != self.ready_to_run_queue.stub());

            // Safety:
            // - `task` is a valid pointer.
            // - We are the only thread that accesses the `UnsafeCell` that
            //   contains the future
            let future = match unsafe { &mut *(*task).future.get() } {
                Some(future) => future,

                // If the future has already gone away then we're just
                // cleaning out this task. See the comment in
                // `release_task` for more information, but we're basically
                // just taking ownership of our reference count here.
                None => {
                    // This case only happens when `release_task` was called
                    // for this task before and couldn't drop the task
                    // because it was already enqueued in the ready to run
                    // queue.

                    // Safety: `task` is a valid pointer
                    let task = unsafe { Arc::from_raw(task) };

                    // Double check that the call to `release_task` really
                    // happened. Calling it required the task to be unlinked.
                    debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
                    unsafe {
                        debug_assert!((*task.prev_all.get()).is_null());
                    }
                    continue;
                }
            };

            // Safety: `task` is a valid pointer
            let task = unsafe { self.unlink(task) };

            // Unset queued flag: This must be done before polling to ensure
            // that the future's task gets rescheduled if it sends a wake-up
            // notification **during** the call to `poll`.
            let prev = task.queued.swap(false, SeqCst);
            assert!(prev);

            // We're going to need to be very careful if the `poll`
            // method below panics. We need to (a) not leak memory and
            // (b) ensure that we still don't have any use-after-frees. To
            // manage this we do a few things:
            //
            // * A "bomb" is created which if dropped abnormally will call
            //   `release_task`. That way we'll be sure the memory management
            //   of the `task` is managed correctly. In particular
            //   `release_task` will drop the future. This ensures that it is
            //   dropped on this thread and not accidentally on a different
            //   thread (bad).
            // * We unlink the task from our internal queue preemptively,
            //   assuming `poll` may panic, in which case we'll want to
            //   discard it regardless.
            struct Bomb<'a, Fut> {
                queue: &'a mut FuturesUnordered<Fut>,
                task: Option<Arc<Task<Fut>>>,
            }

            impl<Fut> Drop for Bomb<'_, Fut> {
                fn drop(&mut self) {
                    if let Some(task) = self.task.take() {
                        self.queue.release_task(task);
                    }
                }
            }

            let mut bomb = Bomb { task: Some(task), queue: &mut *self };

            // Poll the underlying future with the appropriate waker
            // implementation. This is where a large bit of the unsafety
            // starts to stem from internally. The waker is basically just
            // our `Arc<Task<Fut>>` and can schedule the future for polling by
            // enqueuing itself in the ready to run queue.
            //
            // Critically, though, `Task<Fut>` won't actually access `Fut`,
            // the future, while it's floating around inside of wakers. The
            // wakers will basically just use `Fut` to size the internal
            // allocation, appropriately accessing fields and deallocating
            // the task if need be.
            let res = {
                let task = bomb.task.as_ref().unwrap();
                // We are only interested in whether the future is awoken before it
                // finishes polling, so reset the flag here.
                task.woken.store(false, Relaxed);
                // SAFETY: see the comments of Bomb and this block.
                let waker = unsafe { Task::waker_ref(task) };
                let mut cx = Context::from_waker(&waker);

                // Safety: We won't move the future ever again
                let future = unsafe { Pin::new_unchecked(future) };

                future.poll(&mut cx)
            };
            polled += 1;

            match res {
                Poll::Pending => {
                    let task = bomb.task.take().unwrap();
                    // If the future was awoken during polling, we assume
                    // the future wanted to explicitly yield.
                    yielded += task.woken.load(Relaxed) as usize;
                    bomb.queue.link(task);

                    // If a future yields, we respect it and yield here.
                    // If all futures have been polled, we also yield here to
                    // avoid starving other tasks waiting on the executor.
                    // (Polling the same future twice per iteration can cause
                    // such starvation: https://github.com/rust-lang/futures-rs/pull/2333)
                    if yielded >= 2 || polled == len {
                        cx.waker().wake_by_ref();
                        return Poll::Pending;
                    }
                    continue;
                }
                Poll::Ready(output) => return Poll::Ready(Some(output)),
            }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.len();
        (len, Some(len))
    }
}

impl<Fut> Debug for FuturesUnordered<Fut> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "FuturesUnordered {{ ... }}")
    }
}

impl<Fut> FuturesUnordered<Fut> {
    /// Clears the set, removing all futures.
    pub fn clear(&mut self) {
        *self = Self::new();
    }
}

impl<Fut> Drop for FuturesUnordered<Fut> {
    fn drop(&mut self) {
        // Before the strong reference to the queue is dropped we need all
        // futures to be dropped. See note at the bottom of this method.
        //
        // If there is a panic before this completes, we leak the queue.
        struct LeakQueueOnDrop<'a, Fut>(&'a mut FuturesUnordered<Fut>);
        impl<Fut> Drop for LeakQueueOnDrop<'_, Fut> {
            fn drop(&mut self) {
                mem::forget(Arc::clone(&self.0.ready_to_run_queue));
            }
        }
        let guard = LeakQueueOnDrop(self);
        // When a `FuturesUnordered` is dropped we want to drop all futures
        // associated with it. At the same time though there may be tons of
        // wakers flying around which contain `Task<Fut>` references
        // inside them. We'll let those naturally get deallocated.
        while !guard.0.head_all.get_mut().is_null() {
            let head = *guard.0.head_all.get_mut();
            let task = unsafe { guard.0.unlink(head) };
            guard.0.release_task(task);
        }
        mem::forget(guard); // safe to release strong reference to queue

        // Note that at this point we could still have a bunch of tasks in the
        // ready to run queue. None of those tasks, however, have futures
        // associated with them so they're safe to destroy on any thread. At
        // this point the `FuturesUnordered` struct, the owner of the one
        // strong reference to the ready to run queue, will drop that strong
        // reference.
        // At that point whichever thread releases the strong refcount last (be
        // it this thread or some other thread as part of an `upgrade`) will
        // clear out the ready to run queue and free all remaining tasks.
        //
        // While that freeing operation isn't guaranteed to happen here, it's
        // guaranteed to happen "promptly" as no more "blocking work" will
        // happen while there's a strong refcount held.
    }
}

impl<'a, Fut: Unpin> IntoIterator for &'a FuturesUnordered<Fut> {
    type Item = &'a Fut;
    type IntoIter = Iter<'a, Fut>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a, Fut: Unpin> IntoIterator for &'a mut FuturesUnordered<Fut> {
    type Item = &'a mut Fut;
    type IntoIter = IterMut<'a, Fut>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

impl<Fut: Unpin> IntoIterator for FuturesUnordered<Fut> {
    type Item = Fut;
    type IntoIter = IntoIter<Fut>;

    fn into_iter(mut self) -> Self::IntoIter {
        // `head_all` can be accessed directly and we don't need to spin on
        // `Task::next_all` since we have exclusive access to the set.
        let task = *self.head_all.get_mut();
        let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } };

        IntoIter { len, inner: self }
    }
}

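/// Collecting an iterator of futures builds the set without polling any of
/// them. An illustrative sketch:
///
/// ```
/// use futures::stream::FuturesUnordered;
///
/// let set: FuturesUnordered<_> = (0..3).map(|i| async move { i }).collect();
/// assert_eq!(set.len(), 3);
/// ```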
impl<Fut> FromIterator<Fut> for FuturesUnordered<Fut> {
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = Fut>,
    {
        let acc = Self::new();
        iter.into_iter().fold(acc, |acc, item| {
            acc.push(item);
            acc
        })
    }
}

impl<Fut: Future> FusedStream for FuturesUnordered<Fut> {
    fn is_terminated(&self) -> bool {
        self.is_terminated.load(Relaxed)
    }
}

impl<Fut> Extend<Fut> for FuturesUnordered<Fut> {
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = Fut>,
    {
        for item in iter {
            self.push(item);
        }
    }
}