use crate::task::AtomicWaker;
use alloc::sync::Arc;
use core::cell::UnsafeCell;
use core::ptr;
use core::sync::atomic::AtomicPtr;
use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};

use super::abort::abort;
use super::task::Task;
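
/// The result of a call to `ReadyToRunQueue::dequeue`: a task that is ready
/// to be polled, an empty queue, or a queue that is momentarily inconsistent
/// because a concurrent `enqueue` has not finished linking its node yet.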
pub(super) enum Dequeue<Fut> {
    Data(*const Task<Fut>),
    Empty,
    Inconsistent,
}

pub(super) struct ReadyToRunQueue<Fut> {
    // The waker of the task using `FuturesUnordered`.
    pub(super) waker: AtomicWaker,

    // Head/tail of the readiness queue
    pub(super) head: AtomicPtr<Task<Fut>>,
    pub(super) tail: UnsafeCell<*const Task<Fut>>,
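    // Stub node of the intrusive queue; `tail` points at it when the queue
    // is empty, so `dequeue` never hands it out.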
    pub(super) stub: Arc<Task<Fut>>,
}

/// An MPSC queue into which the tasks containing the futures are inserted
/// whenever the future inside is scheduled for polling.
impl<Fut> ReadyToRunQueue<Fut> {
    // FIXME: this takes raw pointer without safety conditions.
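    // In practice callers pass a pointer that is kept alive by a strong
    // reference owned by the queue and whose `queued` flag is already set
    // (see the `debug_assert!` below).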

    /// The enqueue function from the 1024cores intrusive MPSC queue algorithm.
    pub(super) fn enqueue(&self, task: *const Task<Fut>) {
        unsafe {
            debug_assert!((*task).queued.load(Relaxed));

            // This action does not require any coordination
            (*task).next_ready_to_run.store(ptr::null_mut(), Relaxed);

            // Note that these atomic orderings come from 1024cores
            let task = task as *mut _;
            let prev = self.head.swap(task, AcqRel);
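            // Between the swap above and the store below the queue is in an
            // inconsistent state: `head` already points at the new node, but
            // the old head is not linked to it yet. `dequeue` detects this
            // and reports `Dequeue::Inconsistent`.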
            (*prev).next_ready_to_run.store(task, Release);
        }
    }

    /// The dequeue function from the 1024cores intrusive MPSC queue algorithm.
    ///
    /// Note that this is unsafe as it requires mutual exclusion (only one
    /// thread can call this) to be guaranteed elsewhere.
    pub(super) unsafe fn dequeue(&self) -> Dequeue<Fut> {
        unsafe {
            let mut tail = *self.tail.get();
            let mut next = (*tail).next_ready_to_run.load(Acquire);
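
            // The stub node is a permanent placeholder and must never be
            // returned to the caller, so skip past it if it is currently at
            // the tail of the queue.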
            if tail == self.stub() {
                if next.is_null() {
                    return Dequeue::Empty;
                }

                *self.tail.get() = next;
                tail = next;
                next = (*next).next_ready_to_run.load(Acquire);
            }

            if !next.is_null() {
                *self.tail.get() = next;
                debug_assert!(tail != self.stub());
                return Dequeue::Data(tail);
            }
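
            // `next` is null here. If `tail` is not also the most recently
            // enqueued node (`head`), a producer is between the `swap` and
            // the `store` in `enqueue` and the queue is momentarily
            // inconsistent.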
            if self.head.load(Acquire) as *const _ != tail {
                return Dequeue::Inconsistent;
            }
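
            // `tail` is the last real node in the queue. Push the stub back
            // in behind it so that `tail` itself can be handed out once the
            // link to the stub becomes visible.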
            self.enqueue(self.stub());

            next = (*tail).next_ready_to_run.load(Acquire);

            if !next.is_null() {
                *self.tail.get() = next;
                return Dequeue::Data(tail);
            }

            Dequeue::Inconsistent
        }
    }
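
    /// Returns a raw pointer to the queue's stub node.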
    pub(super) fn stub(&self) -> *const Task<Fut> {
        Arc::as_ptr(&self.stub)
    }
}

impl<Fut> Drop for ReadyToRunQueue<Fut> {
    fn drop(&mut self) {
        // Once we're in the destructor for `Inner<Fut>` we need to clear out
        // the ready to run queue of tasks if there's anything left in there.
        //
        // Note that each task has a strong reference count associated with it
        // which is owned by the ready to run queue. All tasks should have had
        // their futures dropped already by the `FuturesUnordered` destructor
        // above, so we're just pulling out tasks and dropping their refcounts.
        unsafe {
            loop {
                match self.dequeue() {
                    Dequeue::Empty => break,
                    Dequeue::Inconsistent => abort("inconsistent in drop"),
                    Dequeue::Data(ptr) => drop(Arc::from_raw(ptr)),
                }
            }
        }
    }
}