tokio/runtime/scheduler/inject/rt_multi_thread.rs

use super::{Shared, Synced};

use crate::runtime::scheduler::Lock;
use crate::runtime::task;

use std::sync::atomic::Ordering::Release;

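// A `&mut Synced` can act as its own lock handle: holding the exclusive
// reference already guarantees unique access, so "locking" is a no-op.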
impl<'a> Lock<Synced> for &'a mut Synced {
    type Handle = &'a mut Synced;

    fn lock(self) -> Self::Handle {
        self
    }
}

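// Identity conversion so generic code can call `as_mut()` on any lock
// handle, whether it is a guard that derefs to `Synced` or a `&mut Synced`.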
impl AsMut<Synced> for Synced {
    fn as_mut(&mut self) -> &mut Synced {
        self
    }
}

impl<T: 'static> Shared<T> {
    /// Pushes several values into the queue.
    ///
    /// # Safety
    ///
    /// Must be called with the same `Synced` instance returned by `Inject::new`
    #[inline]
    pub(crate) unsafe fn push_batch<L, I>(&self, shared: L, mut iter: I)
    where
        L: Lock<Synced>,
        I: Iterator<Item = task::Notified<T>>,
    {
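        // An empty batch is a no-op. Otherwise, keep the first task as the
        // head of the chain being built.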
        let first = match iter.next() {
            Some(first) => first.into_raw(),
            None => return,
        };

        // Link up all the tasks.
        let mut prev = first;
        let mut counter = 1;

        // We are going to be called with an `std::iter::Chain`, and that
        // iterator overrides `for_each` to something that is easier for the
        // compiler to optimize than a loop.
        iter.for_each(|next| {
            let next = next.into_raw();

            // safety: Holding the Notified for a task guarantees exclusive
            // access to the `queue_next` field.
            unsafe { prev.set_queue_next(Some(next)) };
            prev = next;
            counter += 1;
        });

        // Now that the tasks are linked together, insert them into the
        // linked list.
        self.push_batch_inner(shared, first, prev, counter);
    }

    /// Inserts several tasks that have been linked together into the queue.
    ///
    /// The provided head and tail may be the same task. In this case, a
    /// single task is inserted.
    #[inline]
    unsafe fn push_batch_inner<L>(
        &self,
        shared: L,
        batch_head: task::RawTask,
        batch_tail: task::RawTask,
        num: usize,
    ) where
        L: Lock<Synced>,
    {
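        // The caller must hand over a properly terminated chain; a dangling
        // `queue_next` on the tail would splice unrelated tasks into the list.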
        debug_assert!(unsafe { batch_tail.get_queue_next().is_none() });

        let mut synced = shared.lock();

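        // The queue is closed, so the batch must not be pushed. Release the
        // lock first, then walk the chain dropping each task:
        // re-materializing the `Notified` from the raw pointer hands
        // ownership back, so the task is dropped here.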
        if synced.as_mut().is_closed {
            drop(synced);

            let mut curr = Some(batch_head);

            while let Some(task) = curr {
                curr = task.get_queue_next();

                let _ = unsafe { task::Notified::<T>::from_raw(task) };
            }

            return;
        }

        let synced = synced.as_mut();

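        // Append the batch: link it after the current tail, or make the
        // batch head the queue head if the queue is empty.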
        if let Some(tail) = synced.tail {
            unsafe {
                tail.set_queue_next(Some(batch_head));
            }
        } else {
            synced.head = Some(batch_head);
        }

        synced.tail = Some(batch_tail);

        // Increment the count.
        //
        // safety: All updates to the len atomic are guarded by the mutex. As
        // such, a non-atomic load followed by a store is safe.
        let len = self.len.unsync_load();

        self.len.store(len + num, Release);
    }
}