#![cfg_attr(not(feature = "full"), allow(dead_code))]

use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::{Arc, Condvar, Mutex};

use std::sync::atomic::Ordering::SeqCst;
use std::time::Duration;
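
/// Blocks the current thread using a mutex/condvar pair.
///
/// The paired [`UnparkThread`] handle, obtained from [`ParkThread::unpark`],
/// wakes the parked thread again, possibly from another thread. A minimal
/// usage sketch (illustrative only, not a doctest):
///
/// ```ignore
/// let mut park = ParkThread::new();
/// let unpark = park.unpark();
/// std::thread::spawn(move || unpark.unpark());
/// park.park(); // returns once `unpark` runs (or immediately if it already ran)
/// ```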
#[derive(Debug)]
pub(crate) struct ParkThread {
    inner: Arc<Inner>,
}

/// Unblocks a thread that was blocked by `ParkThread`.
#[derive(Clone, Debug)]
pub(crate) struct UnparkThread {
    inner: Arc<Inner>,
}
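
/// State shared between a `ParkThread` and its `UnparkThread` handles.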
#[derive(Debug)]
struct Inner {
    state: AtomicUsize,
    mutex: Mutex<()>,
    condvar: Condvar,
}

const EMPTY: usize = 0;
const PARKED: usize = 1;
const NOTIFIED: usize = 2;
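
// `state` transitions between these values as follows: `park` moves
// `EMPTY -> PARKED` before waiting on the condvar and back to `EMPTY` once it
// wakes; `unpark` swaps in `NOTIFIED`, which the next (or concurrent) `park`
// consumes by swapping back to `EMPTY`.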

tokio_thread_local! {
    static CURRENT_PARKER: ParkThread = ParkThread::new();
}

// Bit of a hack, but it is only for loom.
#[cfg(loom)]
tokio_thread_local! {
    pub(crate) static CURRENT_THREAD_PARK_COUNT: AtomicUsize = AtomicUsize::new(0);
}

// ==== impl ParkThread ====

impl ParkThread {
    pub(crate) fn new() -> Self {
        Self {
            inner: Arc::new(Inner {
                state: AtomicUsize::new(EMPTY),
                mutex: Mutex::new(()),
                condvar: Condvar::new(),
            }),
        }
    }

    pub(crate) fn unpark(&self) -> UnparkThread {
        let inner = self.inner.clone();
        UnparkThread { inner }
    }

    pub(crate) fn park(&mut self) {
        #[cfg(loom)]
        CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst));
        self.inner.park();
    }

    pub(crate) fn park_timeout(&mut self, duration: Duration) {
        #[cfg(loom)]
        CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst));
        self.inner.park_timeout(duration);
    }

    pub(crate) fn shutdown(&mut self) {
        self.inner.shutdown();
    }
}

// ==== impl Inner ====

impl Inner {
    fn park(&self) {
        // If we were previously notified then we consume this notification and
        // return quickly.
        if self
            .state
            .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
            .is_ok()
        {
            return;
        }

        // Otherwise we need to coordinate going to sleep.
        let mut m = self.mutex.lock();

        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
            Ok(_) => {}
            Err(NOTIFIED) => {
                // We must read here, even though we know it will be `NOTIFIED`.
                // This is because `unpark` may have been called again since we read
                // `NOTIFIED` in the `compare_exchange` above. We must perform an
                // acquire operation that synchronizes with that `unpark` to observe
                // any writes it made before the call to unpark. To do that we must
                // read from the write it made to `state`.
                let old = self.state.swap(EMPTY, SeqCst);
                debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");

                return;
            }
            Err(actual) => panic!("inconsistent park state; actual = {actual}"),
        }

        loop {
            m = self.condvar.wait(m).unwrap();

            if self
                .state
                .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
                .is_ok()
            {
                // Got a notification.
                return;
            }

            // Spurious wakeup, go back to sleep.
        }
    }

    /// Parks the current thread for at most `dur`.
    fn park_timeout(&self, dur: Duration) {
        // Like `park` above, we have a fast path for an already-notified thread:
        // consume the notification and return quickly. Otherwise we coordinate
        // going to sleep below.
        if self
            .state
            .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
            .is_ok()
        {
            return;
        }

        if dur == Duration::from_millis(0) {
            return;
        }

        let m = self.mutex.lock();

        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
            Ok(_) => {}
            Err(NOTIFIED) => {
                // We must read again here, see `park`.
                let old = self.state.swap(EMPTY, SeqCst);
                debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");

                return;
            }
            Err(actual) => panic!("inconsistent park_timeout state; actual = {actual}"),
        }

        #[cfg(not(all(target_family = "wasm", not(target_feature = "atomics"))))]
        // Wait with a timeout, and if we spuriously wake up or otherwise wake up
        // from a notification, we just want to unconditionally set the state back
        // to empty, either consuming a notification or un-flagging ourselves as
        // parked.
        let (_m, _result) = self.condvar.wait_timeout(m, dur).unwrap();

        #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
        // Wasm without atomics doesn't have threads, so just sleep.
        {
            let _m = m;
            std::thread::sleep(dur);
        }

        match self.state.swap(EMPTY, SeqCst) {
            NOTIFIED => {} // got a notification, hurray!
            PARKED => {}   // no notification, alas
            n => panic!("inconsistent park_timeout state: {n}"),
        }
    }

    fn unpark(&self) {
        // To ensure the unparked thread will observe any writes we made before
        // this call, we must perform a release operation that `park` can
        // synchronize with. To do that we must write `NOTIFIED` even if `state`
        // is already `NOTIFIED`. That is why this must be a swap rather than a
        // compare-and-swap that returns if it reads `NOTIFIED` on failure.
        match self.state.swap(NOTIFIED, SeqCst) {
            EMPTY => return,    // no one was waiting
            NOTIFIED => return, // already unparked
            PARKED => {}        // gotta go wake someone up
            _ => panic!("inconsistent state in unpark"),
        }

        // There is a period between when the parked thread sets `state` to
        // `PARKED` (or last checked `state` in the case of a spurious wake
        // up) and when it actually waits on `cvar`. If we were to notify
        // during this period it would be ignored and then when the parked
        // thread went to sleep it would never wake up. Fortunately, it has
        // `lock` locked at this stage so we can acquire `lock` to wait until
        // it is ready to receive the notification.
        //
        // Releasing `lock` before the call to `notify_one` means that when the
        // parked thread wakes it doesn't get woken only to have to wait for us
        // to release `lock`.
        drop(self.mutex.lock());

        self.condvar.notify_one();
    }

    fn shutdown(&self) {
        self.condvar.notify_all();
    }
}

impl Default for ParkThread {
    fn default() -> Self {
        Self::new()
    }
}

// ===== impl UnparkThread =====

impl UnparkThread {
    pub(crate) fn unpark(&self) {
        self.inner.unpark();
    }
}

use crate::loom::thread::AccessError;
use std::future::Future;
use std::marker::PhantomData;
use std::rc::Rc;
use std::task::{RawWaker, RawWakerVTable, Waker};

/// Blocks the current thread using a condition variable.
#[derive(Debug)]
pub(crate) struct CachedParkThread {
    // `Rc` is neither `Send` nor `Sync`, so this marker keeps
    // `CachedParkThread` on the thread it was created on.
    _anchor: PhantomData<Rc<()>>,
}

impl CachedParkThread {
    /// Creates a new handle to this thread's `ParkThread`.
    ///
    /// This type cannot be moved to other threads, so it should be created on
    /// the thread that the caller intends to park.
    pub(crate) fn new() -> CachedParkThread {
        CachedParkThread {
            _anchor: PhantomData,
        }
    }

    pub(crate) fn waker(&self) -> Result<Waker, AccessError> {
        self.unpark().map(UnparkThread::into_waker)
    }

    fn unpark(&self) -> Result<UnparkThread, AccessError> {
        self.with_current(ParkThread::unpark)
    }

    pub(crate) fn park(&mut self) {
        self.with_current(|park_thread| park_thread.inner.park())
            .unwrap();
    }

    pub(crate) fn park_timeout(&mut self, duration: Duration) {
        self.with_current(|park_thread| park_thread.inner.park_timeout(duration))
            .unwrap();
    }

    /// Gets a reference to the `ParkThread` handle for this thread.
    fn with_current<F, R>(&self, f: F) -> Result<R, AccessError>
    where
        F: FnOnce(&ParkThread) -> R,
    {
        CURRENT_PARKER.try_with(|inner| f(inner))
    }

    pub(crate) fn block_on<F: Future>(&mut self, f: F) -> Result<F::Output, AccessError> {
        use std::task::Context;
        use std::task::Poll::Ready;

        let waker = self.waker()?;
        let mut cx = Context::from_waker(&waker);

        pin!(f);

        loop {
            // Poll the future within a cooperative budget; if it is not ready,
            // park this thread until the waker (backed by `UnparkThread`) fires.
            if let Ready(v) = crate::task::coop::budget(|| f.as_mut().poll(&mut cx)) {
                return Ok(v);
            }

            self.park();
        }
    }
}
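
// A minimal sketch of driving a future to completion with
// `CachedParkThread::block_on` (illustrative only; the caller below is
// hypothetical and not part of this module):
//
//     let mut park = CachedParkThread::new();
//     let out = park.block_on(async { 40 + 2 }).expect("thread-local parker accessible");
//     assert_eq!(out, 42);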

impl UnparkThread {
    pub(crate) fn into_waker(self) -> Waker {
        unsafe {
            let raw = unparker_to_raw_waker(self.inner);
            Waker::from_raw(raw)
        }
    }
}

impl Inner {
    #[allow(clippy::wrong_self_convention)]
    fn into_raw(this: Arc<Inner>) -> *const () {
        Arc::into_raw(this) as *const ()
    }

    unsafe fn from_raw(ptr: *const ()) -> Arc<Inner> {
        Arc::from_raw(ptr as *const Inner)
    }
}
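
// The raw waker vtable below maps `Waker` operations onto the `Arc<Inner>`
// refcount and `Inner::unpark`: cloning bumps the strong count, `wake` takes
// ownership of (and eventually drops) the reference, `wake_by_ref` borrows it,
// and dropping the waker releases the reference.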

unsafe fn unparker_to_raw_waker(unparker: Arc<Inner>) -> RawWaker {
    RawWaker::new(
        Inner::into_raw(unparker),
        &RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker),
    )
}

unsafe fn clone(raw: *const ()) -> RawWaker {
    Arc::increment_strong_count(raw as *const Inner);
    unparker_to_raw_waker(Inner::from_raw(raw))
}

unsafe fn drop_waker(raw: *const ()) {
    drop(Inner::from_raw(raw));
}

unsafe fn wake(raw: *const ()) {
    let unparker = Inner::from_raw(raw);
    unparker.unpark();
}

unsafe fn wake_by_ref(raw: *const ()) {
    let raw = raw as *const Inner;
    (*raw).unpark();
}

#[cfg(loom)]
pub(crate) fn current_thread_park_count() -> usize {
    CURRENT_THREAD_PARK_COUNT.with(|count| count.load(SeqCst))
}
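
// A minimal test sketch of the notification handshake (illustrative; these
// tests and their names are not part of the existing suite). It assumes the
// loom wrappers behave like their std counterparts when loom is disabled.
#[cfg(all(test, not(loom)))]
mod park_thread_sketch_tests {
    use super::*;

    #[test]
    fn unpark_before_park_returns_immediately() {
        let mut park = ParkThread::new();
        let unpark = park.unpark();

        // `unpark` stores `NOTIFIED`, so this `park` consumes the notification
        // on the fast path instead of blocking.
        unpark.unpark();
        park.park();
    }

    #[test]
    fn park_timeout_elapses_without_notification() {
        let mut park = ParkThread::new();

        // With no pending notification this waits for at most the given
        // duration, then resets the state to `EMPTY` and returns.
        park.park_timeout(Duration::from_millis(10));
    }
}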