futures_core/task/__internal/atomic_waker.rs
use core::cell::UnsafeCell;
use core::fmt;
use core::task::Waker;

use atomic::AtomicUsize;
use atomic::Ordering::{AcqRel, Acquire, Release};

#[cfg(feature = "portable-atomic")]
use portable_atomic as atomic;

#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic;

/// A synchronization primitive for task wakeup.
///
/// Sometimes the task interested in a given event will change over time.
/// An `AtomicWaker` can coordinate concurrent notifications with the consumer
/// potentially "updating" the underlying task to wake up. This is useful in
/// scenarios where a computation completes in another thread and wants to
/// notify the consumer, but the consumer is in the process of being migrated to
/// a new logical task.
///
/// Consumers should call `register` before checking the result of a computation
/// and producers should call `wake` after producing the computation (this
/// differs from the usual `thread::park` pattern). It is also permitted for
/// `wake` to be called **before** `register`. This results in a no-op.
///
/// A single `AtomicWaker` may be reused for any number of calls to `register` or
/// `wake`.
///
/// # Memory ordering
///
/// Calling `register` "acquires" all memory "released" by calls to `wake`
/// before the call to `register`. Later calls to `wake` will wake the
/// registered waker (on contention this wake might be triggered in `register`).
///
/// For concurrent calls to `register` (should be avoided) the ordering is only
/// guaranteed for the winning call.
///
/// # Examples
///
/// Here is a simple example providing a `Flag` that can be signalled manually
/// when it is ready.
///
/// ```
/// use futures::future::Future;
/// use futures::task::{Context, Poll, AtomicWaker};
/// use std::sync::Arc;
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::Ordering::Relaxed;
/// use std::pin::Pin;
///
/// struct Inner {
///     waker: AtomicWaker,
///     set: AtomicBool,
/// }
///
/// #[derive(Clone)]
/// struct Flag(Arc<Inner>);
///
/// impl Flag {
///     pub fn new() -> Self {
///         Self(Arc::new(Inner {
///             waker: AtomicWaker::new(),
///             set: AtomicBool::new(false),
///         }))
///     }
///
///     pub fn signal(&self) {
///         self.0.set.store(true, Relaxed);
///         self.0.waker.wake();
///     }
/// }
///
/// impl Future for Flag {
///     type Output = ();
///
///     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
///         // quick check to avoid registration if already done.
///         if self.0.set.load(Relaxed) {
///             return Poll::Ready(());
///         }
///
///         self.0.waker.register(cx.waker());
///
///         // Need to check condition **after** `register` to avoid a race
///         // condition that would result in lost notifications.
///         if self.0.set.load(Relaxed) {
///             Poll::Ready(())
///         } else {
///             Poll::Pending
///         }
///     }
/// }
/// ```
pub struct AtomicWaker {
    state: AtomicUsize,
    waker: UnsafeCell<Option<Waker>>,
}

// `AtomicWaker` is a multi-consumer, single-producer transfer cell. The cell
// stores a `Waker` value produced by calls to `register` and many threads can
// race to take the waker (to wake it) by calling `wake`.
//
// If a new `Waker` instance is produced by calling `register` before an
// existing one is consumed, then the existing one is overwritten.
//
// While `AtomicWaker` is single-producer, the implementation ensures memory
// safety. In the event of concurrent calls to `register`, there will be a
// single winner whose waker will get stored in the cell. The losers will not
// have their tasks woken. As such, callers should ensure to add synchronization
// to calls to `register`.
//
// The implementation uses a single `AtomicUsize` value to coordinate access to
// the `Waker` cell. There are two bits that are operated on independently.
// These are represented by `REGISTERING` and `WAKING`.
//
// The `REGISTERING` bit is set when a producer enters the critical section. The
// `WAKING` bit is set when a consumer enters the critical section. Neither bit
// being set is represented by `WAITING`.
//
// A thread obtains an exclusive lock on the waker cell by transitioning the
// state from `WAITING` to `REGISTERING` or `WAKING`, depending on the operation
// the thread wishes to perform. When this transition is made, it is guaranteed
// that no other thread will access the waker cell.
//
// # Registering
//
// On a call to `register`, an attempt to transition the state from WAITING to
// REGISTERING is made. On success, the caller obtains a lock on the waker cell.
//
// If the lock is obtained, then the thread sets the waker cell to the waker
// provided as an argument. Then it attempts to transition the state back from
// `REGISTERING` -> `WAITING`.
//
// If this transition is successful, then the registering process is complete
// and the next call to `wake` will observe the waker.
//
// If the transition fails, then there was a concurrent call to `wake` that was
// unable to access the waker cell (due to the registering thread holding the
// lock). To handle this, the registering thread removes the waker it just set
// from the cell and calls `wake` on it. This call to wake represents the
// attempt to wake by the other thread (that set the `WAKING` bit). The state is
// then transitioned from `REGISTERING | WAKING` back to `WAITING`. This
// transition must succeed because, at this point, the state cannot be
// transitioned by another thread.
//
// # Waking
//
// On a call to `wake`, an attempt to transition the state from `WAITING` to
// `WAKING` is made. On success, the caller obtains a lock on the waker cell.
//
// If the lock is obtained, then the thread takes ownership of the current value
// in the waker cell, and calls `wake` on it. The state is then transitioned
// back to `WAITING`. This transition must succeed as, at this point, the state
// cannot be transitioned by another thread.
//
// If the thread is unable to obtain the lock, the `WAKING` bit is still set.
// This is because it has either been set by the current thread but the
// previous value included the `REGISTERING` bit **or** a concurrent thread is
// in the `WAKING` critical section. Either way, no action must be taken.
//
// If the current thread is the only concurrent call to `wake` and another
// thread is in the `register` critical section, when the other thread **exits**
// the `register` critical section, it will observe the `WAKING` bit and handle
// the wake itself.
//
// If another thread is in the `wake` critical section, then it will handle
// waking the task.
//
// # A potential race (is safely handled).
//
// Imagine the following situation:
//
// * Thread A obtains the `wake` lock and wakes a task.
//
// * Before thread A releases the `wake` lock, the woken task is scheduled.
//
// * Thread B attempts to wake the task. In theory this should result in the
//   task being woken, but it cannot because thread A still holds the wake
//   lock.
//
// This case is handled by requiring users of `AtomicWaker` to call `register`
// **before** attempting to observe the application state change that resulted
// in the task being awoken. The wakers also change the application state
// before calling wake.
//
// Because of this, the woken task will do one of two things.
//
// 1) Observe the application state change that Thread B is waking on. In this
//    case, it is OK for Thread B's wake to be lost.
//
// 2) Call register before attempting to observe the application state. Since
//    Thread A still holds the `wake` lock, the call to `register` will result
//    in the task waking itself and getting scheduled again.
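//
// # State transition summary (informal)
//
// A compact recap of the transitions described above, derived from the
// comments and code in this file rather than a normative specification:
//
// * `register` observing `WAITING`: locks the cell, stores the waker, then
//   unlocks (waking the stored waker itself if a `wake` raced in).
// * `register` observing `WAKING`: wakes the new waker by reference so the
//   task polls again and retries registration later.
// * `register` observing `REGISTERING` (with or without `WAKING`): drops the
//   call; a concurrent `register` holds the lock.
// * `wake`/`take` observing `WAITING`: locks the cell, takes the waker, and
//   unlocks; `wake` then invokes it.
// * `wake`/`take` observing any other state: leaves the `WAKING` bit set and
//   returns; either the registering thread or the in-progress `wake` handles
//   the wakeup.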

/// Idle state
const WAITING: usize = 0;

/// A new waker value is being registered with the `AtomicWaker` cell.
const REGISTERING: usize = 0b01;

/// The waker currently registered with the `AtomicWaker` cell is being woken.
const WAKING: usize = 0b10;

impl AtomicWaker {
    /// Create an `AtomicWaker`.
    pub const fn new() -> Self {
        // Make sure that task is Sync
        #[allow(dead_code)]
        trait AssertSync: Sync {}
        impl AssertSync for Waker {}

        Self { state: AtomicUsize::new(WAITING), waker: UnsafeCell::new(None) }
    }

    /// Registers the waker to be notified on calls to `wake`.
    ///
    /// The new task will take the place of any previous tasks that were registered
    /// by previous calls to `register`. Any calls to `wake` that happen after
    /// a call to `register` (as defined by the memory ordering rules), will
    /// notify the `register` caller's task and deregister the waker from future
    /// notifications. Because of this, callers should ensure `register` gets
    /// invoked with a new `Waker` **each** time they require a wakeup.
    ///
    /// It is safe to call `register` with multiple other threads concurrently
    /// calling `wake`. This will result in the `register` caller's current
    /// task being notified once.
    ///
    /// This function is safe to call concurrently, but this is generally a bad
    /// idea. Concurrent calls to `register` will attempt to register different
    /// tasks to be notified. One of the callers will win and have its task set,
    /// but there is no guarantee as to which caller will succeed.
    ///
    /// # Examples
    ///
    /// Here is how `register` is used when implementing a flag.
    ///
    /// ```
    /// use futures::future::Future;
    /// use futures::task::{Context, Poll, AtomicWaker};
    /// use std::sync::atomic::AtomicBool;
    /// use std::sync::atomic::Ordering::Relaxed;
    /// use std::pin::Pin;
    ///
    /// struct Flag {
    ///     waker: AtomicWaker,
    ///     set: AtomicBool,
    /// }
    ///
    /// impl Future for Flag {
    ///     type Output = ();
    ///
    ///     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
    ///         // Register **before** checking `set` to avoid a race condition
    ///         // that would result in lost notifications.
    ///         self.waker.register(cx.waker());
    ///
    ///         if self.set.load(Relaxed) {
    ///             Poll::Ready(())
    ///         } else {
    ///             Poll::Pending
    ///         }
    ///     }
    /// }
    /// ```
    pub fn register(&self, waker: &Waker) {
        match self
            .state
            .compare_exchange(WAITING, REGISTERING, Acquire, Acquire)
            .unwrap_or_else(|x| x)
        {
            WAITING => {
                unsafe {
                    // Lock acquired, update the waker cell

                    // Avoid cloning the waker if the old waker will awaken the same task.
                    match &*self.waker.get() {
                        Some(old_waker) if old_waker.will_wake(waker) => (),
                        _ => *self.waker.get() = Some(waker.clone()),
                    }

                    // Release the lock. If the state transitioned to include
                    // the `WAKING` bit, this means that at least one wake has
                    // been called concurrently.
                    //
                    // Start by assuming that the state is `REGISTERING` as this
                    // is what we just set it to. If this holds, we know that no
                    // other writes were performed in the meantime, so there is
                    // nothing to acquire, only release. In case of concurrent
                    // wakers, we need to acquire their releases, so success needs
                    // to do both.
                    let res = self.state.compare_exchange(REGISTERING, WAITING, AcqRel, Acquire);

                    match res {
                        Ok(_) => {
                            // memory ordering: acquired self.state during CAS
                            // - if previous wakes went through it syncs with
                            //   their final release (`fetch_and`)
                            // - if there was no previous wake the next wake
                            //   will wake us, no sync needed.
                        }
                        Err(actual) => {
                            // This branch can only be reached if at least one
                            // concurrent thread called `wake`. In this
                            // case, `actual` **must** be `REGISTERING |
                            // WAKING`.
                            debug_assert_eq!(actual, REGISTERING | WAKING);

                            // Take the waker to wake once the atomic operation has
                            // completed.
                            let waker = (*self.waker.get()).take().unwrap();

                            // We need to return to WAITING state (clear our lock and
                            // concurrent WAKING flag). This needs to acquire all
                            // WAKING fetch_or releases and it needs to release our
                            // update to self.waker, so we need a `swap` operation.
                            self.state.swap(WAITING, AcqRel);

                            // memory ordering: we acquired the state for all
                            // concurrent wakes, but future wakes might still
                            // need to wake us in case we can't make progress
                            // from the pending wakes.
                            //
                            // So we simply schedule to come back later (we could
                            // also simply leave the registration in place above).
                            waker.wake();
                        }
                    }
                }
            }
            WAKING => {
                // Currently in the process of waking the task, i.e.,
                // `wake` is currently being called on the old task handle.
                //
                // memory ordering: we acquired the state for all
                // concurrent wakes, but future wakes might still
                // need to wake us in case we can't make progress
                // from the pending wakes.
                //
                // So we simply schedule to come back later (we
                // could also spin here trying to acquire the lock
                // to register).
                waker.wake_by_ref();
            }
            state => {
                // In this case, a concurrent thread is holding the
                // "registering" lock. This probably indicates a bug in the
                // caller's code as racing to call `register` doesn't make much
                // sense.
                //
                // memory ordering: don't care. a concurrent register() is going
                // to succeed and provide proper memory ordering.
                //
                // We just want to maintain memory safety. It is ok to drop the
                // call to `register`.
                debug_assert!(state == REGISTERING || state == REGISTERING | WAKING);
            }
        }
    }

    /// Calls `wake` on the last `Waker` passed to `register`.
    ///
    /// If `register` has not been called yet, then this does nothing.
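    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the original documentation): waking
    /// before a registration is a no-op, while waking after a registration
    /// consumes and wakes the stored waker exactly once. The `Counter` waker
    /// below is purely illustrative.
    ///
    /// ```
    /// use futures::task::AtomicWaker;
    /// use std::sync::Arc;
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    /// use std::task::{Wake, Waker};
    ///
    /// // Counts how many times it has been woken.
    /// struct Counter(AtomicUsize);
    ///
    /// impl Wake for Counter {
    ///     fn wake(self: Arc<Self>) {
    ///         self.0.fetch_add(1, Ordering::SeqCst);
    ///     }
    /// }
    ///
    /// let counter = Arc::new(Counter(AtomicUsize::new(0)));
    /// let waker = Waker::from(counter.clone());
    ///
    /// let cell = AtomicWaker::new();
    /// cell.wake(); // nothing registered yet: no-op
    /// assert_eq!(counter.0.load(Ordering::SeqCst), 0);
    ///
    /// cell.register(&waker);
    /// cell.wake(); // wakes and consumes the registered waker
    /// assert_eq!(counter.0.load(Ordering::SeqCst), 1);
    ///
    /// cell.wake(); // the waker was already taken: no-op
    /// assert_eq!(counter.0.load(Ordering::SeqCst), 1);
    /// ```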
    pub fn wake(&self) {
        if let Some(waker) = self.take() {
            waker.wake();
        }
    }

    /// Returns the last `Waker` passed to `register`, so that the user can wake it.
    ///
    /// Sometimes, just waking the `AtomicWaker` is not fine grained enough. This allows the user
    /// to take the waker and then wake it separately, rather than performing both steps in one
    /// atomic action.
    ///
    /// If a waker has not been registered, this returns `None`.
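    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the original documentation) of deferring
    /// the actual wakeup to the caller, assuming the `noop_waker` helper from
    /// `futures::task` is available:
    ///
    /// ```
    /// use futures::task::{noop_waker, AtomicWaker};
    ///
    /// let cell = AtomicWaker::new();
    ///
    /// // Nothing has been registered yet, so there is nothing to take.
    /// assert!(cell.take().is_none());
    ///
    /// // After `register`, `take` hands the waker back so the caller decides
    /// // when (or whether) to invoke it.
    /// cell.register(&noop_waker());
    /// let waker = cell.take().expect("a waker was registered");
    /// waker.wake();
    ///
    /// // The waker was taken, so a second `take` returns `None` again.
    /// assert!(cell.take().is_none());
    /// ```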
    pub fn take(&self) -> Option<Waker> {
        // AcqRel ordering is used in order to acquire the value of the `task`
        // cell as well as to establish a `release` ordering with whatever
        // memory the `AtomicWaker` is associated with.
        match self.state.fetch_or(WAKING, AcqRel) {
            WAITING => {
                // The waking lock has been acquired.
                let waker = unsafe { (*self.waker.get()).take() };

                // Release the lock
                self.state.fetch_and(!WAKING, Release);

                waker
            }
            state => {
                // There is a concurrent thread currently updating the
                // associated task.
                //
                // Nothing more to do as the `WAKING` bit has been set. It
                // doesn't matter if there are concurrent registering threads or
                // not.
                //
                debug_assert!(
                    state == REGISTERING || state == REGISTERING | WAKING || state == WAKING
                );
                None
            }
        }
    }
}

impl Default for AtomicWaker {
    fn default() -> Self {
        Self::new()
    }
}

impl fmt::Debug for AtomicWaker {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "AtomicWaker")
    }
}

unsafe impl Send for AtomicWaker {}
unsafe impl Sync for AtomicWaker {}