|
87 | 87 | use crate::cell::Cell; |
88 | 88 | use crate::fmt; |
89 | 89 | use crate::marker; |
90 | | -use crate::ptr; |
91 | 90 | use crate::sync::atomic::{AtomicUsize, AtomicBool, Ordering}; |
92 | 91 | use crate::thread::{self, Thread}; |
93 | 92 |
|
@@ -432,48 +431,47 @@ impl Once { |
432 | 431 | } |
433 | 432 | } |
434 | 433 |
|
435 | | -fn wait(state_and_queue: &AtomicUsize, current_state: usize) { |
436 | | - // Create the node for our current thread that we are going to try to slot |
437 | | - // in at the head of the linked list. |
438 | | - let mut node = Waiter { |
439 | | - thread: Cell::new(Some(thread::current())), |
440 | | - signaled: AtomicBool::new(false), |
441 | | - next: ptr::null(), |
442 | | - }; |
443 | | - let me = &node as *const Waiter as usize; |
444 | | - assert!(me & STATE_MASK == 0); // We assume pointers have 2 free bits that |
445 | | - // we can use for state. |
446 | | - |
447 | | - // Try to slide in the node at the head of the linked list. |
448 | | - // Run in a loop where we make sure the status is still RUNNING, and that |
449 | | - // another thread did not just replace the head of the linked list. |
450 | | - let mut old_head_and_status = current_state; |
| 434 | +fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) { |
| 435 | + // Note: the following code was carefully written to avoid creating a |
| 436 | + // mutable reference to `node` that gets aliased. |
451 | 437 | loop { |
452 | | - if old_head_and_status & STATE_MASK != RUNNING { |
453 | | - return; // No need anymore to enqueue ourselves. |
| 438 | + // Don't queue this thread if the status is no longer running, |
| 439 | + // otherwise we will not be woken up. |
| 440 | + if current_state & STATE_MASK != RUNNING { |
| 441 | + return; |
454 | 442 | } |
455 | 443 |
|
456 | | - node.next = (old_head_and_status & !STATE_MASK) as *const Waiter; |
457 | | - let old = state_and_queue.compare_and_swap(old_head_and_status, |
| 444 | + // Create the node for our current thread. |
| 445 | + let node = Waiter { |
| 446 | + thread: Cell::new(Some(thread::current())), |
| 447 | + signaled: AtomicBool::new(false), |
| 448 | + next: (current_state & !STATE_MASK) as *const Waiter, |
| 449 | + }; |
| 450 | + let me = &node as *const Waiter as usize; |
| 451 | + |
| 452 | + // Try to slide in the node at the head of the linked list, making sure |
| 453 | + // that another thread didn't just replace the head of the linked list. |
| 454 | + let old = state_and_queue.compare_and_swap(current_state, |
458 | 455 | me | RUNNING, |
459 | 456 | Ordering::Release); |
460 | | - if old == old_head_and_status { |
461 | | - break; // Success! |
| 457 | + if old != current_state { |
| 458 | + current_state = old; |
| 459 | + continue; |
462 | 460 | } |
463 | | - old_head_and_status = old; |
464 | | - } |
465 | 461 |
|
466 | | - // We have enqueued ourselves, now let's wait. |
467 | | - // It is important not to return before being signaled, otherwise we would |
468 | | - // drop our `Waiter` node and leave a hole in the linked list (and a |
469 | | - // dangling reference). Guard against spurious wakeups by reparking |
470 | | - // ourselves until we are signaled. |
471 | | - while !node.signaled.load(Ordering::Acquire) { |
472 | | - // If the managing thread happens to signal and unpark us before we can |
473 | | - // park ourselves, the result could be this thread never gets unparked. |
474 | | - // Luckily `park` comes with the guarantee that if it got an `unpark` |
475 | | - // just before on an unparked thread it does not park. |
476 | | - thread::park(); |
| 462 | + // We have enqueued ourselves, now let's wait. |
| 463 | + // It is important not to return before being signaled, otherwise we |
| 464 | + // would drop our `Waiter` node and leave a hole in the linked list |
| 465 | + // (and a dangling reference). Guard against spurious wakeups by |
| 466 | + // reparking ourselves until we are signaled. |
| 467 | + while !node.signaled.load(Ordering::Acquire) { |
| 468 | + // If the managing thread happens to signal and unpark us before we |
| 469 | + // can park ourselves, the result could be this thread never gets |
| 470 | + // unparked. Luckily `park` comes with the guarantee that if it got |
| 471 | + // an `unpark` just before on an unparked thread it does not park. |
| 472 | + thread::park(); |
| 473 | + } |
| 474 | + break; |
477 | 475 | } |
478 | 476 | } |
479 | 477 |
|
|
0 commit comments