@@ -134,7 +134,11 @@ const MASK: usize = !(QUEUE_LOCKED | QUEUED | LOCKED);
 #[inline]
 fn write_lock(state: State) -> Option<State> {
     let state = state.wrapping_byte_add(LOCKED);
-    if state.addr() & LOCKED == LOCKED { Some(state) } else { None }
+    if state.addr() & LOCKED == LOCKED {
+        Some(state)
+    } else {
+        None
+    }
 }
 
 /// Marks the state as read-locked, if possible.
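For context, `write_lock` relies on the `LOCKED` flag living in the lowest bit of the state pointer: adding `LOCKED` sets that bit only when it was previously clear, and carries out of it otherwise. A minimal sketch of the same check on plain addresses, assuming the module's usual bit layout (`LOCKED = 1`, `QUEUED = 2`), not the actual pointer-based `State`:

```rust
// Sketch of the write_lock flag check on plain usize addresses; the constant
// values are assumptions mirroring the module's usual layout.
const LOCKED: usize = 0b01;
const QUEUED: usize = 0b10;

fn write_lock_addr(state: usize) -> Option<usize> {
    // Adding LOCKED sets the low bit only if it was clear; if the lock was
    // already held, the addition carries out of that bit and the check fails.
    let state = state.wrapping_add(LOCKED);
    if state & LOCKED == LOCKED { Some(state) } else { None }
}

fn main() {
    assert_eq!(write_lock_addr(0), Some(LOCKED)); // unlocked -> write-locked
    assert_eq!(write_lock_addr(LOCKED), None); // already locked
    assert_eq!(write_lock_addr(QUEUED), Some(QUEUED | LOCKED)); // queued but unlocked
}
```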
@@ -315,13 +319,26 @@ impl RwLock {
         let mut node = Node::new(write);
         let mut state = self.state.load(Relaxed);
         let mut count = 0;
+        let mut has_slept = false;
         loop {
             if let Some(next) = update(state) {
                 // The lock is available, try locking it.
                 match self.state.compare_exchange_weak(state, next, Acquire, Relaxed) {
                     Ok(_) => return,
                     Err(new) => state = new,
                 }
+            } else if !write && has_slept {
+                // If we are trying to read and we have already gone to sleep, first check if the
+                // lock is in read mode before going to sleep again.
+                let tail = unsafe { add_backlinks_and_find_tail(to_node(state)).as_ref() };
+                let _ = tail.next.0.fetch_update(Release, Acquire, |count: *mut Node| {
+                    if count.mask(MASK).addr() > 0 {
+                        Some(without_provenance_mut(state.addr().checked_add(SINGLE)? | LOCKED))
+                    } else {
+                        None
+                    }
+                });
+                todo!("This is very wrong");
             } else if state.addr() & QUEUED == 0 && count < SPIN_COUNT {
                 // If the lock is not available and no threads are queued, spin
                 // for a while, using exponential backoff to decrease cache
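The intent of the new `!write && has_slept` branch (still guarded by a `todo!`) appears to be that a reader which has already been woken once should try to join an existing read lock rather than re-queue immediately. A rough sketch of that join-if-read-locked pattern on a plain `AtomicUsize` follows; note that the real code has to fetch the reader count from the queue's tail node whenever waiters are queued, and the constant values here are assumptions:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Relaxed, Release}};

// Assumed bit layout, mirroring the module's constants.
const LOCKED: usize = 0b0001;
const QUEUED: usize = 0b0010;
const QUEUE_LOCKED: usize = 0b0100;
const SINGLE: usize = 0b1000;
const MASK: usize = !(QUEUE_LOCKED | QUEUED | LOCKED);

/// Add one reader, but only if the lock is already read-locked
/// (LOCKED set and a non-zero reader count in the upper bits).
fn try_join_readers(state: &AtomicUsize) -> bool {
    state
        .fetch_update(Release, Acquire, |s| {
            if s & LOCKED == LOCKED && s & MASK > 0 {
                s.checked_add(SINGLE)
            } else {
                None
            }
        })
        .is_ok()
}

fn main() {
    let read_locked = AtomicUsize::new(LOCKED | SINGLE); // one reader holds the lock
    assert!(try_join_readers(&read_locked));
    assert_eq!(read_locked.load(Relaxed), LOCKED | 2 * SINGLE);

    let write_locked = AtomicUsize::new(LOCKED); // a writer holds it: cannot join
    assert!(!try_join_readers(&write_locked));
}
```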
@@ -386,6 +403,8 @@ impl RwLock {
                 node.wait();
             }
 
+            has_slept = true;
+
             // The node was removed from the queue, disarm the guard.
             mem::forget(guard);
 
@@ -451,45 +470,44 @@ impl RwLock {
 
     #[inline]
     pub unsafe fn downgrade(&self) {
-        // Atomically set to read-locked with a single reader, without any waiting threads.
-        if let Err(mut state) = self.state.compare_exchange(
+        // Atomically attempt to go from a single writer without any waiting threads to a single
+        // reader without any waiting threads.
+        if let Err(state) = self.state.compare_exchange(
             without_provenance_mut(LOCKED),
             without_provenance_mut(LOCKED | SINGLE),
             Release,
             Relaxed,
         ) {
-            // Attempt to grab the queue lock.
-            loop {
-                let next = state.map_addr(|addr| addr | QUEUE_LOCKED);
-                match self.state.compare_exchange(state, next, AcqRel, Relaxed) {
-                    Err(new_state) => state = new_state,
-                    Ok(new_state) => {
-                        assert_eq!(
-                            new_state.mask(!MASK).addr(),
-                            LOCKED | QUEUED | QUEUE_LOCKED,
-                            "{:p}",
-                            new_state
-                        );
-                        state = new_state;
-                        break;
-                    }
-                }
-            }
-
-            assert_eq!(state.mask(!MASK).addr(), LOCKED | QUEUED | QUEUE_LOCKED);
+            debug_assert!(
+                state.mask(LOCKED).addr() != 0 && state.mask(QUEUED).addr() != 0,
+                "RwLock should be LOCKED and QUEUED"
+            );
 
-            // SAFETY: We have the queue lock so all safety contracts are fulfilled.
-            let tail = unsafe { add_backlinks_and_find_tail(to_node(state)).as_ref() };
+            // FIXME Is this correct?
+            // SAFETY: Since we have the write lock, nobody else can be modifying state, and since
+            // we got `state` from the `compare_exchange`, we know it is a valid head of the queue.
+            let tail = unsafe { add_backlinks_and_find_tail(to_node(state)) };
 
+            // FIXME Is this safe to modify? There shouldn't be other threads modifying this since
+            // we have the write lock and only we should be able to modify the nodes in the queue...
             // Increment the reader count from 0 to 1.
-            assert_eq!(
-                tail.next.0.fetch_byte_add(SINGLE, AcqRel).addr(),
-                0,
-                "Reader count was not zero while we had the write lock"
-            );
-
-            // Release the queue lock.
-            self.state.fetch_byte_sub(QUEUE_LOCKED, Release);
+            let old = unsafe { tail.as_ref() }.next.0.fetch_byte_add(SINGLE, AcqRel).addr();
+            debug_assert_eq!(old, 0, "Reader count was not zero while we had the write lock");
+
+            // Now that we are in read mode, traverse the queue and wake up readers until we find a
+            // writer node.
+            let mut current = tail;
+            while unsafe { !current.as_ref().write } {
+                let prev = unsafe { current.as_ref().prev.get() };
+                unsafe {
+                    // There must be threads waiting.
+                    Node::complete(current);
+                }
+                match prev {
+                    Some(prev) => current = prev,
+                    None => return,
+                }
+            }
         }
     }
 
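The fast path of the reworked `downgrade` is the `compare_exchange` from "one writer, no waiters" to "one reader, no waiters"; the queue handling only runs when that exchange fails. A small sketch of just that fast-path transition on a plain `AtomicUsize`, with the constant values again assumed rather than taken from the module:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::{Relaxed, Release}};

const LOCKED: usize = 0b0001;
const SINGLE: usize = 0b1000; // weight of one reader in the count bits

/// Try to atomically turn "write-locked, no waiters" into
/// "read-locked by a single reader, no waiters".
fn downgrade_fast_path(state: &AtomicUsize) -> bool {
    state.compare_exchange(LOCKED, LOCKED | SINGLE, Release, Relaxed).is_ok()
}

fn main() {
    let state = AtomicUsize::new(LOCKED);
    assert!(downgrade_fast_path(&state)); // succeeds: no queued waiters
    assert_eq!(state.load(Relaxed), LOCKED | SINGLE);
    assert!(!downgrade_fast_path(&state)); // fails: state is no longer plain LOCKED
}
```

When the exchange fails, the diff instead bumps the reader count stored in the tail node and walks the queue backwards from the tail, completing reader nodes until it reaches a writer.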