locking/rwsem: Disable preemption in all down_write*() and up_write() code paths

jira LE-1907
Rebuild_History Non-Buildable kernel-rt-5.14.0-284.30.1.rt14.315.el9_2
commit-author Waiman Long <longman@redhat.com>
commit 1d61659ced6bd8881cf2fb5cbcb28f9541fc7430
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-rt-5.14.0-284.30.1.rt14.315.el9_2/1d61659c.failed

The previous patch has disabled preemption in all the down_read() and
up_read() code paths. For symmetry, this patch extends commit:

  48dfb5d2560d ("locking/rwsem: Disable preemption while trying for rwsem lock")

... to have preemption disabled in all the down_write() and up_write()
code paths, including downgrade_write().
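
For illustration, a minimal standalone C sketch of the shape of the
change (an editorial aid, not the kernel source: the preempt_*() calls
are no-op placeholders and the rwsem helpers are reduced to stubs). The
point is that a single preempt_disable()/preempt_enable() pair now
covers both the trylock fast path and the slowpath:

  #include <stdbool.h>

  static void preempt_disable(void) { }  /* placeholder for the kernel primitive */
  static void preempt_enable(void)  { }  /* placeholder for the kernel primitive */

  struct rw_semaphore { long count; };

  static bool rwsem_write_trylock(struct rw_semaphore *sem)
  {
      sem->count = -1;  /* stub fast path: always succeeds here */
      return true;
  }

  static void rwsem_down_write_slowpath(struct rw_semaphore *sem)
  {
      (void)sem;  /* stub: would queue the waiter and sleep, preemption off */
  }

  /* the down_write() shape after the patch */
  static void down_write_sketch(struct rw_semaphore *sem)
  {
      preempt_disable();
      if (!rwsem_write_trylock(sem))
          rwsem_down_write_slowpath(sem);
      preempt_enable();
  }

  int main(void)
  {
      struct rw_semaphore sem = { 0 };
      down_write_sketch(&sem);
      return 0;
  }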

	Suggested-by: Peter Zijlstra <peterz@infradead.org>
	Signed-off-by: Waiman Long <longman@redhat.com>
	Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230126003628.365092-4-longman@redhat.com
(cherry picked from commit 1d61659ced6bd8881cf2fb5cbcb28f9541fc7430)
	Signed-off-by: Jonathan Maple <jmaple@ciq.com>

# Conflicts:
#	kernel/locking/rwsem.c
diff --cc kernel/locking/rwsem.c
index 6e9b6fe29f61,acb5a50309a1..000000000000
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@@ -1093,51 -1167,17 +1085,54 @@@ rwsem_down_write_slowpath(struct rw_sem
           * In this case, we attempt to acquire the lock again
           * without sleeping.
           */
 -        if (waiter.handoff_set) {
 +        if (wstate == WRITER_HANDOFF) {
              enum owner_state owner_state;
  
-             preempt_disable();
              owner_state = rwsem_spin_on_owner(sem);
-             preempt_enable();
- 
              if (owner_state == OWNER_NULL)
                  goto trylock_again;
          }
  
++<<<<<<< HEAD
 +        /* Block until there are no active lockers. */
 +        for (;;) {
 +            if (signal_pending_state(state, current))
 +                goto out_nolock;
 +
 +            schedule();
 +            lockevent_inc(rwsem_sleep_writer);
 +            set_current_state(state);
 +            /*
 +             * If HANDOFF bit is set, unconditionally do
 +             * a trylock.
 +             */
 +            if (wstate == WRITER_HANDOFF)
 +                break;
 +
 +            if ((wstate == WRITER_NOT_FIRST) &&
 +                (rwsem_first_waiter(sem) == &waiter))
 +                wstate = WRITER_FIRST;
 +
 +            count = atomic_long_read(&sem->count);
 +            if (!(count & RWSEM_LOCK_MASK))
 +                break;
 +
 +            /*
 +             * The setting of the handoff bit is deferred
 +             * until rwsem_try_write_lock() is called.
 +             */
 +            if ((wstate == WRITER_FIRST) && (rt_task(current) ||
 +                time_after(jiffies, waiter.timeout))) {
 +                wstate = WRITER_HANDOFF;
 +                lockevent_inc(rwsem_wlock_handoff);
 +                break;
 +            }
 +        }
++=======
+         schedule_preempt_disabled();
+         lockevent_inc(rwsem_sleep_writer);
+         set_current_state(state);
++>>>>>>> 1d61659ced6b (locking/rwsem: Disable preemption in all down_write*() and up_write() code paths)
  trylock_again:
          raw_spin_lock_irq(&sem->wait_lock);
      }
* Unmerged path kernel/locking/rwsem.c
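
Note on the conflict recorded above: the HEAD side still carries the
older wstate/WRITER_HANDOFF wait loop around a bare schedule(), while
the incoming side assumes the whole slowpath already runs with
preemption disabled and therefore sleeps via
schedule_preempt_disabled(). A rough standalone sketch of that helper's
semantics (placeholder no-op primitives stand in for the kernel's;
modeled on the helper in kernel/sched/core.c):

  static void preempt_enable_no_resched(void) { }  /* placeholder */
  static void preempt_disable(void)           { }  /* placeholder */
  static void schedule(void)                  { }  /* placeholder: block and switch */

  /*
   * Sleep while preemption is nominally disabled: re-enable preemption
   * without a preemption point, sleep, then return with preemption
   * disabled again.
   */
  static void schedule_preempt_disabled(void)
  {
      preempt_enable_no_resched();
      schedule();
      preempt_disable();
  }

  int main(void)
  {
      schedule_preempt_disabled();
      return 0;
  }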