@@ -29,22 +29,13 @@ static unsigned int klp_signals_cnt;
 
 /*
  * When a livepatch is in progress, enable klp stack checking in
- * cond_resched().  This helps CPU-bound kthreads get patched.
+ * schedule().  This helps CPU-bound kthreads get patched.
  */
-#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-
-#define klp_cond_resched_enable() sched_dynamic_klp_enable()
-#define klp_cond_resched_disable() sched_dynamic_klp_disable()
-
-#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
 
 DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
-EXPORT_SYMBOL(klp_sched_try_switch_key);
 
-#define klp_cond_resched_enable() static_branch_enable(&klp_sched_try_switch_key)
-#define klp_cond_resched_disable() static_branch_disable(&klp_sched_try_switch_key)
-
-#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
+#define klp_resched_enable() static_branch_enable(&klp_sched_try_switch_key)
+#define klp_resched_disable() static_branch_disable(&klp_sched_try_switch_key)
 
 /*
  * This work can be performed periodically to finish patching or unpatching any
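
The static key above is what keeps the scheduler's fast path cheap when no
transition is in flight. For context, a minimal sketch of the consumer side
(the wrapper name and shape are assumptions modeled on
include/linux/livepatch_sched.h, not part of this hunk):

	static __always_inline void klp_sched_try_switch(void)
	{
		/* Normally patched out; taken only while a transition is pending. */
		if (static_branch_unlikely(&klp_sched_try_switch_key))
			__klp_sched_try_switch();
	}

With DEFINE_STATIC_KEY_FALSE, the branch compiles to a NOP until
klp_resched_enable() patches it in, so schedule() pays nothing in the
common case.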
@@ -365,26 +356,18 @@ static bool klp_try_switch_task(struct task_struct *task)
 
 void __klp_sched_try_switch(void)
 {
-	if (likely(!klp_patch_pending(current)))
-		return;
-
 	/*
-	 * This function is called from cond_resched() which is called in many
-	 * places throughout the kernel.  Using the klp_mutex here might
-	 * deadlock.
-	 *
-	 * Instead, disable preemption to prevent racing with other callers of
-	 * klp_try_switch_task().  Thanks to task_call_func() they won't be
-	 * able to switch this task while it's running.
+	 * This function is called from __schedule() while a context switch is
+	 * about to happen.  Preemption is already disabled and klp_mutex
+	 * can't be acquired.
+	 * Disabled preemption is used to prevent racing with other callers of
+	 * klp_try_switch_task().  Thanks to task_call_func() they won't be
+	 * able to switch to this task while it's running.
 	 */
-	preempt_disable();
+	lockdep_assert_preemption_disabled();
 
-	/*
-	 * Make sure current didn't get patched between the above check and
-	 * preempt_disable().
-	 */
-	if (unlikely(!klp_patch_pending(current)))
-		goto out;
+	if (likely(!klp_patch_pending(current)))
+		return;
 
 	/*
 	 * Enforce the order of the TIF_PATCH_PENDING read above and the
@@ -395,11 +378,7 @@ void __klp_sched_try_switch(void)
 	smp_rmb();
 
 	klp_try_switch_task(current);
-
-out:
-	preempt_enable();
 }
-EXPORT_SYMBOL(__klp_sched_try_switch);
 
 /*
  * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
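
About the smp_rmb() retained above: the TIF_PATCH_PENDING read must not be
reordered after the reads of the transition state that klp_try_switch_task()
performs. A simplified sketch of the write side it pairs with (the barrier
lives in klp_init_transition(); the surrounding code here is assumed, not
quoted):

	/* Transition setup, simplified: publish the state before the flag. */
	klp_target_state = state;	/* func->transition = true, etc. */
	smp_wmb();	/* pairs with the smp_rmb() in __klp_sched_try_switch() */
	set_tsk_thread_flag(task, TIF_PATCH_PENDING);

A reader that observes the flag set is thus guaranteed to also observe the
transition state the writer published before it.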
@@ -508,7 +487,7 @@ void klp_try_complete_transition(void)
 	}
 
 	/* Done! Now cleanup the data structures. */
-	klp_cond_resched_disable();
+	klp_resched_disable();
 	patch = klp_transition_patch;
 	klp_complete_transition();
 
@@ -560,7 +539,7 @@ void klp_start_transition(void)
 		set_tsk_thread_flag(task, TIF_PATCH_PENDING);
 	}
 
-	klp_cond_resched_enable();
+	klp_resched_enable();
 
 	klp_signals_cnt = 0;
 }
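
Taken together, the last two hunks tie the static key's lifetime to the
transition itself; schematically (a hypothetical sketch of the flow, not
literal code from this file):

	/* Sketch: the key is armed only while a transition is in flight. */
	klp_start_transition();		/* sets TIF_PATCH_PENDING, klp_resched_enable() */
	/* ... tasks switch via __klp_sched_try_switch(), signals, ... */
	klp_try_complete_transition();	/* everyone switched: klp_resched_disable() */

So schedule() pays for the pending check only while some task still needs to
be switched over.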