
Commit 0857fd2

sched: Fix stop_one_cpu_nowait() vs hotplug
JIRA: https://issues.redhat.com/browse/RHEL-84526

commit f0498d2
Author: Peter Zijlstra <peterz@infradead.org>
Date:   Tue Oct 10 20:57:39 2023 +0200

    sched: Fix stop_one_cpu_nowait() vs hotplug

Kuyo reported sporadic failures on a sched_setaffinity() vs CPU hotplug
stress-test -- notably affine_move_task() remains stuck in
wait_for_completion(), leading to a hung-task detector warning.

Specifically, it was reported that stop_one_cpu_nowait(.fn =
migration_cpu_stop) returns false -- this stopper is responsible for the
matching complete().

The race scenario is:

    CPU0                                    CPU1

                                            // doing _cpu_down()

  __set_cpus_allowed_ptr()
    task_rq_lock();
                                            takedown_cpu()
                                              stop_machine_cpuslocked(take_cpu_down..)

                                            <PREEMPT: cpu_stopper_thread()
                                              MULTI_STOP_PREPARE
                                              ...
    __set_cpus_allowed_ptr_locked()
      affine_move_task()
        task_rq_unlock();

  <PREEMPT: cpu_stopper_thread()\>
    ack_state()
                                              MULTI_STOP_RUN
                                                take_cpu_down()
                                                  __cpu_disable();
                                                  stop_machine_park();
                                                    stopper->enabled = false;
                                             />
   />
    stop_one_cpu_nowait(.fn = migration_cpu_stop);
      if (stopper->enabled) // false!!!

That is, by doing stop_one_cpu_nowait() after dropping rq-lock, the
stopper thread gets a chance to preempt and allows the cpu-down for the
target CPU to complete.

OTOH, since stop_one_cpu_nowait() / cpu_stop_queue_work() needs to issue
a wakeup, it must not be run under the scheduler locks.

Solve this apparent contradiction by keeping preemption disabled over
the unlock + queue_stopper combination:

    preempt_disable();
    task_rq_unlock(...);
    if (!stop_pending)
      stop_one_cpu_nowait(...)
    preempt_enable();

This respects the lock ordering constraints while still avoiding the
above race. That is, if we find the CPU is online under rq-lock, the
targeted stop_one_cpu_nowait() must succeed.

Apply this pattern to all similar stop_one_cpu_nowait() invocations.

Fixes: 6d337ea ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
Reported-by: "Kuyo Chang (張建文)" <Kuyo.Chang@mediatek.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: "Kuyo Chang (張建文)" <Kuyo.Chang@mediatek.com>
Link: https://lkml.kernel.org/r/20231010200442.GA16515@noisy.programming.kicks-ass.net

Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
1 parent ef2902a commit 0857fd2
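For reference, the "returns false" failure mode described above comes from the
per-CPU stopper->enabled flag in kernel/stop_machine.c: once stop_machine_park()
has parked the target CPU's stopper during takedown, queueing is refused and the
stop work -- here migration_cpu_stop(), which issues the matching complete() --
never runs. The sketch below is a condensed, illustrative reading of that
queueing path, not the verbatim kernel code; the wake-queue handling, the
done-signalling branch and other details are left out.

    /* Condensed sketch of the queueing path in kernel/stop_machine.c. */
    static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
    {
            struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
            unsigned long flags;
            bool enabled;

            raw_spin_lock_irqsave(&stopper->lock, flags);
            enabled = stopper->enabled;      /* cleared by stop_machine_park() on cpu-down */
            if (enabled)
                    list_add_tail(&work->list, &stopper->works);  /* stopper thread will run it */
            raw_spin_unlock_irqrestore(&stopper->lock, flags);

            return enabled;
    }

    bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                             struct cpu_stop_work *work_buf)
    {
            *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
            /* false means the queued function will never run on @cpu */
            return cpu_stop_queue_work(cpu, work_buf);
    }

With preemption disabled across the unlock and the queueing, as in the hunks
below, the local CPU cannot switch to its stopper thread, so the multi-CPU stop
that would park the target stopper cannot complete in that window; a CPU found
online under rq-lock therefore still has an enabled stopper when the work is
queued.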

4 files changed (+17, -3 lines)

kernel/sched/core.c

Lines changed: 8 additions & 2 deletions
@@ -2531,9 +2531,11 @@ static int migration_cpu_stop(void *data)
 		 * it.
 		 */
 		WARN_ON_ONCE(!pending->stop_pending);
+		preempt_disable();
 		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
 				    &pending->arg, &pending->stop_work);
+		preempt_enable();
 		return 0;
 	}
 out:
@@ -2841,12 +2843,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 			complete = true;
 		}
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (push_task) {
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 					    p, &rq->push_work);
 		}
+		preempt_enable();
 
 		if (complete)
 			complete_all(&pending->done);
@@ -2912,12 +2915,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 		if (flags & SCA_MIGRATE_ENABLE)
 			p->migration_flags &= ~MDF_PUSH;
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (!stop_pending) {
 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
 					    &pending->arg, &pending->stop_work);
 		}
+		preempt_enable();
 
 		if (flags & SCA_MIGRATE_ENABLE)
 			return 0;
@@ -7797,9 +7801,11 @@ static void balance_push(struct rq *rq)
 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
 	 * Both preemption and IRQs are still disabled.
 	 */
+	preempt_disable();
 	raw_spin_rq_unlock(rq);
 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
 			    this_cpu_ptr(&push_work));
+	preempt_enable();
 	/*
 	 * At this point need_resched() is true and we'll take the loop in
 	 * schedule(). The next pick is obviously going to be the stop task

kernel/sched/deadline.c

Lines changed: 2 additions & 0 deletions
@@ -2543,9 +2543,11 @@ static void pull_dl_task(struct rq *this_rq)
 		double_unlock_balance(this_rq, src_rq);
 
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(this_rq);
 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 					    push_task, &src_rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(this_rq);
 		}
 	}

kernel/sched/fair.c

Lines changed: 3 additions & 1 deletion
@@ -11420,13 +11420,15 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
 				busiest->push_cpu = this_cpu;
 				active_balance = 1;
 			}
-			raw_spin_rq_unlock_irqrestore(busiest, flags);
 
+			preempt_disable();
+			raw_spin_rq_unlock_irqrestore(busiest, flags);
 			if (active_balance) {
 				stop_one_cpu_nowait(cpu_of(busiest),
 					active_load_balance_cpu_stop, busiest,
 					&busiest->active_balance_work);
 			}
+			preempt_enable();
 		}
 	} else {
 		sd->nr_balance_failed = 0;

kernel/sched/rt.c

Lines changed: 4 additions & 0 deletions
@@ -2045,9 +2045,11 @@ static int push_rt_task(struct rq *rq, bool pull)
 		 */
 		push_task = get_push_task(rq);
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(rq);
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 					    push_task, &rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(rq);
 		}
 
@@ -2383,9 +2385,11 @@ static void pull_rt_task(struct rq *this_rq)
 		double_unlock_balance(this_rq, src_rq);
 
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(this_rq);
 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 					    push_task, &src_rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(this_rq);
 		}
 	}
