Skip to content

Commit d045c8f

Browse files
htejun
authored and gregkh committed
sched_ext: Factor out move_task_between_dsqs() from scx_dispatch_from_dsq()
[ Upstream commit 8427acb ]

Pure reorganization. No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Stable-dep-of: 3296682 ("sched_ext: Fix migration disabled handling in targeted dispatches")
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent e9fec6f commit d045c8f

File tree

1 file changed

+75
-41
lines changed

1 file changed

+75
-41
lines changed

kernel/sched/ext.c

Lines changed: 75 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -2397,6 +2397,73 @@ static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *r
23972397
static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
23982398
#endif /* CONFIG_SMP */
23992399

2400+
/**
 * move_task_between_dsqs() - Move a task from one DSQ to another
 * @p: target task
 * @enq_flags: %SCX_ENQ_*
 * @src_dsq: DSQ @p is currently on, must not be a local DSQ
 * @dst_dsq: DSQ @p is being moved to, can be any DSQ
 *
 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
 * will change. As @p's task_rq is locked, this function doesn't need to use the
 * holding_cpu mechanism.
 *
 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
 * return value, is locked.
 */
static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
					 struct scx_dispatch_q *src_dsq,
					 struct scx_dispatch_q *dst_dsq)
{
	struct rq *src_rq = task_rq(p), *dst_rq;

	BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
	lockdep_assert_held(&src_dsq->lock);
	lockdep_assert_rq_held(src_rq);

	if (dst_dsq->id == SCX_DSQ_LOCAL) {
		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
		if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
			/* @p can't run on the target CPU; redirect to a global DSQ */
			dst_dsq = find_global_dsq(p);
			dst_rq = src_rq;
		}
	} else {
		/* no need to migrate if destination is a non-local DSQ */
		dst_rq = src_rq;
	}

	/*
	 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
	 * CPU, @p will be migrated.
	 */
	if (dst_dsq->id == SCX_DSQ_LOCAL) {
		/* @p is going from a non-local DSQ to a local DSQ */
		if (src_rq == dst_rq) {
			task_unlink_from_dsq(p, src_dsq);
			move_local_task_to_local_dsq(p, enq_flags,
						     src_dsq, dst_rq);
			raw_spin_unlock(&src_dsq->lock);
		} else {
			/* drop $src_dsq before cross-CPU migration */
			raw_spin_unlock(&src_dsq->lock);
			move_remote_task_to_local_dsq(p, enq_flags,
						      src_rq, dst_rq);
		}
	} else {
		/*
		 * @p is going from a non-local DSQ to a non-local DSQ. As
		 * $src_dsq is already locked, do an abbreviated dequeue.
		 */
		task_unlink_from_dsq(p, src_dsq);
		p->scx.dsq = NULL;
		raw_spin_unlock(&src_dsq->lock);

		dispatch_enqueue(dst_dsq, p, enq_flags);
	}

	return dst_rq;
}
2466+
24002467
static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
24012468
{
24022469
struct task_struct *p;
@@ -6134,7 +6201,7 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
61346201
u64 enq_flags)
61356202
{
61366203
struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6137-
struct rq *this_rq, *src_rq, *dst_rq, *locked_rq;
6204+
struct rq *this_rq, *src_rq, *locked_rq;
61386205
bool dispatched = false;
61396206
bool in_balance;
61406207
unsigned long flags;
@@ -6180,51 +6247,18 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
61806247
/* @p is still on $src_dsq and stable, determine the destination */
61816248
dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
61826249

6183-
if (dst_dsq->id == SCX_DSQ_LOCAL) {
6184-
dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
6185-
if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
6186-
dst_dsq = find_global_dsq(p);
6187-
dst_rq = src_rq;
6188-
}
6189-
} else {
6190-
/* no need to migrate if destination is a non-local DSQ */
6191-
dst_rq = src_rq;
6192-
}
6193-
61946250
/*
6195-
* Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
6196-
* CPU, @p will be migrated.
6251+
* Apply vtime and slice updates before moving so that the new time is
6252+
* visible before inserting into $dst_dsq. @p is still on $src_dsq but
6253+
* this is safe as we're locking it.
61976254
*/
6198-
if (dst_dsq->id == SCX_DSQ_LOCAL) {
6199-
/* @p is going from a non-local DSQ to a local DSQ */
6200-
if (src_rq == dst_rq) {
6201-
task_unlink_from_dsq(p, src_dsq);
6202-
move_local_task_to_local_dsq(p, enq_flags,
6203-
src_dsq, dst_rq);
6204-
raw_spin_unlock(&src_dsq->lock);
6205-
} else {
6206-
raw_spin_unlock(&src_dsq->lock);
6207-
move_remote_task_to_local_dsq(p, enq_flags,
6208-
src_rq, dst_rq);
6209-
locked_rq = dst_rq;
6210-
}
6211-
} else {
6212-
/*
6213-
* @p is going from a non-local DSQ to a non-local DSQ. As
6214-
* $src_dsq is already locked, do an abbreviated dequeue.
6215-
*/
6216-
task_unlink_from_dsq(p, src_dsq);
6217-
p->scx.dsq = NULL;
6218-
raw_spin_unlock(&src_dsq->lock);
6219-
6220-
if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6221-
p->scx.dsq_vtime = kit->vtime;
6222-
dispatch_enqueue(dst_dsq, p, enq_flags);
6223-
}
6224-
6255+
if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6256+
p->scx.dsq_vtime = kit->vtime;
62256257
if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
62266258
p->scx.slice = kit->slice;
62276259

6260+
/* execute move */
6261+
locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
62286262
dispatched = true;
62296263
out:
62306264
if (in_balance) {

0 commit comments

Comments
 (0)