Commit fd6b146

Merge: Sched; Fix dlserver double enqueue and a couple of other things
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/98
JIRA: https://issues.redhat.com/browse/RHEL-68342

The fix for the double enqueue warning came in upstream with a few other
things in the same area. This short series includes all of them to prevent
later issues.

Signed-off-by: Phil Auld <pauld@redhat.com>
Approved-by: Valentin Schneider <vschneid@redhat.com>
Approved-by: Juri Lelli <juri.lelli@redhat.com>
Approved-by: Waiman Long <longman@redhat.com>
Approved-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
Approved-by: Aristeu Rozanski <arozansk@redhat.com>
Approved-by: Rafael Aquini <raquini@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Jan Stancek <jstancek@redhat.com>
2 parents (40871cf + 50de1d4), merged as commit fd6b146

7 files changed: +84, -22 lines


include/linux/sched.h

Lines changed: 7 additions & 0 deletions

@@ -656,6 +656,12 @@ struct sched_dl_entity {
 	 * @dl_defer_armed tells if the deferrable server is waiting
 	 * for the replenishment timer to activate it.
 	 *
+	 * @dl_server_active tells if the dlserver is active(started).
+	 * dlserver is started on first cfs enqueue on an idle runqueue
+	 * and is stopped when a dequeue results in 0 cfs tasks on the
+	 * runqueue. In other words, dlserver is active only when cpu's
+	 * runqueue has atleast one cfs task.
+	 *
 	 * @dl_defer_running tells if the deferrable server is actually
 	 * running, skipping the defer phase.
 	 */
@@ -664,6 +670,7 @@ struct sched_dl_entity {
 	unsigned int			dl_non_contending : 1;
 	unsigned int			dl_overrun	  : 1;
 	unsigned int			dl_server	  : 1;
+	unsigned int			dl_server_active  : 1;
 	unsigned int			dl_defer	  : 1;
 	unsigned int			dl_defer_armed	  : 1;
 	unsigned int			dl_defer_running  : 1;
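Read together with the dl_server_start()/dl_server_stop() changes further down, the new comment describes a simple lifecycle: the bit is set when the first fair task is enqueued on an idle runqueue and cleared when the last one leaves. A minimal userspace sketch of that bookkeeping (simplified stand-in types and names, not the kernel's API):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the per-runqueue fair-server state. */
struct fair_server_model {
	unsigned int nr_cfs_tasks;	/* fair tasks queued on this CPU */
	bool active;			/* models dl_se->dl_server_active */
};

static void model_cfs_enqueue(struct fair_server_model *s)
{
	if (s->nr_cfs_tasks++ == 0)
		s->active = true;	/* models dl_server_start() */
}

static void model_cfs_dequeue(struct fair_server_model *s)
{
	if (s->nr_cfs_tasks && --s->nr_cfs_tasks == 0)
		s->active = false;	/* models dl_server_stop() */
}

int main(void)
{
	struct fair_server_model s = { 0 };

	model_cfs_enqueue(&s);	/* idle -> 1 task: server becomes active */
	model_cfs_enqueue(&s);	/* 2nd task: no state change */
	model_cfs_dequeue(&s);
	model_cfs_dequeue(&s);	/* queue drained: server inactive again */
	printf("tasks=%u active=%d\n", s.nr_cfs_tasks, s.active);
	return 0;
}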

kernel/sched/core.c

Lines changed: 1 addition & 1 deletion

@@ -1346,7 +1346,7 @@ bool sched_can_stop_tick(struct rq *rq)
 	if (scx_enabled() && !scx_can_stop_tick(rq))
 		return false;
 
-	if (rq->cfs.nr_running > 1)
+	if (rq->cfs.h_nr_running > 1)
 		return false;
 
 	/*
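For context: rq->cfs.nr_running counts the entities queued directly on the root cfs_rq (a whole task group contributes just its group entity), whereas h_nr_running counts the fair tasks across the whole hierarchy, which is the quantity the tick-stop decision actually needs. A hypothetical toy illustration (not kernel code):

#include <stdio.h>

/* Toy counters for a root cfs_rq with one task group holding two tasks. */
struct cfs_rq_model {
	unsigned int nr_running;	/* entities on this cfs_rq only */
	unsigned int h_nr_running;	/* tasks in the whole hierarchy */
};

int main(void)
{
	/* Two fair tasks inside one group: the root sees a single entity. */
	struct cfs_rq_model root = { .nr_running = 1, .h_nr_running = 2 };

	printf("old check says stop tick: %s\n", root.nr_running > 1 ? "no" : "yes");
	printf("new check says stop tick: %s\n", root.h_nr_running > 1 ? "no" : "yes");
	return 0;
}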

kernel/sched/deadline.c

Lines changed: 6 additions & 2 deletions

@@ -1647,6 +1647,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
 	if (!dl_se->dl_runtime)
 		return;
 
+	dl_se->dl_server_active = 1;
 	enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
 	if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
 		resched_curr(dl_se->rq);
@@ -1661,6 +1662,7 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
 	hrtimer_try_to_cancel(&dl_se->dl_timer);
 	dl_se->dl_defer_armed = 0;
 	dl_se->dl_throttled = 0;
+	dl_se->dl_server_active = 0;
 }
 
 void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
@@ -2420,8 +2422,10 @@ static struct task_struct *__pick_task_dl(struct rq *rq)
 	if (dl_server(dl_se)) {
 		p = dl_se->server_pick_task(dl_se);
 		if (!p) {
-			dl_se->dl_yielded = 1;
-			update_curr_dl_se(rq, dl_se, 0);
+			if (dl_server_active(dl_se)) {
+				dl_se->dl_yielded = 1;
+				update_curr_dl_se(rq, dl_se, 0);
+			}
 			goto again;
 		}
 		rq->dl_server = dl_se;
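With dl_server_start()/dl_server_stop() maintaining the flag, the new guard in __pick_task_dl() means a server whose pick callback returns no task is only yielded and charged while it is still active; a stopped server's bandwidth state is left untouched. The same dl_server_active() test gates the fair_server accounting in update_curr() further down. A standalone sketch of the guarded-accounting pattern (illustrative names and types, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct server_model {
	bool active;		/* models dl_se->dl_server_active */
	int64_t runtime;	/* remaining runtime this period */
};

/* Charge @delta to the server, but only while it is active. */
static void server_yield_and_account(struct server_model *s, int64_t delta)
{
	if (!s->active)
		return;			/* stopped server: nothing to do */
	s->runtime -= delta;		/* stands in for update_curr_dl_se() */
}

int main(void)
{
	struct server_model s = { .active = false, .runtime = 50 };

	server_yield_and_account(&s, 10);	/* skipped: not active */
	s.active = true;
	server_yield_and_account(&s, 10);	/* charged */
	printf("runtime=%lld\n", (long long)s.runtime);
	return 0;
}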

kernel/sched/debug.c

Lines changed: 1 addition & 0 deletions

@@ -845,6 +845,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
 	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+	SEQ_printf(m, " .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
 	SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
 		   cfs_rq->idle_nr_running);
 	SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running",

kernel/sched/fair.c

Lines changed: 57 additions & 16 deletions

@@ -1159,8 +1159,6 @@ static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
 	trace_sched_stat_runtime(p, delta_exec);
 	account_group_exec_runtime(p, delta_exec);
 	cgroup_account_cputime(p, delta_exec);
-	if (p->dl_server)
-		dl_server_update(p->dl_server, delta_exec);
 }
 
 static inline bool did_preempt_short(struct cfs_rq *cfs_rq, struct sched_entity *curr)
@@ -1237,11 +1235,16 @@ static void update_curr(struct cfs_rq *cfs_rq)
 		update_curr_task(p, delta_exec);
 
 		/*
-		 * Any fair task that runs outside of fair_server should
-		 * account against fair_server such that it can account for
-		 * this time and possibly avoid running this period.
+		 * If the fair_server is active, we need to account for the
+		 * fair_server time whether or not the task is running on
+		 * behalf of fair_server or not:
+		 * - If the task is running on behalf of fair_server, we need
+		 *   to limit its time based on the assigned runtime.
+		 * - Fair task that runs outside of fair_server should account
+		 *   against fair_server such that it can account for this time
+		 *   and possibly avoid running this period.
 		 */
-		if (p->dl_server != &rq->fair_server)
+		if (dl_server_active(&rq->fair_server))
 			dl_server_update(&rq->fair_server, delta_exec);
 	}
 
@@ -5465,9 +5468,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
-static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+static void set_delayed(struct sched_entity *se)
+{
+	se->sched_delayed = 1;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed++;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static void clear_delayed(struct sched_entity *se)
 {
 	se->sched_delayed = 0;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed--;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+	clear_delayed(se);
 	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
 		se->vlag = 0;
 }
@@ -5478,6 +5505,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	bool sleep = flags & DEQUEUE_SLEEP;
 
 	update_curr(cfs_rq);
+	clear_buddies(cfs_rq, se);
 
 	if (flags & DEQUEUE_DELAYED) {
 		SCHED_WARN_ON(!se->sched_delayed);
@@ -5494,10 +5522,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 		if (sched_feat(DELAY_DEQUEUE) && delay &&
 		    !entity_eligible(cfs_rq, se)) {
-			if (cfs_rq->next == se)
-				cfs_rq->next = NULL;
 			update_load_avg(cfs_rq, se, 0);
-			se->sched_delayed = 1;
+			set_delayed(se);
 			return false;
 		}
 	}
@@ -5520,8 +5546,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	update_stats_dequeue_fair(cfs_rq, se, flags);
 
-	clear_buddies(cfs_rq, se);
-
 	update_entity_lag(cfs_rq, se);
 	if (sched_feat(PLACE_REL_DEADLINE) && !sleep) {
 		se->deadline -= se->vruntime;
@@ -5917,7 +5941,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta, dequeue = 1;
+	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5950,6 +5974,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5973,6 +5998,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -5995,6 +6021,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6020,7 +6047,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta;
+	long task_delta, idle_task_delta, delayed_delta;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6056,6 +6083,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6073,6 +6101,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6090,6 +6119,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6943,7 +6973,7 @@ requeue_delayed_entity(struct sched_entity *se)
 	}
 
 	update_load_avg(cfs_rq, se, 0);
-	se->sched_delayed = 0;
+	clear_delayed(se);
 }
 
 /*
@@ -6957,6 +6987,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
+	int h_nr_delayed = 0;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_running = rq->cfs.h_nr_running;
 	u64 slice = 0;
@@ -6983,6 +7014,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
+	if (task_new)
+		h_nr_delayed = !!se->sched_delayed;
+
 	for_each_sched_entity(se) {
 		if (se->on_rq) {
 			if (se->sched_delayed)
@@ -7005,6 +7039,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7028,6 +7063,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7090,13 +7126,16 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
 	int h_nr_running = 0;
+	int h_nr_delayed = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_running = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
+		if (!task_sleep && !task_delayed)
+			h_nr_delayed = !!se->sched_delayed;
 	} else {
 		cfs_rq = group_cfs_rq(se);
 		slice = cfs_rq_min_slice(cfs_rq);
@@ -7114,6 +7153,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
@@ -7152,6 +7192,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
@@ -8780,7 +8821,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
 		return;
 
-	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) {
+	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK) && !pse->sched_delayed) {
 		set_next_buddy(pse);
 	}
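The common thread of the fair.c hunks is the new h_nr_delayed counter: set_delayed()/clear_delayed() walk the entity's hierarchy and adjust the delayed count on every cfs_rq up to the root, stopping at a throttled level whose counts stay detached until unthrottle. A standalone model of that walk (simplified userspace types, not kernel code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cfs_rq_model {
	struct cfs_rq_model *parent;
	unsigned int h_nr_delayed;
	bool throttled;
};

static void model_set_delayed(struct cfs_rq_model *cfs_rq)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent) {
		cfs_rq->h_nr_delayed++;
		if (cfs_rq->throttled)
			break;	/* counts above are detached while throttled */
	}
}

static void model_clear_delayed(struct cfs_rq_model *cfs_rq)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent) {
		cfs_rq->h_nr_delayed--;
		if (cfs_rq->throttled)
			break;
	}
}

int main(void)
{
	struct cfs_rq_model root = { 0 };
	struct cfs_rq_model group = { .parent = &root };

	model_set_delayed(&group);	/* a task in "group" becomes delayed */
	printf("group=%u root=%u\n", group.h_nr_delayed, root.h_nr_delayed);
	model_clear_delayed(&group);	/* the delayed entity is requeued/dequeued */
	printf("group=%u root=%u\n", group.h_nr_delayed, root.h_nr_delayed);
	return 0;
}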

kernel/sched/pelt.c

Lines changed: 1 addition & 1 deletion

@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
 {
 	if (___update_load_sum(now, &cfs_rq->avg,
 				scale_load_down(cfs_rq->load.weight),
-				cfs_rq->h_nr_running,
+				cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
 				cfs_rq->curr != NULL)) {
 
 		___update_load_avg(&cfs_rq->avg, 1);
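Delayed-dequeue entities are still queued, so they remain in h_nr_running; subtracting h_nr_delayed yields the number that are actually runnable, which is what the PELT runnable signal should track. se_update_runnable() in kernel/sched/sched.h below makes the same subtraction for group entities. A tiny worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned int h_nr_running = 4;	/* queued fair tasks (hierarchical) */
	unsigned int h_nr_delayed = 1;	/* of those, one is delayed-dequeue */

	/* Value passed as the "runnable" argument to ___update_load_sum(). */
	printf("runnable = %u\n", h_nr_running - h_nr_delayed);
	return 0;
}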

kernel/sched/sched.h

Lines changed: 11 additions & 2 deletions

@@ -398,6 +398,11 @@ extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq
 extern int dl_server_apply_params(struct sched_dl_entity *dl_se,
 		    u64 runtime, u64 period, bool init);
 
+static inline bool dl_server_active(struct sched_dl_entity *dl_se)
+{
+	return dl_se->dl_server_active;
+}
+
 #ifdef CONFIG_CGROUP_SCHED
 
 extern struct list_head task_groups;
@@ -649,6 +654,7 @@ struct cfs_rq {
 	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		idle_nr_running;   /* SCHED_IDLE */
 	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
+	unsigned int		h_nr_delayed;
 
 	s64			avg_vruntime;
 	u64			avg_load;
@@ -898,8 +904,11 @@ struct dl_rq {
 
 static inline void se_update_runnable(struct sched_entity *se)
 {
-	if (!entity_is_task(se))
-		se->runnable_weight = se->my_q->h_nr_running;
+	if (!entity_is_task(se)) {
+		struct cfs_rq *cfs_rq = se->my_q;
+
+		se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
+	}
 }
 
 static inline long se_runnable(struct sched_entity *se)
