
Commit fa90f37

sched/eevdf: More PELT vs DELAYED_DEQUEUE
JIRA: https://issues.redhat.com/browse/RHEL-68342

commit 76f2f78
Author: Peter Zijlstra <peterz@infradead.org>
Date: Mon Dec 2 18:45:57 2024 +0100

sched/eevdf: More PELT vs DELAYED_DEQUEUE

Vincent and Dietmar noted that while commit fc1892b ("sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE") fixes the entity runnable stats, it does not adjust the cfs_rq runnable stats, which are based off of h_nr_running.

Track h_nr_delayed such that we can discount those and adjust the signal.

Fixes: fc1892b ("sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE")
Closes: https://lore.kernel.org/lkml/a9a45193-d0c6-4ba2-a822-464ad30b550e@arm.com/
Closes: https://lore.kernel.org/lkml/CAKfTPtCNUvWE_GX5LyvTF-WdxUT=ZgvZZv-4t=eWntg5uOFqiQ@mail.gmail.com/
[ Fixes checkpatch warnings and rebased ]
Reported-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Reported-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lore.kernel.org/r/20241202174606.4074512-3-vincent.guittot@linaro.org
Signed-off-by: Phil Auld <pauld@redhat.com>
1 parent 7482e15 commit fa90f37
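
The core of the fix, in one picture: a delayed-dequeue entity stays queued (and is still counted in h_nr_running) but is no longer runnable, so feeding raw h_nr_running into PELT inflates the cfs_rq runnable signal. Below is a minimal, self-contained userspace model of the discount this commit introduces; it is an illustration only, and the struct and helper names are simplified stand-ins, not the kernel's types.

/* Simplified model: only the two counters this commit cares about. */
#include <assert.h>
#include <stdio.h>

struct cfs_rq_model {
	unsigned int h_nr_running;	/* all queued entities, incl. delayed */
	unsigned int h_nr_delayed;	/* entities parked in delayed dequeue */
};

/* The runnable input PELT should see: queued minus merely-delayed. */
static unsigned int runnable_input(const struct cfs_rq_model *cfs_rq)
{
	return cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
}

int main(void)
{
	/* Three queued entities, two only waiting out a delayed dequeue. */
	struct cfs_rq_model cfs_rq = { .h_nr_running = 3, .h_nr_delayed = 2 };

	/* Before this commit the signal would have been 3; now it is 1. */
	assert(runnable_input(&cfs_rq) == 1);
	printf("runnable input: %u\n", runnable_input(&cfs_rq));
	return 0;
}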

File tree: 4 files changed, +54 -8 lines changed

kernel/sched/debug.c

Lines changed: 1 addition & 0 deletions
@@ -845,6 +845,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
 	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+	SEQ_printf(m, " .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
 	SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
 			cfs_rq->idle_nr_running);
 	SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running",

kernel/sched/fair.c

Lines changed: 46 additions & 5 deletions
@@ -5465,9 +5465,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
-static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+static void set_delayed(struct sched_entity *se)
+{
+	se->sched_delayed = 1;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed++;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static void clear_delayed(struct sched_entity *se)
 {
 	se->sched_delayed = 0;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed--;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+	clear_delayed(se);
 	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
 		se->vlag = 0;
 }
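
Note the early break above: set_delayed()/clear_delayed() stop walking up at a throttled cfs_rq, because throttle_cfs_rq()/unthrottle_cfs_rq() (further down in this file) move a throttled subtree's counts wholesale via delayed_delta, and walking past that level would double-account. A standalone sketch of the walk, using a flat array as a stand-in for the sched_entity parent chain (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define NR_LEVELS 3	/* leaf cfs_rq -> group cfs_rq -> root */

struct level {
	unsigned int h_nr_delayed;
	bool throttled;
};

/* Mimics set_delayed(): bump each level, stop once a throttled
 * cfs_rq is reached; the levels above it are owned by throttling. */
static void mark_delayed(struct level *h)
{
	for (int i = 0; i < NR_LEVELS; i++) {
		h[i].h_nr_delayed++;
		if (h[i].throttled)
			break;
	}
}

int main(void)
{
	struct level h[NR_LEVELS] = { {0, false}, {0, true}, {0, false} };

	mark_delayed(h);
	for (int i = 0; i < NR_LEVELS; i++)
		printf("level %d: h_nr_delayed=%u throttled=%d\n",
		       i, h[i].h_nr_delayed, h[i].throttled);
	/* Prints 1, 1, 0: the root above the throttled level is untouched. */
	return 0;
}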
@@ -5497,7 +5521,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		if (cfs_rq->next == se)
 			cfs_rq->next = NULL;
 		update_load_avg(cfs_rq, se, 0);
-		se->sched_delayed = 1;
+		set_delayed(se);
 		return false;
 	}
 }
@@ -5917,7 +5941,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta, dequeue = 1;
+	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5950,6 +5974,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5973,6 +5998,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -5995,6 +6021,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6020,7 +6047,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta;
+	long task_delta, idle_task_delta, delayed_delta;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6056,6 +6083,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6073,6 +6101,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6090,6 +6119,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6943,7 +6973,7 @@ requeue_delayed_entity(struct sched_entity *se)
 	}
 
 	update_load_avg(cfs_rq, se, 0);
-	se->sched_delayed = 0;
+	clear_delayed(se);
 }
 
 /*
@@ -6957,6 +6987,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
+	int h_nr_delayed = 0;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_running = rq->cfs.h_nr_running;
 	u64 slice = 0;
@@ -6983,6 +7014,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
+	if (task_new)
+		h_nr_delayed = !!se->sched_delayed;
+
 	for_each_sched_entity(se) {
 		if (se->on_rq) {
 			if (se->sched_delayed)
@@ -7005,6 +7039,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7028,6 +7063,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7090,13 +7126,16 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
 	int h_nr_running = 0;
+	int h_nr_delayed = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_running = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
+		if (!task_sleep && !task_delayed)
+			h_nr_delayed = !!se->sched_delayed;
 	} else {
 		cfs_rq = group_cfs_rq(se);
 		slice = cfs_rq_min_slice(cfs_rq);
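
(task_sleep and task_delayed are set earlier in dequeue_entities() from the DEQUEUE_SLEEP and DEQUEUE_DELAYED flags; those context lines fall outside this hunk. On those two paths set_delayed() and clear_delayed() already adjust h_nr_delayed, so only the remaining case, such as dequeuing an already-delayed task for migration, subtracts it here.)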
@@ -7114,6 +7153,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
@@ -7152,6 +7192,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;

kernel/sched/pelt.c

Lines changed: 1 addition & 1 deletion
@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
 {
 	if (___update_load_sum(now, &cfs_rq->avg,
 				scale_load_down(cfs_rq->load.weight),
-				cfs_rq->h_nr_running,
+				cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
 				cfs_rq->curr != NULL)) {
 
 		___update_load_avg(&cfs_rq->avg, 1);
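
This is the consumer of the new counter: the runnable argument to ___update_load_sum() drops from h_nr_running to h_nr_running - h_nr_delayed, so a cfs_rq holding, say, four queued entities of which one is merely waiting out a delayed dequeue now feeds 3 rather than 4 into the runnable average.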

kernel/sched/sched.h

Lines changed: 6 additions & 2 deletions
@@ -649,6 +649,7 @@ struct cfs_rq {
 	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		idle_nr_running;   /* SCHED_IDLE */
 	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
+	unsigned int		h_nr_delayed;
 
 	s64			avg_vruntime;
 	u64			avg_load;
@@ -898,8 +899,11 @@ struct dl_rq {
 
 static inline void se_update_runnable(struct sched_entity *se)
 {
-	if (!entity_is_task(se))
-		se->runnable_weight = se->my_q->h_nr_running;
+	if (!entity_is_task(se)) {
+		struct cfs_rq *cfs_rq = se->my_q;
+
+		se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
+	}
 }
 
 static inline long se_runnable(struct sched_entity *se)
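
The same discount is mirrored in a group entity's runnable_weight, which the entity-level PELT update reads via se_runnable(); without it, parent cfs_rqs would still account delayed children as runnable even after the leaf-level fix above.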
