@@ -5465,9 +5465,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
-static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+static void set_delayed(struct sched_entity *se)
+{
+	se->sched_delayed = 1;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed++;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static void clear_delayed(struct sched_entity *se)
 {
 	se->sched_delayed = 0;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed--;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+	clear_delayed(se);
 	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
 		se->vlag = 0;
 }
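The two new helpers mirror each other: set_delayed() marks the entity and walks up its cfs_rq hierarchy incrementing h_nr_delayed, clear_delayed() undoes that, and both stop at the first throttled cfs_rq, since the levels above a throttled group are adjusted in bulk by throttle_cfs_rq()/unthrottle_cfs_rq() via delayed_delta in the hunks below. A minimal user-space sketch of that walk follows; toy_cfs_rq and toy_set_delayed() are made-up illustration-only names, not kernel types.

/* Toy model of the set_delayed() walk: propagate a per-level counter up a
 * parent chain and stop at the first throttled level. */
#include <stdio.h>
#include <stdbool.h>

struct toy_cfs_rq {
	struct toy_cfs_rq *parent;	/* NULL at the root */
	bool throttled;
	long h_nr_delayed;
};

/* Bump the counter on each level from the entity's cfs_rq upward; a
 * throttled level ends the walk because throttle/unthrottle keep the
 * levels above it consistent with a bulk delayed_delta adjustment. */
static void toy_set_delayed(struct toy_cfs_rq *cfs_rq)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent) {
		cfs_rq->h_nr_delayed++;
		if (cfs_rq->throttled)
			break;
	}
}

int main(void)
{
	struct toy_cfs_rq root = { .parent = NULL };
	struct toy_cfs_rq mid  = { .parent = &root, .throttled = true };
	struct toy_cfs_rq leaf = { .parent = &mid };

	toy_set_delayed(&leaf);
	printf("leaf=%ld mid=%ld root=%ld\n",
	       leaf.h_nr_delayed, mid.h_nr_delayed, root.h_nr_delayed);
	/* Prints "leaf=1 mid=1 root=0": the walk stopped at the throttled level. */
	return 0;
}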
@@ -5496,7 +5520,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		if (sched_feat(DELAY_DEQUEUE) && delay &&
 		    !entity_eligible(cfs_rq, se)) {
 			update_load_avg(cfs_rq, se, 0);
-			se->sched_delayed = 1;
+			set_delayed(se);
 			return false;
 		}
 	}
@@ -5908,7 +5932,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta, dequeue = 1;
+	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5941,6 +5965,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5964,6 +5989,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -5986,6 +6012,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6011,7 +6038,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta;
+	long task_delta, idle_task_delta, delayed_delta;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6047,6 +6074,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6064,6 +6092,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6081,6 +6110,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6934,7 +6964,7 @@ requeue_delayed_entity(struct sched_entity *se)
 	}
 
 	update_load_avg(cfs_rq, se, 0);
-	se->sched_delayed = 0;
+	clear_delayed(se);
 }
 
 /*
@@ -6948,6 +6978,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
+	int h_nr_delayed = 0;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_running = rq->cfs.h_nr_running;
 	u64 slice = 0;
@@ -6974,6 +7005,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
+	if (task_new)
+		h_nr_delayed = !!se->sched_delayed;
+
 	for_each_sched_entity(se) {
 		if (se->on_rq) {
 			if (se->sched_delayed)
@@ -6996,6 +7030,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7019,6 +7054,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7081,13 +7117,16 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
 	int h_nr_running = 0;
+	int h_nr_delayed = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_running = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
+		if (!task_sleep && !task_delayed)
+			h_nr_delayed = !!se->sched_delayed;
 	} else {
 		cfs_rq = group_cfs_rq(se);
 		slice = cfs_rq_min_slice(cfs_rq);
@@ -7105,6 +7144,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
@@ -7143,6 +7183,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
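Taken together, the patch keeps cfs_rq->h_nr_delayed in step with the existing h_nr_running/idle_h_nr_running counters: throttle_cfs_rq() and unthrottle_cfs_rq() carry a delayed_delta snapshot of the child's h_nr_delayed up the hierarchy, while enqueue_task_fair() and dequeue_entities() propagate a per-task h_nr_delayed of 0 or 1 along the same loops. As a worked example, throttling a group whose h_nr_delayed is 2 subtracts delayed_delta = 2 from each ancestor level, undoing the two per-entity increments those levels received from set_delayed(); unthrottling adds them back.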