@@ -1159,8 +1159,6 @@ static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
 	trace_sched_stat_runtime(p, delta_exec);
 	account_group_exec_runtime(p, delta_exec);
 	cgroup_account_cputime(p, delta_exec);
-	if (p->dl_server)
-		dl_server_update(p->dl_server, delta_exec);
 }
 
 static inline bool did_preempt_short(struct cfs_rq *cfs_rq, struct sched_entity *curr)
@@ -1237,11 +1235,16 @@ static void update_curr(struct cfs_rq *cfs_rq)
 		update_curr_task(p, delta_exec);
 
 		/*
-		 * Any fair task that runs outside of fair_server should
-		 * account against fair_server such that it can account for
-		 * this time and possibly avoid running this period.
+		 * If the fair_server is active, we need to account for the
+		 * fair_server time whether or not the task is running on
+		 * behalf of the fair_server:
+		 * - If the task is running on behalf of the fair_server, we
+		 *   need to limit its time based on the assigned runtime.
+		 * - A fair task that runs outside of the fair_server should
+		 *   account against the fair_server so that it can account
+		 *   for this time and possibly avoid running this period.
 		 */
-		if (p->dl_server != &rq->fair_server)
+		if (dl_server_active(&rq->fair_server))
 			dl_server_update(&rq->fair_server, delta_exec);
 	}
 
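For reference, the rule the hunk above implements can be sketched as a tiny helper (illustrative only; the wrapper name fair_server_account() is made up here, while dl_server_active() and dl_server_update() are the calls used in the diff):

/* Sketch: charge fair runtime against the fair server whenever the
 * server is active, regardless of whether the current task is running
 * on the server's behalf or as an ordinary fair task. */
static inline void fair_server_account(struct rq *rq, s64 delta_exec)
{
	if (dl_server_active(&rq->fair_server))
		dl_server_update(&rq->fair_server, delta_exec);
}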
@@ -5471,9 +5474,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
-static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+static void set_delayed(struct sched_entity *se)
+{
+	se->sched_delayed = 1;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed++;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static void clear_delayed(struct sched_entity *se)
 {
 	se->sched_delayed = 0;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed--;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+	clear_delayed(se);
 	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
 		se->vlag = 0;
 }
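As a rough sketch of how the two new helpers are meant to pair up so that every cfs_rq's h_nr_delayed count returns to zero (the function below is illustrative only and not part of the change; in the patch the clearing side happens in finish_delayed_dequeue_entity() or requeue_delayed_entity()):

static void example_delay_cycle(struct sched_entity *se)
{
	/* Mark the entity delayed: h_nr_delayed++ on each ancestor cfs_rq,
	 * stopping at the first throttled one. */
	set_delayed(se);

	/* ... the entity stays enqueued while its negative lag decays ... */

	/* Unmark it exactly once: h_nr_delayed-- on the same cfs_rqs. */
	clear_delayed(se);
}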
@@ -5484,6 +5511,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	bool sleep = flags & DEQUEUE_SLEEP;
 
 	update_curr(cfs_rq);
+	clear_buddies(cfs_rq, se);
 
 	if (flags & DEQUEUE_DELAYED) {
 		SCHED_WARN_ON(!se->sched_delayed);
@@ -5500,10 +5528,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 		if (sched_feat(DELAY_DEQUEUE) && delay &&
 		    !entity_eligible(cfs_rq, se)) {
-			if (cfs_rq->next == se)
-				cfs_rq->next = NULL;
 			update_load_avg(cfs_rq, se, 0);
-			se->sched_delayed = 1;
+			set_delayed(se);
 			return false;
 		}
 	}
@@ -5526,8 +5552,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	update_stats_dequeue_fair(cfs_rq, se, flags);
 
-	clear_buddies(cfs_rq, se);
-
 	update_entity_lag(cfs_rq, se);
 	if (sched_feat(PLACE_REL_DEADLINE) && !sleep) {
 		se->deadline -= se->vruntime;
@@ -5917,7 +5941,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta, dequeue = 1;
+	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5950,6 +5974,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5973,6 +5998,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -5995,6 +6021,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6020,7 +6047,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta;
+	long task_delta, idle_task_delta, delayed_delta;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6056,6 +6083,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6073,6 +6101,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6090,6 +6119,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6943,7 +6973,7 @@ requeue_delayed_entity(struct sched_entity *se)
 	}
 
 	update_load_avg(cfs_rq, se, 0);
-	se->sched_delayed = 0;
+	clear_delayed(se);
 }
 
 /*
@@ -6957,6 +6987,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
+	int h_nr_delayed = 0;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_running = rq->cfs.h_nr_running;
 	u64 slice = 0;
@@ -6983,6 +7014,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
+	if (task_new)
+		h_nr_delayed = !!se->sched_delayed;
+
 	for_each_sched_entity(se) {
 		if (se->on_rq) {
 			if (se->sched_delayed)
@@ -7005,6 +7039,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7028,6 +7063,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7090,13 +7126,16 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
 	int h_nr_running = 0;
+	int h_nr_delayed = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_running = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
+		if (!task_sleep && !task_delayed)
+			h_nr_delayed = !!se->sched_delayed;
 	} else {
 		cfs_rq = group_cfs_rq(se);
 		slice = cfs_rq_min_slice(cfs_rq);
@@ -7114,6 +7153,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
@@ -7152,6 +7192,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
@@ -8780,7 +8821,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
 		return;
 
-	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) {
+	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK) && !pse->sched_delayed) {
 		set_next_buddy(pse);
 	}
 