@@ -824,6 +824,8 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
824824 struct dl_rq * dl_rq = dl_rq_of_se (dl_se );
825825 struct rq * rq = rq_of_dl_rq (dl_rq );
826826
827+ update_rq_clock (rq );
828+
827829 WARN_ON (is_dl_boosted (dl_se ));
828830 WARN_ON (dl_time_before (rq_clock (rq ), dl_se -> deadline ));
829831
@@ -1215,6 +1217,8 @@ static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
12151217/* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */
12161218static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC ;
12171219
1220+ static bool dl_server_stopped (struct sched_dl_entity * dl_se );
1221+
12181222static enum hrtimer_restart dl_server_timer (struct hrtimer * timer , struct sched_dl_entity * dl_se )
12191223{
12201224 struct rq * rq = rq_of_dl_se (dl_se );
@@ -1234,6 +1238,7 @@ static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_
12341238
12351239 if (!dl_se -> server_has_tasks (dl_se )) {
12361240 replenish_dl_entity (dl_se );
1241+ dl_server_stopped (dl_se );
12371242 return HRTIMER_NORESTART ;
12381243 }
12391244
@@ -1505,7 +1510,9 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
15051510 if (dl_entity_is_special (dl_se ))
15061511 return ;
15071512
1508- scaled_delta_exec = dl_scaled_delta_exec (rq , dl_se , delta_exec );
1513+ scaled_delta_exec = delta_exec ;
1514+ if (!dl_server (dl_se ))
1515+ scaled_delta_exec = dl_scaled_delta_exec (rq , dl_se , delta_exec );
15091516
15101517 dl_se -> runtime -= scaled_delta_exec ;
15111518
@@ -1612,7 +1619,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
16121619 */
16131620void dl_server_update_idle_time (struct rq * rq , struct task_struct * p )
16141621{
1615- s64 delta_exec , scaled_delta_exec ;
1622+ s64 delta_exec ;
16161623
16171624 if (!rq -> fair_server .dl_defer )
16181625 return ;
@@ -1625,9 +1632,7 @@ void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
16251632 if (delta_exec < 0 )
16261633 return ;
16271634
1628- scaled_delta_exec = dl_scaled_delta_exec (rq , & rq -> fair_server , delta_exec );
1629-
1630- rq -> fair_server .runtime -= scaled_delta_exec ;
1635+ rq -> fair_server .runtime -= delta_exec ;
16311636
16321637 if (rq -> fair_server .runtime < 0 ) {
16331638 rq -> fair_server .dl_defer_running = 0 ;
@@ -1640,31 +1645,17 @@ void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
16401645void dl_server_update (struct sched_dl_entity * dl_se , s64 delta_exec )
16411646{
16421647 /* 0 runtime = fair server disabled */
1643- if (dl_se -> dl_runtime )
1648+ if (dl_se -> dl_runtime ) {
1649+ dl_se -> dl_server_idle = 0 ;
16441650 update_curr_dl_se (dl_se -> rq , dl_se , delta_exec );
1651+ }
16451652}
16461653
16471654void dl_server_start (struct sched_dl_entity * dl_se )
16481655{
16491656 struct rq * rq = dl_se -> rq ;
16501657
1651- /*
1652- * XXX: the apply do not work fine at the init phase for the
1653- * fair server because things are not yet set. We need to improve
1654- * this before getting generic.
1655- */
1656- if (!dl_server (dl_se )) {
1657- u64 runtime = 50 * NSEC_PER_MSEC ;
1658- u64 period = 1000 * NSEC_PER_MSEC ;
1659-
1660- dl_server_apply_params (dl_se , runtime , period , 1 );
1661-
1662- dl_se -> dl_server = 1 ;
1663- dl_se -> dl_defer = 1 ;
1664- setup_new_dl_entity (dl_se );
1665- }
1666-
1667- if (!dl_se -> dl_runtime )
1658+ if (!dl_server (dl_se ) || dl_se -> dl_server_active )
16681659 return ;
16691660
16701661 dl_se -> dl_server_active = 1 ;
@@ -1675,7 +1666,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
16751666
16761667void dl_server_stop (struct sched_dl_entity * dl_se )
16771668{
1678- if (!dl_se -> dl_runtime )
1669+ if (!dl_server ( dl_se ) || ! dl_server_active ( dl_se ) )
16791670 return ;
16801671
16811672 dequeue_dl_entity (dl_se , DEQUEUE_SLEEP );
@@ -1685,6 +1676,20 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
16851676 dl_se -> dl_server_active = 0 ;
16861677}
16871678
1679+ static bool dl_server_stopped (struct sched_dl_entity * dl_se )
1680+ {
1681+ if (!dl_se -> dl_server_active )
1682+ return false;
1683+
1684+ if (dl_se -> dl_server_idle ) {
1685+ dl_server_stop (dl_se );
1686+ return true;
1687+ }
1688+
1689+ dl_se -> dl_server_idle = 1 ;
1690+ return false;
1691+ }
1692+
16881693void dl_server_init (struct sched_dl_entity * dl_se , struct rq * rq ,
16891694 dl_server_has_tasks_f has_tasks ,
16901695 dl_server_pick_f pick_task )
@@ -1694,6 +1699,32 @@ void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
16941699 dl_se -> server_pick_task = pick_task ;
16951700}
16961701
/*
 * Set up the per-CPU fair_server deadline entities with default
 * parameters and mark them as deferred dl_servers, for every
 * currently-online CPU.
 */
void sched_init_dl_servers(void)
{
	int cpu;
	struct rq *rq;
	struct sched_dl_entity *dl_se;

	for_each_online_cpu(cpu) {
		/* Default bandwidth: 50ms of runtime per 1000ms period. */
		u64 runtime = 50 * NSEC_PER_MSEC;
		u64 period = 1000 * NSEC_PER_MSEC;

		rq = cpu_rq(cpu);

		/* Scoped rq lock; dropped automatically at end of loop body. */
		guard(rq_lock_irq)(rq);

		dl_se = &rq->fair_server;

		/* The entity must not already be set up as a server. */
		WARN_ON(dl_server(dl_se));

		dl_server_apply_params(dl_se, runtime, period, 1);

		dl_se->dl_server = 1;
		dl_se->dl_defer = 1;
		setup_new_dl_entity(dl_se);
	}
}
1727+
16971728void __dl_server_attach_root (struct sched_dl_entity * dl_se , struct rq * rq )
16981729{
16991730 u64 new_bw = dl_se -> dl_bw ;
@@ -2437,7 +2468,7 @@ static struct task_struct *__pick_task_dl(struct rq *rq)
24372468 if (dl_server (dl_se )) {
24382469 p = dl_se -> server_pick_task (dl_se );
24392470 if (!p ) {
2440- if (dl_server_active (dl_se )) {
2471+ if (! dl_server_stopped (dl_se )) {
24412472 dl_se -> dl_yielded = 1 ;
24422473 update_curr_dl_se (rq , dl_se , 0 );
24432474 }
@@ -2978,7 +3009,14 @@ void dl_clear_root_domain(struct root_domain *rd)
29783009 int i ;
29793010
29803011 guard (raw_spinlock_irqsave )(& rd -> dl_bw .lock );
3012+
	/*
	 * Reset total_bw to zero and extra_bw to max_bw so that the next
	 * loop will add the dl-server contributions back properly.
	 */
29813017 rd -> dl_bw .total_bw = 0 ;
3018+ for_each_cpu (i , rd -> span )
3019+ cpu_rq (i )-> dl .extra_bw = cpu_rq (i )-> dl .max_bw ;
29823020
29833021 /*
29843022 * dl_servers are not tasks. Since dl_add_task_root_domain ignores
@@ -3244,6 +3282,9 @@ void sched_dl_do_global(void)
32443282 if (global_rt_runtime () != RUNTIME_INF )
32453283 new_bw = to_ratio (global_rt_period (), global_rt_runtime ());
32463284
3285+ for_each_possible_cpu (cpu )
3286+ init_dl_rq_bw_ratio (& cpu_rq (cpu )-> dl );
3287+
32473288 for_each_possible_cpu (cpu ) {
32483289 rcu_read_lock_sched ();
32493290
@@ -3259,7 +3300,6 @@ void sched_dl_do_global(void)
32593300 raw_spin_unlock_irqrestore (& dl_b -> lock , flags );
32603301
32613302 rcu_read_unlock_sched ();
3262- init_dl_rq_bw_ratio (& cpu_rq (cpu )-> dl );
32633303 }
32643304}
32653305
0 commit comments