@@ -2172,32 +2172,39 @@ static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
 	return false;
 }
 
-static void consume_local_task(struct task_struct *p,
-			       struct scx_dispatch_q *dsq, struct rq *rq)
+static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
+					  struct scx_dispatch_q *src_dsq,
+					  struct rq *dst_rq)
 {
-	lockdep_assert_held(&dsq->lock);	/* released on return */
+	struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
+
+	/* @dsq is locked and @p is on @dst_rq */
+	lockdep_assert_held(&src_dsq->lock);
+	lockdep_assert_rq_held(dst_rq);
 
-	/* @dsq is locked and @p is on this rq */
 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
-	task_unlink_from_dsq(p, dsq);
-	list_add_tail(&p->scx.dsq_list.node, &rq->scx.local_dsq.list);
-	dsq_mod_nr(&rq->scx.local_dsq, 1);
-	p->scx.dsq = &rq->scx.local_dsq;
-	raw_spin_unlock(&dsq->lock);
+
+	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
+		list_add(&p->scx.dsq_list.node, &dst_dsq->list);
+	else
+		list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
+
+	dsq_mod_nr(dst_dsq, 1);
+	p->scx.dsq = dst_dsq;
 }
 
 #ifdef CONFIG_SMP
 /**
- * move_task_to_local_dsq - Move a task from a different rq to a local DSQ
+ * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
  * @p: task to move
  * @enq_flags: %SCX_ENQ_*
  * @src_rq: rq to move the task from, locked on entry, released on return
  * @dst_rq: rq to move the task into, locked on return
  *
  * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
  */
-static void move_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
-				   struct rq *src_rq, struct rq *dst_rq)
+static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
+					  struct rq *src_rq, struct rq *dst_rq)
 {
 	lockdep_assert_rq_held(src_rq);
 
@@ -2320,7 +2327,7 @@ static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
 	raw_spin_rq_unlock(this_rq);
 
 	if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
-		move_task_to_local_dsq(p, 0, src_rq, this_rq);
+		move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
 		return true;
 	} else {
 		raw_spin_rq_unlock(src_rq);
@@ -2351,7 +2358,9 @@ static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
 		struct rq *task_rq = task_rq(p);
 
 		if (rq == task_rq) {
-			consume_local_task(p, dsq, rq);
+			task_unlink_from_dsq(p, dsq);
+			move_local_task_to_local_dsq(p, 0, dsq, rq);
+			raw_spin_unlock(&dsq->lock);
 			return true;
 		}
 
@@ -2431,13 +2440,14 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
 		/*
 		 * If @p is staying on the same rq, there's no need to go
 		 * through the full deactivate/activate cycle. Optimize by
-		 * abbreviating the operations in move_task_to_local_dsq().
+		 * abbreviating move_remote_task_to_local_dsq().
 		 */
 		if (src_rq == dst_rq) {
 			p->scx.holding_cpu = -1;
 			dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
 		} else {
-			move_task_to_local_dsq(p, enq_flags, src_rq, dst_rq);
+			move_remote_task_to_local_dsq(p, enq_flags,
+						      src_rq, dst_rq);
 		}
 
 		/* if the destination CPU is idle, wake it up */
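
For readers following the refactor, here is a minimal user-space sketch of the head-vs-tail placement that the new move_local_task_to_local_dsq() applies based on the enqueue flags. The types, flag values, and list handling below are simplified stand-ins rather than the kernel's struct scx_dispatch_q and list_head API; the only point is to illustrate that SCX_ENQ_HEAD / SCX_ENQ_PREEMPT dispatches land at the front of the local DSQ while everything else is appended at the tail.

/*
 * Minimal user-space sketch of flag-driven DSQ placement.
 * The struct layout and flag values are stand-ins, not the kernel's.
 */
#include <stdio.h>

#define ENQ_HEAD	(1 << 0)	/* stand-in for SCX_ENQ_HEAD */
#define ENQ_PREEMPT	(1 << 1)	/* stand-in for SCX_ENQ_PREEMPT */

struct task {
	const char *name;
	struct task *next;
};

struct dsq {
	struct task *head;	/* singly linked list, head .. tail */
	struct task **tailp;	/* points at the last ->next slot */
	unsigned int nr;
};

static void dsq_init(struct dsq *q)
{
	q->head = NULL;
	q->tailp = &q->head;
	q->nr = 0;
}

/* head/preempt dispatches go to the front, everything else to the back */
static void dsq_enqueue(struct dsq *q, struct task *t, unsigned int flags)
{
	if (flags & (ENQ_HEAD | ENQ_PREEMPT)) {
		t->next = q->head;
		q->head = t;
		if (q->tailp == &q->head)
			q->tailp = &t->next;
	} else {
		t->next = NULL;
		*q->tailp = t;
		q->tailp = &t->next;
	}
	q->nr++;
}

int main(void)
{
	struct dsq local;
	struct task a = { .name = "a" }, b = { .name = "b" }, c = { .name = "c" };

	dsq_init(&local);
	dsq_enqueue(&local, &a, 0);		/* queue: a */
	dsq_enqueue(&local, &b, 0);		/* queue: a b */
	dsq_enqueue(&local, &c, ENQ_PREEMPT);	/* queue: c a b */

	for (struct task *t = local.head; t; t = t->next)
		printf("%s ", t->name);
	printf("(nr=%u)\n", local.nr);
	return 0;
}

Running the sketch prints "c a b (nr=3)": the preempting task overtakes the tasks that were already queued, mirroring how a head/preempt dispatch is placed at the front of the destination local DSQ.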