[PATCH v1] sched: Move curr check into __enqueue_entity and __dequeue_entity
From: Jemmy Wong @ 2025-06-21 14:20 UTC
  To: Ingo Molnar, Peter Zijlstra, Juri Lelli, Vincent Guittot,
	Dietmar Eggemann, Steven Rostedt, Ben Segall, Mel Gorman,
	Valentin Schneider, linux-kernel
  Cc: jemmywong512

Several call sites check se == cfs_rq->curr before calling
__enqueue_entity() and __dequeue_entity(). Move this check into the
function bodies themselves so callers can invoke them unconditionally,
simplifying the code.
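
For illustration, a minimal before/after sketch of the caller pattern
being consolidated (a simplified stand-in, not the full kernel code):

	/* Before: each caller open-codes the guard. */
	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);

	/* After: the guard lives in the callee, so callers are unconditional. */
	static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
	{
		if (unlikely(se == cfs_rq->curr))
			return;
		/* ... remove se from the rbtree and update load tracking ... */
	}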

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/fair.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7a14da5396fb..24cbb4b6c3fb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -849,6 +849,9 @@ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
  */
 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	if (unlikely(se == cfs_rq->curr))
+		return;
+
 	avg_vruntime_add(cfs_rq, se);
 	se->min_vruntime = se->vruntime;
 	se->min_slice = se->slice;
@@ -858,6 +861,9 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)

 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	if (unlikely(se == cfs_rq->curr))
+		return;
+
 	rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
 				  &min_vruntime_cb);
 	avg_vruntime_sub(cfs_rq, se);
@@ -3797,8 +3803,6 @@ static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int fla
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
-	bool curr = cfs_rq->curr == se;
-
 	if (se->on_rq) {
 		/* commit outstanding execution time */
 		update_curr(cfs_rq);
@@ -3806,8 +3810,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		se->deadline -= se->vruntime;
 		se->rel_deadline = 1;
 		cfs_rq->nr_queued--;
-		if (!curr)
-			__dequeue_entity(cfs_rq, se);
+		__dequeue_entity(cfs_rq, se);
 		update_load_sub(&cfs_rq->load, se->load.weight);
 	}
 	dequeue_load_avg(cfs_rq, se);
@@ -3834,8 +3837,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	if (se->on_rq) {
 		place_entity(cfs_rq, se, 0);
 		update_load_add(&cfs_rq->load, se->load.weight);
-		if (!curr)
-			__enqueue_entity(cfs_rq, se);
+		__enqueue_entity(cfs_rq, se);
 		cfs_rq->nr_queued++;

 		/*
@@ -5363,8 +5365,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

 	check_schedstat_required();
 	update_stats_enqueue_fair(cfs_rq, se, flags);
-	if (!curr)
-		__enqueue_entity(cfs_rq, se);
+	__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;

 	if (cfs_rq->nr_queued == 1) {
@@ -5506,8 +5507,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		se->rel_deadline = 1;
 	}

-	if (se != cfs_rq->curr)
-		__dequeue_entity(cfs_rq, se);
+	__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
 	account_entity_dequeue(cfs_rq, se);

@@ -5624,14 +5624,14 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	/* throttle cfs_rqs exceeding runtime */
 	check_cfs_rq_runtime(cfs_rq);

+	WARN_ON_ONCE(cfs_rq->curr != prev);
 	if (prev->on_rq) {
 		update_stats_wait_start_fair(cfs_rq, prev);
-		/* Put 'current' back into the tree. */
-		__enqueue_entity(cfs_rq, prev);
 		/* in !on_rq case, update occurred at dequeue */
 		update_load_avg(cfs_rq, prev, 0);
+		cfs_rq->curr = NULL;
+		__enqueue_entity(cfs_rq, prev);
 	}
-	WARN_ON_ONCE(cfs_rq->curr != prev);
 	cfs_rq->curr = NULL;
 }

@@ -6910,12 +6910,10 @@ requeue_delayed_entity(struct sched_entity *se)
 		update_entity_lag(cfs_rq, se);
 		if (se->vlag > 0) {
 			cfs_rq->nr_queued--;
-			if (se != cfs_rq->curr)
-				__dequeue_entity(cfs_rq, se);
+			__dequeue_entity(cfs_rq, se);
 			se->vlag = 0;
 			place_entity(cfs_rq, se, 0);
-			if (se != cfs_rq->curr)
-				__enqueue_entity(cfs_rq, se);
+			__enqueue_entity(cfs_rq, se);
 			cfs_rq->nr_queued++;
 		}
 	}
--
2.43.0
