From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
To: phasta@kernel.org, dri-devel@lists.freedesktop.org
Cc: amd-gfx@lists.freedesktop.org, kernel-dev@igalia.com,
"Christian König" <christian.koenig@amd.com>,
"Danilo Krummrich" <dakr@kernel.org>,
"Matthew Brost" <matthew.brost@intel.com>
Subject: Re: [RFC v8 04/12] drm/sched: Consolidate entity run queue management
Date: Thu, 11 Sep 2025 15:55:03 +0100
Message-ID: <73681fac-ef47-4005-87ad-cea0b91e6813@igalia.com>
In-Reply-To: <6fe010e8dc5e8a5db35d8702960f42940e342093.camel@mailbox.org>
On 11/09/2025 15:20, Philipp Stanner wrote:
> On Wed, 2025-09-03 at 11:18 +0100, Tvrtko Ursulin wrote:
>> Move the code dealing with entities entering and exiting run queues to
>> helpers to logically separate it from jobs entering and exiting entities.
>
> Sorry if I've asked this before, but does this strictly depend on the
> preceding patches or could it be branched out?
There is no fundamental dependency, so I could re-order the series and pull
this patch ahead. Is that what you would prefer?
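As a quick reference while skimming the quoted diff below: after this patch
the two call sites reduce to the following (a sketch assembled from the hunks
quoted further down, using only the names the patch itself introduces, not a
separate snippet):

  /* drm_sched_entity_push_job(): the first job wakes the scheduler */
  if (first) {
          struct drm_gpu_scheduler *sched;

          sched = drm_sched_rq_add_entity(entity, submit_ts);
          if (sched)
                  drm_sched_wakeup(sched);
  }

  /* drm_sched_entity_pop_job(): re-position the entity for its next job */
  spsc_queue_pop(&entity->job_queue);
  drm_sched_rq_pop_entity(entity);

The entity/rq locking, the stopped-entity check and the RR vs FIFO timestamp
selection all end up inside the two helpers in sched_main.c.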
Regards,
Tvrtko
>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
>> Cc: Christian König <christian.koenig@amd.com>
>> Cc: Danilo Krummrich <dakr@kernel.org>
>> Cc: Matthew Brost <matthew.brost@intel.com>
>> Cc: Philipp Stanner <phasta@kernel.org>
>> ---
>> drivers/gpu/drm/scheduler/sched_entity.c | 64 ++-------------
>> drivers/gpu/drm/scheduler/sched_internal.h | 8 +-
>> drivers/gpu/drm/scheduler/sched_main.c | 95 +++++++++++++++++++---
>> 3 files changed, 91 insertions(+), 76 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
>> index 4852006f2308..7a0a52ba87bf 100644
>> --- a/drivers/gpu/drm/scheduler/sched_entity.c
>> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
>> @@ -456,24 +456,9 @@ drm_sched_job_dependency(struct drm_sched_job *job,
>> return NULL;
>> }
>>
>> -static ktime_t
>> -drm_sched_rq_get_rr_ts(struct drm_sched_rq *rq, struct drm_sched_entity *entity)
>> -{
>> - ktime_t ts;
>> -
>> - lockdep_assert_held(&entity->lock);
>> - lockdep_assert_held(&rq->lock);
>> -
>> - ts = ktime_add_ns(rq->rr_ts, 1);
>> - entity->rr_ts = ts;
>> - rq->rr_ts = ts;
>> -
>> - return ts;
>> -}
>> -
>> struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>> {
>> - struct drm_sched_job *sched_job, *next_job;
>> + struct drm_sched_job *sched_job;
>>
>> sched_job = drm_sched_entity_queue_peek(entity);
>> if (!sched_job)
>> @@ -502,26 +487,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>>
>> spsc_queue_pop(&entity->job_queue);
>>
>> - /*
>> - * Update the entity's location in the min heap according to
>> - * the timestamp of the next job, if any.
>> - */
>> - next_job = drm_sched_entity_queue_peek(entity);
>> - if (next_job) {
>> - struct drm_sched_rq *rq;
>> - ktime_t ts;
>> -
>> - spin_lock(&entity->lock);
>> - rq = entity->rq;
>> - spin_lock(&rq->lock);
>> - if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
>> - ts = next_job->submit_ts;
>> - else
>> - ts = drm_sched_rq_get_rr_ts(rq, entity);
>> - drm_sched_rq_update_fifo_locked(entity, rq, ts);
>> - spin_unlock(&rq->lock);
>> - spin_unlock(&entity->lock);
>> - }
>> + drm_sched_rq_pop_entity(entity);
>>
>> /* Jobs and entities might have different lifecycles. Since we're
>> * removing the job from the entities queue, set the jobs entity pointer
>> @@ -611,30 +577,10 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
>> /* first job wakes up scheduler */
>> if (first) {
>> struct drm_gpu_scheduler *sched;
>> - struct drm_sched_rq *rq;
>>
>> - /* Add the entity to the run queue */
>> - spin_lock(&entity->lock);
>> - if (entity->stopped) {
>> - spin_unlock(&entity->lock);
>> -
>> - DRM_ERROR("Trying to push to a killed entity\n");
>> - return;
>> - }
>> -
>> - rq = entity->rq;
>> - sched = rq->sched;
>> -
>> - spin_lock(&rq->lock);
>> - drm_sched_rq_add_entity(rq, entity);
>> - if (drm_sched_policy == DRM_SCHED_POLICY_RR)
>> - submit_ts = entity->rr_ts;
>> - drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);
>> -
>> - spin_unlock(&rq->lock);
>> - spin_unlock(&entity->lock);
>> -
>> - drm_sched_wakeup(sched);
>> + sched = drm_sched_rq_add_entity(entity, submit_ts);
>> + if (sched)
>> + drm_sched_wakeup(sched);
>> }
>> }
>> EXPORT_SYMBOL(drm_sched_entity_push_job);
>> diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
>> index 7ea5a6736f98..8269c5392a82 100644
>> --- a/drivers/gpu/drm/scheduler/sched_internal.h
>> +++ b/drivers/gpu/drm/scheduler/sched_internal.h
>> @@ -12,13 +12,11 @@ extern int drm_sched_policy;
>>
>> void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
>>
>> -void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
>> - struct drm_sched_entity *entity);
>> +struct drm_gpu_scheduler *
>> +drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts);
>> void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
>> struct drm_sched_entity *entity);
>> -
>> -void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
>> - struct drm_sched_rq *rq, ktime_t ts);
>> +void drm_sched_rq_pop_entity(struct drm_sched_entity *entity);
>>
>> void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
>> struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
>> index 1db0a4aa1d46..c53931e63458 100644
>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>> @@ -151,9 +151,9 @@ static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
>> }
>> }
>>
>> -void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
>> - struct drm_sched_rq *rq,
>> - ktime_t ts)
>> +static void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
>> + struct drm_sched_rq *rq,
>> + ktime_t ts)
>> {
>> /*
>> * Both locks need to be grabbed, one to protect from entity->rq change
>> @@ -191,22 +191,45 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
>> /**
>> * drm_sched_rq_add_entity - add an entity
>> *
>> - * @rq: scheduler run queue
>> * @entity: scheduler entity
>> + * @ts: submission timestamp
>> *
>> * Adds a scheduler entity to the run queue.
>> + *
>> + * Returns a DRM scheduler pre-selected to handle this entity.
>> */
>> -void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
>> - struct drm_sched_entity *entity)
>> +struct drm_gpu_scheduler *
>> +drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts)
>> {
>> - lockdep_assert_held(&entity->lock);
>> - lockdep_assert_held(&rq->lock);
>> + struct drm_gpu_scheduler *sched;
>> + struct drm_sched_rq *rq;
>>
>> - if (!list_empty(&entity->list))
>> - return;
>> + /* Add the entity to the run queue */
>> + spin_lock(&entity->lock);
>> + if (entity->stopped) {
>> + spin_unlock(&entity->lock);
>>
>> - atomic_inc(rq->sched->score);
>> - list_add_tail(&entity->list, &rq->entities);
>> + DRM_ERROR("Trying to push to a killed entity\n");
>> + return NULL;
>> + }
>> +
>> + rq = entity->rq;
>> + spin_lock(&rq->lock);
>> + sched = rq->sched;
>> +
>> + if (list_empty(&entity->list)) {
>> + atomic_inc(sched->score);
>> + list_add_tail(&entity->list, &rq->entities);
>> + }
>> +
>> + if (drm_sched_policy == DRM_SCHED_POLICY_RR)
>> + ts = entity->rr_ts;
>> + drm_sched_rq_update_fifo_locked(entity, rq, ts);
>> +
>> + spin_unlock(&rq->lock);
>> + spin_unlock(&entity->lock);
>> +
>> + return sched;
>> }
>>
>> /**
>> @@ -235,6 +258,54 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
>> spin_unlock(&rq->lock);
>> }
>>
>> +static ktime_t
>> +drm_sched_rq_get_rr_ts(struct drm_sched_rq *rq, struct drm_sched_entity *entity)
>> +{
>> + ktime_t ts;
>> +
>> + lockdep_assert_held(&entity->lock);
>> + lockdep_assert_held(&rq->lock);
>> +
>> + ts = ktime_add_ns(rq->rr_ts, 1);
>> + entity->rr_ts = ts;
>> + rq->rr_ts = ts;
>> +
>> + return ts;
>> +}
>> +
>> +/**
>> + * drm_sched_rq_pop_entity - pops an entity
>> + *
>> + * @entity: scheduler entity
>> + *
>> + * To be called every time after a job is popped from the entity.
>> + */
>> +void drm_sched_rq_pop_entity(struct drm_sched_entity *entity)
>> +{
>> + struct drm_sched_job *next_job;
>> + struct drm_sched_rq *rq;
>> + ktime_t ts;
>> +
>> + /*
>> + * Update the entity's location in the min heap according to
>> + * the timestamp of the next job, if any.
>> + */
>> + next_job = drm_sched_entity_queue_peek(entity);
>> + if (!next_job)
>> + return;
>> +
>> + spin_lock(&entity->lock);
>> + rq = entity->rq;
>> + spin_lock(&rq->lock);
>> + if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
>> + ts = next_job->submit_ts;
>> + else
>> + ts = drm_sched_rq_get_rr_ts(rq, entity);
>> + drm_sched_rq_update_fifo_locked(entity, rq, ts);
>> + spin_unlock(&rq->lock);
>> + spin_unlock(&entity->lock);
>> +}
>> +
>> /**
>> * drm_sched_rq_select_entity - Select an entity which provides a job to run
>> *
>
Thread overview: 35+ messages
2025-09-03 10:18 [RFC v8 00/12] Fair DRM scheduler Tvrtko Ursulin
2025-09-03 10:18 ` [RFC v8 01/12] drm/sched: Add some scheduling quality unit tests Tvrtko Ursulin
2025-09-23 13:01 ` Philipp Stanner
2025-09-25 11:41 ` Tvrtko Ursulin
2025-09-03 10:18 ` [RFC v8 02/12] drm/sched: Add some more " Tvrtko Ursulin
2025-09-03 10:18 ` [RFC v8 03/12] drm/sched: Implement RR via FIFO Tvrtko Ursulin
2025-09-03 10:18 ` [RFC v8 04/12] drm/sched: Consolidate entity run queue management Tvrtko Ursulin
2025-09-11 14:20 ` Philipp Stanner
2025-09-11 14:55 ` Tvrtko Ursulin [this message]
2025-09-16 7:41 ` Philipp Stanner
2025-09-16 9:04 ` Tvrtko Ursulin
2025-09-03 10:18 ` [RFC v8 05/12] drm/sched: Move run queue related code into a separate file Tvrtko Ursulin
2025-09-24 7:54 ` Philipp Stanner
2025-09-03 10:18 ` [RFC v8 06/12] drm/sched: Free all finished jobs at once Tvrtko Ursulin
2025-09-24 8:07 ` Philipp Stanner
2025-09-03 10:18 ` [RFC v8 07/12] drm/sched: Account entity GPU time Tvrtko Ursulin
2025-09-24 9:11 ` Philipp Stanner
2025-09-25 11:52 ` Tvrtko Ursulin
2025-09-26 12:30 ` Philipp Stanner
2025-09-03 10:18 ` [RFC v8 08/12] drm/sched: Remove idle entity from tree Tvrtko Ursulin
2025-09-11 14:32 ` Philipp Stanner
2025-09-11 15:06 ` Tvrtko Ursulin
2025-09-24 9:15 ` Philipp Stanner
2025-09-25 12:23 ` Tvrtko Ursulin
2025-09-03 10:18 ` [RFC v8 09/12] drm/sched: Add fair scheduling policy Tvrtko Ursulin
2025-09-24 9:38 ` Philipp Stanner
2025-09-25 12:34 ` Tvrtko Ursulin
2025-09-03 10:18 ` [RFC v8 10/12] drm/sched: Break submission patterns with some randomness Tvrtko Ursulin
2025-09-03 10:18 ` [RFC v8 11/12] drm/sched: Remove FIFO and RR and simplify to a single run queue Tvrtko Ursulin
2025-09-24 11:50 ` Philipp Stanner
2025-09-25 12:56 ` Tvrtko Ursulin
2025-09-26 13:32 ` Philipp Stanner
2025-09-03 10:18 ` [RFC v8 12/12] drm/sched: Embed run queue singleton into the scheduler Tvrtko Ursulin
2025-09-24 12:01 ` Philipp Stanner
2025-09-25 13:02 ` Tvrtko Ursulin