From: Matthew Brost <matthew.brost@intel.com>
To: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
kernel-dev@igalia.com,
"Christian König" <christian.koenig@amd.com>,
"Danilo Krummrich" <dakr@kernel.org>,
"Philipp Stanner" <phasta@kernel.org>
Subject: Re: [PATCH 06/28] drm/sched: Move run queue related code into a separate file
Date: Wed, 8 Oct 2025 15:49:26 -0700 [thread overview]
Message-ID: <aObqdqGRMs3mp1AP@lstrano-desk.jf.intel.com> (raw)
In-Reply-To: <20251008085359.52404-7-tvrtko.ursulin@igalia.com>
On Wed, Oct 08, 2025 at 09:53:37AM +0100, Tvrtko Ursulin wrote:
> Let's move all the code dealing with struct drm_sched_rq into a separate
> compilation unit. Advantage being sched_main.c is left with a clearer set
> of responsibilities.
>
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
> Cc: Christian König <christian.koenig@amd.com>
> Cc: Danilo Krummrich <dakr@kernel.org>
> Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> Cc: Philipp Stanner <phasta@kernel.org>
> ---
> drivers/gpu/drm/scheduler/Makefile | 2 +-
> drivers/gpu/drm/scheduler/sched_internal.h | 7 +
> drivers/gpu/drm/scheduler/sched_main.c | 218 +-------------------
> drivers/gpu/drm/scheduler/sched_rq.c | 222 +++++++++++++++++++++
> 4 files changed, 232 insertions(+), 217 deletions(-)
> create mode 100644 drivers/gpu/drm/scheduler/sched_rq.c
>
> diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
> index 6e13e4c63e9d..74e75eff6df5 100644
> --- a/drivers/gpu/drm/scheduler/Makefile
> +++ b/drivers/gpu/drm/scheduler/Makefile
> @@ -20,7 +20,7 @@
> # OTHER DEALINGS IN THE SOFTWARE.
> #
> #
> -gpu-sched-y := sched_main.o sched_fence.o sched_entity.o
> +gpu-sched-y := sched_main.o sched_fence.o sched_entity.o sched_rq.o
>
> obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
>
> diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
> index 8269c5392a82..5a8984e057e5 100644
> --- a/drivers/gpu/drm/scheduler/sched_internal.h
> +++ b/drivers/gpu/drm/scheduler/sched_internal.h
> @@ -10,8 +10,15 @@ extern int drm_sched_policy;
> #define DRM_SCHED_POLICY_RR 0
> #define DRM_SCHED_POLICY_FIFO 1
>
> +bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
> + struct drm_sched_entity *entity);
> void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
>
> +void drm_sched_rq_init(struct drm_sched_rq *rq,
> + struct drm_gpu_scheduler *sched);
> +struct drm_sched_entity *
> +drm_sched_rq_select_entity(struct drm_gpu_scheduler *sched,
> + struct drm_sched_rq *rq);
> struct drm_gpu_scheduler *
> drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts);
> void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index e5d02c28665c..41bfee6b1777 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -112,8 +112,8 @@ static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
> * Return true if we can push at least one more job from @entity, false
> * otherwise.
> */
> -static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
> - struct drm_sched_entity *entity)
> +bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
> + struct drm_sched_entity *entity)
> {
> struct drm_sched_job *s_job;
>
> @@ -133,220 +133,6 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
> return drm_sched_available_credits(sched) >= s_job->credits;
> }
>
> -static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
> - const struct rb_node *b)
> -{
> - struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
> - struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);
> -
> - return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
> -}
> -
> -static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
> - struct drm_sched_rq *rq)
> -{
> - if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
> - rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
> - RB_CLEAR_NODE(&entity->rb_tree_node);
> - }
> -}
> -
> -static void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
> - struct drm_sched_rq *rq,
> - ktime_t ts)
> -{
> - /*
> - * Both locks need to be grabbed, one to protect from entity->rq change
> - * for entity from within concurrent drm_sched_entity_select_rq and the
> - * other to update the rb tree structure.
> - */
> - lockdep_assert_held(&entity->lock);
> - lockdep_assert_held(&rq->lock);
> -
> - drm_sched_rq_remove_fifo_locked(entity, rq);
> -
> - entity->oldest_job_waiting = ts;
> -
> - rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
> - drm_sched_entity_compare_before);
> -}
> -
> -/**
> - * drm_sched_rq_init - initialize a given run queue struct
> - *
> - * @rq: scheduler run queue
> - * @sched: scheduler instance to associate with this run queue
> - *
> - * Initializes a scheduler runqueue.
> - */
> -static void drm_sched_rq_init(struct drm_sched_rq *rq,
> - struct drm_gpu_scheduler *sched)
> -{
> - spin_lock_init(&rq->lock);
> - INIT_LIST_HEAD(&rq->entities);
> - rq->rb_tree_root = RB_ROOT_CACHED;
> - rq->sched = sched;
> -}
> -
> -/**
> - * drm_sched_rq_add_entity - add an entity
> - *
> - * @entity: scheduler entity
> - * @ts: submission timestamp
> - *
> - * Adds a scheduler entity to the run queue.
> - *
> - * Returns a DRM scheduler pre-selected to handle this entity.
> - */
> -struct drm_gpu_scheduler *
> -drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts)
> -{
> - struct drm_gpu_scheduler *sched;
> - struct drm_sched_rq *rq;
> -
> - /* Add the entity to the run queue */
> - spin_lock(&entity->lock);
> - if (entity->stopped) {
> - spin_unlock(&entity->lock);
> -
> - DRM_ERROR("Trying to push to a killed entity\n");
> - return NULL;
> - }
> -
> - rq = entity->rq;
> - spin_lock(&rq->lock);
> - sched = rq->sched;
> -
> - if (list_empty(&entity->list)) {
> - atomic_inc(sched->score);
> - list_add_tail(&entity->list, &rq->entities);
> - }
> -
> - if (drm_sched_policy == DRM_SCHED_POLICY_RR)
> - ts = entity->rr_ts;
> - drm_sched_rq_update_fifo_locked(entity, rq, ts);
> -
> - spin_unlock(&rq->lock);
> - spin_unlock(&entity->lock);
> -
> - return sched;
> -}
> -
> -/**
> - * drm_sched_rq_remove_entity - remove an entity
> - *
> - * @rq: scheduler run queue
> - * @entity: scheduler entity
> - *
> - * Removes a scheduler entity from the run queue.
> - */
> -void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
> - struct drm_sched_entity *entity)
> -{
> - lockdep_assert_held(&entity->lock);
> -
> - if (list_empty(&entity->list))
> - return;
> -
> - spin_lock(&rq->lock);
> -
> - atomic_dec(rq->sched->score);
> - list_del_init(&entity->list);
> -
> - drm_sched_rq_remove_fifo_locked(entity, rq);
> -
> - spin_unlock(&rq->lock);
> -}
> -
> -static ktime_t
> -drm_sched_rq_get_rr_ts(struct drm_sched_rq *rq, struct drm_sched_entity *entity)
> -{
> - ktime_t ts;
> -
> - lockdep_assert_held(&entity->lock);
> - lockdep_assert_held(&rq->lock);
> -
> - ts = ktime_add_ns(rq->rr_ts, 1);
> - entity->rr_ts = ts;
> - rq->rr_ts = ts;
> -
> - return ts;
> -}
> -
> -/**
> - * drm_sched_rq_pop_entity - pops an entity
> - *
> - * @entity: scheduler entity
> - *
> - * To be called every time after a job is popped from the entity.
> - */
> -void drm_sched_rq_pop_entity(struct drm_sched_entity *entity)
> -{
> - struct drm_sched_job *next_job;
> - struct drm_sched_rq *rq;
> - ktime_t ts;
> -
> - /*
> - * Update the entity's location in the min heap according to
> - * the timestamp of the next job, if any.
> - */
> - next_job = drm_sched_entity_queue_peek(entity);
> - if (!next_job)
> - return;
> -
> - spin_lock(&entity->lock);
> - rq = entity->rq;
> - spin_lock(&rq->lock);
> - if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
> - ts = next_job->submit_ts;
> - else
> - ts = drm_sched_rq_get_rr_ts(rq, entity);
> - drm_sched_rq_update_fifo_locked(entity, rq, ts);
> - spin_unlock(&rq->lock);
> - spin_unlock(&entity->lock);
> -}
> -
> -/**
> - * drm_sched_rq_select_entity - Select an entity which provides a job to run
> - *
> - * @sched: the gpu scheduler
> - * @rq: scheduler run queue to check.
> - *
> - * Find oldest waiting ready entity.
> - *
> - * Return an entity if one is found; return an error-pointer (!NULL) if an
> - * entity was ready, but the scheduler had insufficient credits to accommodate
> - * its job; return NULL, if no ready entity was found.
> - */
> -static struct drm_sched_entity *
> -drm_sched_rq_select_entity(struct drm_gpu_scheduler *sched,
> - struct drm_sched_rq *rq)
> -{
> - struct rb_node *rb;
> -
> - spin_lock(&rq->lock);
> - for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
> - struct drm_sched_entity *entity;
> -
> - entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
> - if (drm_sched_entity_is_ready(entity)) {
> - /* If we can't queue yet, preserve the current entity in
> - * terms of fairness.
> - */
> - if (!drm_sched_can_queue(sched, entity)) {
> - spin_unlock(&rq->lock);
> - return ERR_PTR(-ENOSPC);
> - }
> -
> - reinit_completion(&entity->entity_idle);
> - break;
> - }
> - }
> - spin_unlock(&rq->lock);
> -
> - return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
> -}
> -
> /**
> * drm_sched_run_job_queue - enqueue run-job work
> * @sched: scheduler instance
> diff --git a/drivers/gpu/drm/scheduler/sched_rq.c b/drivers/gpu/drm/scheduler/sched_rq.c
> new file mode 100644
> index 000000000000..75cbca53b3d3
> --- /dev/null
> +++ b/drivers/gpu/drm/scheduler/sched_rq.c
> @@ -0,0 +1,222 @@
> +#include <linux/rbtree.h>
> +
> +#include <drm/drm_print.h>
> +#include <drm/gpu_scheduler.h>
> +
> +#include "sched_internal.h"
> +
> +static __always_inline bool
> +drm_sched_entity_compare_before(struct rb_node *a, const struct rb_node *b)
> +{
> + struct drm_sched_entity *ea =
> + rb_entry((a), struct drm_sched_entity, rb_tree_node);
> + struct drm_sched_entity *eb =
> + rb_entry((b), struct drm_sched_entity, rb_tree_node);
> +
> + return ktime_before(ea->oldest_job_waiting, eb->oldest_job_waiting);
> +}
> +
> +static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
> + struct drm_sched_rq *rq)
> +{
> + if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
> + rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
> + RB_CLEAR_NODE(&entity->rb_tree_node);
> + }
> +}
> +
> +static void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
> + struct drm_sched_rq *rq,
> + ktime_t ts)
> +{
> + /*
> + * Both locks need to be grabbed, one to protect from entity->rq change
> + * for entity from within concurrent drm_sched_entity_select_rq and the
> + * other to update the rb tree structure.
> + */
> + lockdep_assert_held(&entity->lock);
> + lockdep_assert_held(&rq->lock);
> +
> + drm_sched_rq_remove_fifo_locked(entity, rq);
> +
> + entity->oldest_job_waiting = ts;
> +
> + rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
> + drm_sched_entity_compare_before);
> +}
> +
> +/**
> + * drm_sched_rq_init - initialize a given run queue struct
> + *
> + * @rq: scheduler run queue
> + * @sched: scheduler instance to associate with this run queue
> + *
> + * Initializes a scheduler runqueue.
> + */
> +void drm_sched_rq_init(struct drm_sched_rq *rq,
> + struct drm_gpu_scheduler *sched)
> +{
> + spin_lock_init(&rq->lock);
> + INIT_LIST_HEAD(&rq->entities);
> + rq->rb_tree_root = RB_ROOT_CACHED;
> + rq->sched = sched;
> +}
> +
> +/**
> + * drm_sched_rq_add_entity - add an entity
> + *
> + * @entity: scheduler entity
> + * @ts: submission timestamp
> + *
> + * Adds a scheduler entity to the run queue.
> + *
> + * Returns a DRM scheduler pre-selected to handle this entity.
> + */
> +struct drm_gpu_scheduler *
> +drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts)
> +{
> + struct drm_gpu_scheduler *sched;
> + struct drm_sched_rq *rq;
> +
> + /* Add the entity to the run queue */
> + spin_lock(&entity->lock);
> + if (entity->stopped) {
> + spin_unlock(&entity->lock);
> +
> + DRM_ERROR("Trying to push to a killed entity\n");
> + return NULL;
> + }
> +
> + rq = entity->rq;
> + spin_lock(&rq->lock);
> + sched = rq->sched;
> +
> + if (list_empty(&entity->list)) {
> + atomic_inc(sched->score);
> + list_add_tail(&entity->list, &rq->entities);
> + }
> +
> + if (drm_sched_policy == DRM_SCHED_POLICY_RR)
> + ts = entity->rr_ts;
> + drm_sched_rq_update_fifo_locked(entity, rq, ts);
> +
> + spin_unlock(&rq->lock);
> + spin_unlock(&entity->lock);
> +
> + return sched;
> +}
> +
> +/**
> + * drm_sched_rq_remove_entity - remove an entity
> + *
> + * @rq: scheduler run queue
> + * @entity: scheduler entity
> + *
> + * Removes a scheduler entity from the run queue.
> + */
> +void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
> + struct drm_sched_entity *entity)
> +{
> + lockdep_assert_held(&entity->lock);
> +
> + if (list_empty(&entity->list))
> + return;
> +
> + spin_lock(&rq->lock);
> +
> + atomic_dec(rq->sched->score);
> + list_del_init(&entity->list);
> +
> + drm_sched_rq_remove_fifo_locked(entity, rq);
> +
> + spin_unlock(&rq->lock);
> +}
> +
> +static ktime_t
> +drm_sched_rq_get_rr_ts(struct drm_sched_rq *rq, struct drm_sched_entity *entity)
> +{
> + ktime_t ts;
> +
> + lockdep_assert_held(&entity->lock);
> + lockdep_assert_held(&rq->lock);
> +
> + ts = ktime_add_ns(rq->rr_ts, 1);
> + entity->rr_ts = ts;
> + rq->rr_ts = ts;
> +
> + return ts;
> +}
> +
> +/**
> + * drm_sched_rq_pop_entity - pops an entity
> + *
> + * @entity: scheduler entity
> + *
> + * To be called every time after a job is popped from the entity.
> + */
> +void drm_sched_rq_pop_entity(struct drm_sched_entity *entity)
> +{
> + struct drm_sched_job *next_job;
> + struct drm_sched_rq *rq;
> + ktime_t ts;
> +
> + /*
> + * Update the entity's location in the min heap according to
> + * the timestamp of the next job, if any.
> + */
> + next_job = drm_sched_entity_queue_peek(entity);
> + if (!next_job)
> + return;
> +
> + spin_lock(&entity->lock);
> + rq = entity->rq;
> + spin_lock(&rq->lock);
> + if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
> + ts = next_job->submit_ts;
> + else
> + ts = drm_sched_rq_get_rr_ts(rq, entity);
> + drm_sched_rq_update_fifo_locked(entity, rq, ts);
> + spin_unlock(&rq->lock);
> + spin_unlock(&entity->lock);
> +}
> +
> +/**
> + * drm_sched_rq_select_entity - Select an entity which provides a job to run
> + *
> + * @sched: the gpu scheduler
> + * @rq: scheduler run queue to check.
> + *
> + * Find oldest waiting ready entity.
> + *
> + * Return an entity if one is found; return an error-pointer (!NULL) if an
> + * entity was ready, but the scheduler had insufficient credits to accommodate
> + * its job; return NULL, if no ready entity was found.
> + */
> +struct drm_sched_entity *
> +drm_sched_rq_select_entity(struct drm_gpu_scheduler *sched,
> + struct drm_sched_rq *rq)
> +{
> + struct rb_node *rb;
> +
> + spin_lock(&rq->lock);
> + for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
> + struct drm_sched_entity *entity;
> +
> + entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
> + if (drm_sched_entity_is_ready(entity)) {
> + /* If we can't queue yet, preserve the current entity in
> + * terms of fairness.
> + */
> + if (!drm_sched_can_queue(sched, entity)) {
> + spin_unlock(&rq->lock);
> + return ERR_PTR(-ENOSPC);
> + }
> +
> + reinit_completion(&entity->entity_idle);
> + break;
> + }
> + }
> + spin_unlock(&rq->lock);
> +
> + return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
> +}
> --
> 2.48.0
>
next prev parent reply other threads:[~2025-10-08 22:49 UTC|newest]
Thread overview: 76+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-08 8:53 [PATCH 00/28] Fair DRM scheduler Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 01/28] drm/sched: Reverse drm_sched_rq_init arguments Tvrtko Ursulin
2025-10-10 8:55 ` Philipp Stanner
2025-10-10 9:46 ` Tvrtko Ursulin
2025-10-10 10:36 ` Philipp Stanner
2025-10-11 13:21 ` Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 02/28] drm/sched: Add some scheduling quality unit tests Tvrtko Ursulin
2025-10-10 9:38 ` Philipp Stanner
2025-10-11 13:09 ` Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 03/28] drm/sched: Add some more " Tvrtko Ursulin
2025-10-10 9:48 ` Philipp Stanner
2025-10-11 13:21 ` Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 04/28] drm/sched: Implement RR via FIFO Tvrtko Ursulin
2025-10-10 10:18 ` Philipp Stanner
2025-10-11 13:30 ` Tvrtko Ursulin
2025-10-14 6:40 ` Philipp Stanner
2025-10-08 8:53 ` [PATCH 05/28] drm/sched: Consolidate entity run queue management Tvrtko Ursulin
2025-10-10 10:49 ` Philipp Stanner
2025-10-11 14:19 ` Tvrtko Ursulin
2025-10-14 6:53 ` Philipp Stanner
2025-10-14 7:26 ` Tvrtko Ursulin
2025-10-14 8:52 ` Philipp Stanner
2025-10-14 10:04 ` Tvrtko Ursulin
2025-10-14 11:23 ` Philipp Stanner
2025-10-08 8:53 ` [PATCH 06/28] drm/sched: Move run queue related code into a separate file Tvrtko Ursulin
2025-10-08 22:49 ` Matthew Brost [this message]
2025-10-08 8:53 ` [PATCH 07/28] drm/sched: Free all finished jobs at once Tvrtko Ursulin
2025-10-08 22:48 ` Matthew Brost
2025-10-08 8:53 ` [PATCH 08/28] drm/sched: Account entity GPU time Tvrtko Ursulin
2025-10-10 12:22 ` Philipp Stanner
2025-10-11 14:56 ` Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 09/28] drm/sched: Remove idle entity from tree Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 10/28] drm/sched: Add fair scheduling policy Tvrtko Ursulin
2025-10-14 10:27 ` Philipp Stanner
2025-10-14 12:56 ` Tvrtko Ursulin
2025-10-14 14:02 ` Philipp Stanner
2025-10-14 14:32 ` Simona Vetter
2025-10-14 14:58 ` Tvrtko Ursulin
2025-10-16 7:06 ` Philipp Stanner
2025-10-16 8:42 ` Tvrtko Ursulin
2025-10-16 9:50 ` Danilo Krummrich
2025-10-16 10:54 ` Tvrtko Ursulin
2025-10-16 11:14 ` Danilo Krummrich
2025-10-08 8:53 ` [PATCH 11/28] drm/sched: Favour interactive clients slightly Tvrtko Ursulin
2025-10-14 10:53 ` Philipp Stanner
2025-10-14 12:20 ` Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 12/28] drm/sched: Switch default policy to fair Tvrtko Ursulin
2025-10-10 12:56 ` Philipp Stanner
2025-10-08 8:53 ` [PATCH 13/28] drm/sched: Remove FIFO and RR and simplify to a single run queue Tvrtko Ursulin
2025-10-14 11:16 ` Philipp Stanner
2025-10-14 13:16 ` Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 14/28] drm/sched: Embed run queue singleton into the scheduler Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 15/28] accel/amdxdna: Remove drm_sched_init_args->num_rqs usage Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 16/28] accel/rocket: " Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 17/28] drm/amdgpu: " Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 18/28] drm/etnaviv: " Tvrtko Ursulin
2025-10-08 10:31 ` Christian Gmeiner
2025-10-08 8:53 ` [PATCH 19/28] drm/imagination: " Tvrtko Ursulin
2025-10-10 14:29 ` Matt Coster
2025-10-08 8:53 ` [PATCH 20/28] drm/lima: " Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 21/28] drm/msm: " Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 22/28] drm/nouveau: " Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 23/28] drm/panfrost: " Tvrtko Ursulin
2025-10-08 14:55 ` Steven Price
2025-10-08 8:53 ` [PATCH 24/28] drm/panthor: " Tvrtko Ursulin
2025-10-08 14:55 ` Steven Price
2025-10-10 10:02 ` Liviu Dudau
2025-10-08 8:53 ` [PATCH 25/28] drm/sched: " Tvrtko Ursulin
2025-10-08 22:44 ` Matthew Brost
2025-10-08 8:53 ` [PATCH 26/28] drm/v3d: " Tvrtko Ursulin
2025-10-10 14:15 ` Melissa Wen
2025-10-08 8:53 ` [PATCH 27/28] drm/xe: " Tvrtko Ursulin
2025-10-08 8:53 ` [PATCH 28/28] drm/sched: Remove drm_sched_init_args->num_rqs Tvrtko Ursulin
2025-10-10 13:00 ` Philipp Stanner
2025-10-11 14:58 ` Tvrtko Ursulin
2025-10-10 8:59 ` [PATCH 00/28] Fair DRM scheduler Philipp Stanner
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=aObqdqGRMs3mp1AP@lstrano-desk.jf.intel.com \
--to=matthew.brost@intel.com \
--cc=amd-gfx@lists.freedesktop.org \
--cc=christian.koenig@amd.com \
--cc=dakr@kernel.org \
--cc=dri-devel@lists.freedesktop.org \
--cc=kernel-dev@igalia.com \
--cc=phasta@kernel.org \
--cc=tvrtko.ursulin@igalia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox