From: Ketil Johnsen <ketil.johnsen@arm.com>
To: "David Airlie" <airlied@gmail.com>,
"Simona Vetter" <simona@ffwll.ch>,
"Maarten Lankhorst" <maarten.lankhorst@linux.intel.com>,
"Maxime Ripard" <mripard@kernel.org>,
"Thomas Zimmermann" <tzimmermann@suse.de>,
"Jonathan Corbet" <corbet@lwn.net>,
"Shuah Khan" <skhan@linuxfoundation.org>,
"Sumit Semwal" <sumit.semwal@linaro.org>,
"Benjamin Gaignard" <benjamin.gaignard@collabora.com>,
"Brian Starkey" <Brian.Starkey@arm.com>,
"John Stultz" <jstultz@google.com>,
"T.J. Mercier" <tjmercier@google.com>,
"Christian König" <christian.koenig@amd.com>,
"Boris Brezillon" <boris.brezillon@collabora.com>,
"Steven Price" <steven.price@arm.com>,
"Liviu Dudau" <liviu.dudau@arm.com>,
"Daniel Almeida" <daniel.almeida@collabora.com>,
"Alice Ryhl" <aliceryhl@google.com>,
"Matthias Brugger" <matthias.bgg@gmail.com>,
"AngeloGioacchino Del Regno"
<angelogioacchino.delregno@collabora.com>
Cc: dri-devel@lists.freedesktop.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-media@vger.kernel.org,
linaro-mm-sig@lists.linaro.org,
linux-arm-kernel@lists.infradead.org,
linux-mediatek@lists.infradead.org,
Florent Tomasin <florent.tomasin@arm.com>,
Ketil Johnsen <ketil.johnsen@arm.com>
Subject: [PATCH 5/8] drm/panthor: Minor scheduler refactoring
Date: Tue, 5 May 2026 16:05:11 +0200 [thread overview]
Message-ID: <20260505140516.1372388-6-ketil.johnsen@arm.com> (raw)
In-Reply-To: <20260505140516.1372388-1-ketil.johnsen@arm.com>
From: Florent Tomasin <florent.tomasin@arm.com>
Refactor parts of the group scheduling logic into new helper functions.
This will simplify the addition of the protected mode feature.
Also remove the now-redundant assignments of the csg_slot local variable.
Signed-off-by: Florent Tomasin <florent.tomasin@arm.com>
Co-developed-by: Ketil Johnsen <ketil.johnsen@arm.com>
Signed-off-by: Ketil Johnsen <ketil.johnsen@arm.com>
---
drivers/gpu/drm/panthor/panthor_sched.c | 135 +++++++++++++++---------
1 file changed, 86 insertions(+), 49 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 5ee386338005c..987072bd867c4 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -1934,6 +1934,12 @@ static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
memset(ctx, 0, sizeof(*ctx));
}
+static void csgs_upd_ctx_ring_doorbell(struct panthor_csg_slots_upd_ctx *ctx,
+ u32 csg_id)
+{
+ ctx->update_mask |= BIT(csg_id);
+}
+
static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
struct panthor_csg_slots_upd_ctx *ctx,
u32 csg_id, u32 value, u32 mask)
@@ -1944,7 +1950,8 @@ static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
ctx->requests[csg_id].mask |= mask;
- ctx->update_mask |= BIT(csg_id);
+
+ csgs_upd_ctx_ring_doorbell(ctx, csg_id);
}
static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
@@ -1961,8 +1968,12 @@ static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
while (update_slots) {
struct panthor_fw_csg_iface *csg_iface;
u32 csg_id = ffs(update_slots) - 1;
+ u32 req_mask = ctx->requests[csg_id].mask;
update_slots &= ~BIT(csg_id);
+ if (!req_mask)
+ continue;
+
csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
panthor_fw_update_reqs(csg_iface, req,
ctx->requests[csg_id].value,
@@ -1979,6 +1990,9 @@ static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
int ret;
update_slots &= ~BIT(csg_id);
+ if (!req_mask)
+ continue;
+
csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
@@ -2266,12 +2280,76 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
}
}
+static void
+tick_ctx_evict_group(struct panthor_scheduler *sched,
+ struct panthor_csg_slots_upd_ctx *upd_ctx,
+ struct panthor_group *group)
+{
+ struct panthor_device *ptdev = sched->ptdev;
+
+ if (drm_WARN_ON(&ptdev->base, group->csg_id < 0))
+ return;
+
+ csgs_upd_ctx_queue_reqs(ptdev, upd_ctx, group->csg_id,
+ group_can_run(group) ?
+ CSG_STATE_SUSPEND : CSG_STATE_TERMINATE,
+ CSG_STATE_MASK);
+}
+
+
+static void
+tick_ctx_reschedule_group(struct panthor_scheduler *sched,
+ struct panthor_csg_slots_upd_ctx *upd_ctx,
+ struct panthor_group *group,
+ int new_csg_prio)
+{
+ struct panthor_device *ptdev = sched->ptdev;
+ struct panthor_fw_csg_iface *csg_iface;
+ struct panthor_csg_slot *csg_slot;
+
+ if (group->csg_id < 0)
+ return;
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, group->csg_id);
+ csg_slot = &sched->csg_slots[group->csg_id];
+
+ if (csg_slot->priority != new_csg_prio) {
+ panthor_fw_update_reqs(csg_iface, endpoint_req,
+ CSG_EP_REQ_PRIORITY(new_csg_prio),
+ CSG_EP_REQ_PRIORITY_MASK);
+ csgs_upd_ctx_queue_reqs(ptdev, upd_ctx, group->csg_id,
+ csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
+ CSG_ENDPOINT_CONFIG);
+ }
+}
+
+static void
+tick_ctx_schedule_group(struct panthor_scheduler *sched,
+ struct panthor_sched_tick_ctx *ctx,
+ struct panthor_csg_slots_upd_ctx *upd_ctx,
+ struct panthor_group *group,
+ int csg_id, int csg_prio)
+{
+ struct panthor_device *ptdev = sched->ptdev;
+ struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+
+ group_bind_locked(group, csg_id);
+ csg_slot_prog_locked(ptdev, csg_id, csg_prio);
+
+ csgs_upd_ctx_queue_reqs(ptdev, upd_ctx, csg_id,
+ group->state == PANTHOR_CS_GROUP_SUSPENDED ?
+ CSG_STATE_RESUME : CSG_STATE_START,
+ CSG_STATE_MASK);
+ csgs_upd_ctx_queue_reqs(ptdev, upd_ctx, csg_id,
+ csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
+ CSG_ENDPOINT_CONFIG);
+}
+
static void
tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
{
struct panthor_group *group, *tmp;
struct panthor_device *ptdev = sched->ptdev;
- struct panthor_csg_slot *csg_slot;
int prio, new_csg_prio = MAX_CSG_PRIO, i;
u32 free_csg_slots = 0;
struct panthor_csg_slots_upd_ctx upd_ctx;
@@ -2282,42 +2360,12 @@ tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *c
for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
/* Suspend or terminate evicted groups. */
list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
- bool term = !group_can_run(group);
- int csg_id = group->csg_id;
-
- if (drm_WARN_ON(&ptdev->base, csg_id < 0))
- continue;
-
- csg_slot = &sched->csg_slots[csg_id];
- csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
- term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
- CSG_STATE_MASK);
+ tick_ctx_evict_group(sched, &upd_ctx, group);
}
/* Update priorities on already running groups. */
list_for_each_entry(group, &ctx->groups[prio], run_node) {
- struct panthor_fw_csg_iface *csg_iface;
- int csg_id = group->csg_id;
-
- if (csg_id < 0) {
- new_csg_prio--;
- continue;
- }
-
- csg_slot = &sched->csg_slots[csg_id];
- csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
- if (csg_slot->priority == new_csg_prio) {
- new_csg_prio--;
- continue;
- }
-
- panthor_fw_csg_endpoint_req_update(ptdev, csg_iface,
- CSG_EP_REQ_PRIORITY(new_csg_prio),
- CSG_EP_REQ_PRIORITY_MASK);
- csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
- csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
- CSG_ENDPOINT_CONFIG);
- new_csg_prio--;
+ tick_ctx_reschedule_group(sched, &upd_ctx, group, new_csg_prio--);
}
}
@@ -2354,28 +2402,17 @@ tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *c
for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
list_for_each_entry(group, &ctx->groups[prio], run_node) {
int csg_id = group->csg_id;
- struct panthor_fw_csg_iface *csg_iface;
+ int csg_prio = new_csg_prio--;
- if (csg_id >= 0) {
- new_csg_prio--;
+ if (csg_id >= 0)
continue;
- }
csg_id = ffs(free_csg_slots) - 1;
if (drm_WARN_ON(&ptdev->base, csg_id < 0))
break;
- csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
- csg_slot = &sched->csg_slots[csg_id];
- group_bind_locked(group, csg_id);
- csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
- csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
- group->state == PANTHOR_CS_GROUP_SUSPENDED ?
- CSG_STATE_RESUME : CSG_STATE_START,
- CSG_STATE_MASK);
- csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
- csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
- CSG_ENDPOINT_CONFIG);
+ tick_ctx_schedule_group(sched, ctx, &upd_ctx, group, csg_id, csg_prio);
+
free_csg_slots &= ~BIT(csg_id);
}
}
--
2.43.0
next prev parent reply other threads:[~2026-05-05 14:06 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-05 14:05 [PATCH 0/8] drm/panthor: Protected mode support for Mali CSF GPUs Ketil Johnsen
2026-05-05 14:05 ` [PATCH 1/8] dma-heap: Add proper kref handling on dma-buf heaps Ketil Johnsen
2026-05-05 15:20 ` Boris Brezillon
2026-05-05 15:39 ` Maxime Ripard
2026-05-05 16:40 ` Boris Brezillon
2026-05-05 14:05 ` [PATCH 2/8] dma-heap: Provide accessors so that in-kernel drivers can allocate dmabufs from specific heaps Ketil Johnsen
2026-05-05 15:45 ` Boris Brezillon
2026-05-05 14:05 ` [PATCH 3/8] drm/panthor: De-duplicate FW memory section sync Ketil Johnsen
2026-05-05 15:47 ` Boris Brezillon
2026-05-05 14:05 ` [PATCH 4/8] drm/panthor: Add support for protected memory allocation in panthor Ketil Johnsen
2026-05-05 16:15 ` Boris Brezillon
2026-05-06 10:08 ` Maxime Ripard
2026-05-06 10:50 ` Boris Brezillon
2026-05-06 12:43 ` Nicolas Frattaroli
2026-05-06 12:28 ` Nicolas Frattaroli
2026-05-05 14:05 ` Ketil Johnsen [this message]
2026-05-05 16:19 ` [PATCH 5/8] drm/panthor: Minor scheduler refactoring Boris Brezillon
2026-05-06 10:33 ` Boris Brezillon
2026-05-05 14:05 ` [PATCH 6/8] drm/panthor: Explicit expansion of locked VM region Ketil Johnsen
2026-05-05 16:32 ` Boris Brezillon
2026-05-05 14:05 ` [PATCH 7/8] drm/panthor: Add support for entering and exiting protected mode Ketil Johnsen
2026-05-05 17:11 ` Boris Brezillon
2026-05-06 8:51 ` Boris Brezillon
2026-05-05 14:05 ` [PATCH 8/8] drm/panthor: Expose protected rendering features Ketil Johnsen
2026-05-06 9:14 ` Boris Brezillon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260505140516.1372388-6-ketil.johnsen@arm.com \
--to=ketil.johnsen@arm.com \
--cc=Brian.Starkey@arm.com \
--cc=airlied@gmail.com \
--cc=aliceryhl@google.com \
--cc=angelogioacchino.delregno@collabora.com \
--cc=benjamin.gaignard@collabora.com \
--cc=boris.brezillon@collabora.com \
--cc=christian.koenig@amd.com \
--cc=corbet@lwn.net \
--cc=daniel.almeida@collabora.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=florent.tomasin@arm.com \
--cc=jstultz@google.com \
--cc=linaro-mm-sig@lists.linaro.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-media@vger.kernel.org \
--cc=linux-mediatek@lists.infradead.org \
--cc=liviu.dudau@arm.com \
--cc=maarten.lankhorst@linux.intel.com \
--cc=matthias.bgg@gmail.com \
--cc=mripard@kernel.org \
--cc=simona@ffwll.ch \
--cc=skhan@linuxfoundation.org \
--cc=steven.price@arm.com \
--cc=sumit.semwal@linaro.org \
--cc=tjmercier@google.com \
--cc=tzimmermann@suse.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox