* [PATCH 1/4] drm/etnaviv: Use scheduler dependency handling
[not found] <20220331204651.2699107-1-daniel.vetter@ffwll.ch>
@ 2022-03-31 20:46 ` Daniel Vetter
2022-04-01 7:49 ` Christian König
` (2 more replies)
2022-03-31 20:46 ` [PATCH 2/4] drm/gem: Delete gem array fencing helpers Daniel Vetter
1 sibling, 3 replies; 8+ messages in thread
From: Daniel Vetter @ 2022-03-31 20:46 UTC (permalink / raw)
To: DRI Development
Cc: Daniel Vetter, Daniel Vetter, Lucas Stach, Russell King,
Christian Gmeiner, Sumit Semwal, Christian König, etnaviv,
linux-media, linaro-mm-sig
We need to pull the drm_sched_job_init much earlier, but that's very
minor surgery.
v2: Actually fix up cleanup paths by calling drm_sched_job_init, which
I wanted to do in the previous round (and did, for all other drivers).
Spotted by Lucas.
v3: Rebase over renamed functions to add dependencies.
v4: Rebase over patches from Christian.
v5: More rebasing over work from Christian.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Lucas Stach <l.stach@pengutronix.de>
Cc: Russell King <linux+etnaviv@armlinux.org.uk>
Cc: Christian Gmeiner <christian.gmeiner@gmail.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: etnaviv@lists.freedesktop.org
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
---
drivers/gpu/drm/etnaviv/etnaviv_gem.h | 4 +-
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 51 +++++++++++--------
drivers/gpu/drm/etnaviv/etnaviv_sched.c | 53 +-------------------
drivers/gpu/drm/etnaviv/etnaviv_sched.h | 3 +-
4 files changed, 35 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 8983a0ef383e..63688e6e4580 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -80,8 +80,6 @@ struct etnaviv_gem_submit_bo {
u64 va;
struct etnaviv_gem_object *obj;
struct etnaviv_vram_mapping *mapping;
- unsigned int nr_fences;
- struct dma_fence **fences;
};
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
@@ -94,7 +92,7 @@ struct etnaviv_gem_submit {
struct etnaviv_file_private *ctx;
struct etnaviv_gpu *gpu;
struct etnaviv_iommu_context *mmu_context, *prev_mmu_context;
- struct dma_fence *out_fence, *in_fence;
+ struct dma_fence *out_fence;
int out_fence_id;
struct list_head node; /* GPU active submit list */
struct etnaviv_cmdbuf cmdbuf;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 592cbb38609a..5f502c49aec2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -188,9 +188,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
continue;
- ret = dma_resv_get_fences(robj,
- bo->flags & ETNA_SUBMIT_BO_WRITE,
- &bo->nr_fences, &bo->fences);
+ ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
+ &bo->obj->base,
+ bo->flags & ETNA_SUBMIT_BO_WRITE);
if (ret)
return ret;
}
@@ -398,8 +398,6 @@ static void submit_cleanup(struct kref *kref)
wake_up_all(&submit->gpu->fence_event);
- if (submit->in_fence)
- dma_fence_put(submit->in_fence);
if (submit->out_fence) {
/* first remove from IDR, so fence can not be found anymore */
mutex_lock(&submit->gpu->fence_lock);
@@ -530,58 +528,69 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
ALIGN(args->stream_size, 8) + 8);
if (ret)
- goto err_submit_objects;
+ goto err_submit_put;
submit->ctx = file->driver_priv;
submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
submit->exec_state = args->exec_state;
submit->flags = args->flags;
+ ret = drm_sched_job_init(&submit->sched_job,
+ &ctx->sched_entity[args->pipe],
+ submit->ctx);
+ if (ret)
+ goto err_submit_put;
+
ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
relocs, args->nr_relocs)) {
ret = -EINVAL;
- goto err_submit_objects;
+ goto err_submit_job;
}
if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
- submit->in_fence = sync_file_get_fence(args->fence_fd);
- if (!submit->in_fence) {
+ struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);
+ if (!in_fence) {
ret = -EINVAL;
- goto err_submit_objects;
+ goto err_submit_job;
}
+
+ ret = drm_sched_job_add_dependency(&submit->sched_job,
+ in_fence);
+ if (ret)
+ goto err_submit_job;
}
ret = submit_pin_objects(submit);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
ret = submit_reloc(submit, stream, args->stream_size / 4,
relocs, args->nr_relocs);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
ret = submit_lock_objects(submit, &ticket);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
ret = submit_fence_sync(submit);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
- ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
+ ret = etnaviv_sched_push_job(submit);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
submit_attach_object_fences(submit);
@@ -595,7 +604,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
sync_file = sync_file_create(submit->out_fence);
if (!sync_file) {
ret = -ENOMEM;
- goto err_submit_objects;
+ goto err_submit_job;
}
fd_install(out_fence_fd, sync_file->file);
}
@@ -603,7 +612,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
args->fence_fd = out_fence_fd;
args->fence = submit->out_fence_id;
-err_submit_objects:
+err_submit_job:
+ drm_sched_job_cleanup(&submit->sched_job);
+err_submit_put:
etnaviv_submit_put(submit);
err_submit_ww_acquire:
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a8452ce10e3a..72e2553fbc98 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -17,48 +17,6 @@ module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int , 0444);
-static struct dma_fence *
-etnaviv_sched_dependency(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity)
-{
- struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
- struct dma_fence *fence;
- int i;
-
- if (unlikely(submit->in_fence)) {
- fence = submit->in_fence;
- submit->in_fence = NULL;
-
- if (!dma_fence_is_signaled(fence))
- return fence;
-
- dma_fence_put(fence);
- }
-
- for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
- int j;
-
- for (j = 0; j < bo->nr_fences; j++) {
- if (!bo->fences[j])
- continue;
-
- fence = bo->fences[j];
- bo->fences[j] = NULL;
-
- if (!dma_fence_is_signaled(fence))
- return fence;
-
- dma_fence_put(fence);
- }
- kfree(bo->fences);
- bo->nr_fences = 0;
- bo->fences = NULL;
- }
-
- return NULL;
-}
-
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
@@ -132,29 +90,22 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
}
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
- .dependency = etnaviv_sched_dependency,
.run_job = etnaviv_sched_run_job,
.timedout_job = etnaviv_sched_timedout_job,
.free_job = etnaviv_sched_free_job,
};
-int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
- struct etnaviv_gem_submit *submit)
+int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
int ret = 0;
/*
* Hold the fence lock across the whole operation to avoid jobs being
* pushed out of order with regard to their sched fence seqnos as
- * allocated in drm_sched_job_init.
+ * allocated in drm_sched_job_arm.
*/
mutex_lock(&submit->gpu->fence_lock);
- ret = drm_sched_job_init(&submit->sched_job, sched_entity,
- submit->ctx);
- if (ret)
- goto out_unlock;
-
drm_sched_job_arm(&submit->sched_job);
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.h b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
index c0a6796e22c9..baebfa069afc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
@@ -18,7 +18,6 @@ struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
int etnaviv_sched_init(struct etnaviv_gpu *gpu);
void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
-int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
- struct etnaviv_gem_submit *submit);
+int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit);
#endif /* __ETNAVIV_SCHED_H__ */
--
2.34.1
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 2/4] drm/gem: Delete gem array fencing helpers
[not found] <20220331204651.2699107-1-daniel.vetter@ffwll.ch>
2022-03-31 20:46 ` [PATCH 1/4] drm/etnaviv: Use scheduler dependency handling Daniel Vetter
@ 2022-03-31 20:46 ` Daniel Vetter
2022-04-04 13:15 ` Daniel Vetter
1 sibling, 1 reply; 8+ messages in thread
From: Daniel Vetter @ 2022-03-31 20:46 UTC (permalink / raw)
To: DRI Development
Cc: Daniel Vetter, Daniel Vetter, Maarten Lankhorst, Maxime Ripard,
Thomas Zimmermann, David Airlie, Daniel Vetter, Sumit Semwal,
Christian König, linux-media, linaro-mm-sig
Integrated into the scheduler now and all users converted over.
v2: Rebased over changes from König.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <mripard@kernel.org>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
---
drivers/gpu/drm/drm_gem.c | 80 ---------------------------------------
include/drm/drm_gem.h | 5 ---
2 files changed, 85 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 56fb87885146..133dfae06fab 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1273,83 +1273,3 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
-
-/**
- * drm_gem_fence_array_add - Adds the fence to an array of fences to be
- * waited on, deduplicating fences from the same context.
- *
- * @fence_array: array of dma_fence * for the job to block on.
- * @fence: the dma_fence to add to the list of dependencies.
- *
- * This functions consumes the reference for @fence both on success and error
- * cases.
- *
- * Returns:
- * 0 on success, or an error on failing to expand the array.
- */
-int drm_gem_fence_array_add(struct xarray *fence_array,
- struct dma_fence *fence)
-{
- struct dma_fence *entry;
- unsigned long index;
- u32 id = 0;
- int ret;
-
- if (!fence)
- return 0;
-
- /* Deduplicate if we already depend on a fence from the same context.
- * This lets the size of the array of deps scale with the number of
- * engines involved, rather than the number of BOs.
- */
- xa_for_each(fence_array, index, entry) {
- if (entry->context != fence->context)
- continue;
-
- if (dma_fence_is_later(fence, entry)) {
- dma_fence_put(entry);
- xa_store(fence_array, index, fence, GFP_KERNEL);
- } else {
- dma_fence_put(fence);
- }
- return 0;
- }
-
- ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
- if (ret != 0)
- dma_fence_put(fence);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_fence_array_add);
-
-/**
- * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
- * in the GEM object's reservation object to an array of dma_fences for use in
- * scheduling a rendering job.
- *
- * This should be called after drm_gem_lock_reservations() on your array of
- * GEM objects used in the job but before updating the reservations with your
- * own fences.
- *
- * @fence_array: array of dma_fence * for the job to block on.
- * @obj: the gem object to add new dependencies from.
- * @write: whether the job might write the object (so we need to depend on
- * shared fences in the reservation object).
- */
-int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
- struct drm_gem_object *obj,
- bool write)
-{
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
- int ret = 0;
-
- dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
- ret = drm_gem_fence_array_add(fence_array, fence);
- if (ret)
- break;
- }
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index e2941cee14b6..9d7c61a122dc 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -407,11 +407,6 @@ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
-int drm_gem_fence_array_add(struct xarray *fence_array,
- struct dma_fence *fence);
-int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
- struct drm_gem_object *obj,
- bool write);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset);
--
2.34.1
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH 1/4] drm/etnaviv: Use scheduler dependency handling
2022-03-31 20:46 ` [PATCH 1/4] drm/etnaviv: Use scheduler dependency handling Daniel Vetter
@ 2022-04-01 7:49 ` Christian König
2022-04-04 9:12 ` Lucas Stach
2022-04-28 23:59 ` Michael Walle
2 siblings, 0 replies; 8+ messages in thread
From: Christian König @ 2022-04-01 7:49 UTC (permalink / raw)
To: Daniel Vetter, DRI Development
Cc: Daniel Vetter, Lucas Stach, Russell King, Christian Gmeiner,
Sumit Semwal, etnaviv, linux-media, linaro-mm-sig
Am 31.03.22 um 22:46 schrieb Daniel Vetter:
> We need to pull the drm_sched_job_init much earlier, but that's very
> minor surgery.
>
> v2: Actually fix up cleanup paths by calling drm_sched_job_init, which
> I wanted to do in the previous round (and did, for all other drivers).
> Spotted by Lucas.
>
> v3: Rebase over renamed functions to add dependencies.
>
> v4: Rebase over patches from Christian.
>
> v5: More rebasing over work from Christian.
>
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Lucas Stach <l.stach@pengutronix.de>
> Cc: Russell King <linux+etnaviv@armlinux.org.uk>
> Cc: Christian Gmeiner <christian.gmeiner@gmail.com>
> Cc: Sumit Semwal <sumit.semwal@linaro.org>
> Cc: "Christian König" <christian.koenig@amd.com>
> Cc: etnaviv@lists.freedesktop.org
> Cc: linux-media@vger.kernel.org
> Cc: linaro-mm-sig@lists.linaro.org
Reviewed-by: Christian König <christian.koenig@amd.com>
But Lucas should probably ack that we merge it through drm-misc-next.
> ---
> drivers/gpu/drm/etnaviv/etnaviv_gem.h | 4 +-
> drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 51 +++++++++++--------
> drivers/gpu/drm/etnaviv/etnaviv_sched.c | 53 +-------------------
> drivers/gpu/drm/etnaviv/etnaviv_sched.h | 3 +-
> 4 files changed, 35 insertions(+), 76 deletions(-)
>
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> index 8983a0ef383e..63688e6e4580 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> @@ -80,8 +80,6 @@ struct etnaviv_gem_submit_bo {
> u64 va;
> struct etnaviv_gem_object *obj;
> struct etnaviv_vram_mapping *mapping;
> - unsigned int nr_fences;
> - struct dma_fence **fences;
> };
>
> /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
> @@ -94,7 +92,7 @@ struct etnaviv_gem_submit {
> struct etnaviv_file_private *ctx;
> struct etnaviv_gpu *gpu;
> struct etnaviv_iommu_context *mmu_context, *prev_mmu_context;
> - struct dma_fence *out_fence, *in_fence;
> + struct dma_fence *out_fence;
> int out_fence_id;
> struct list_head node; /* GPU active submit list */
> struct etnaviv_cmdbuf cmdbuf;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index 592cbb38609a..5f502c49aec2 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -188,9 +188,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
> if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
> continue;
>
> - ret = dma_resv_get_fences(robj,
> - bo->flags & ETNA_SUBMIT_BO_WRITE,
> - &bo->nr_fences, &bo->fences);
> + ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
> + &bo->obj->base,
> + bo->flags & ETNA_SUBMIT_BO_WRITE);
> if (ret)
> return ret;
> }
> @@ -398,8 +398,6 @@ static void submit_cleanup(struct kref *kref)
>
> wake_up_all(&submit->gpu->fence_event);
>
> - if (submit->in_fence)
> - dma_fence_put(submit->in_fence);
> if (submit->out_fence) {
> /* first remove from IDR, so fence can not be found anymore */
> mutex_lock(&submit->gpu->fence_lock);
> @@ -530,58 +528,69 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
> ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
> ALIGN(args->stream_size, 8) + 8);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_put;
>
> submit->ctx = file->driver_priv;
> submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
> submit->exec_state = args->exec_state;
> submit->flags = args->flags;
>
> + ret = drm_sched_job_init(&submit->sched_job,
> + &ctx->sched_entity[args->pipe],
> + submit->ctx);
> + if (ret)
> + goto err_submit_put;
> +
> ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
> !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
> relocs, args->nr_relocs)) {
> ret = -EINVAL;
> - goto err_submit_objects;
> + goto err_submit_job;
> }
>
> if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
> - submit->in_fence = sync_file_get_fence(args->fence_fd);
> - if (!submit->in_fence) {
> + struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);
> + if (!in_fence) {
> ret = -EINVAL;
> - goto err_submit_objects;
> + goto err_submit_job;
> }
> +
> + ret = drm_sched_job_add_dependency(&submit->sched_job,
> + in_fence);
> + if (ret)
> + goto err_submit_job;
> }
>
> ret = submit_pin_objects(submit);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> ret = submit_reloc(submit, stream, args->stream_size / 4,
> relocs, args->nr_relocs);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
>
> ret = submit_lock_objects(submit, &ticket);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> ret = submit_fence_sync(submit);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> - ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
> + ret = etnaviv_sched_push_job(submit);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> submit_attach_object_fences(submit);
>
> @@ -595,7 +604,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
> sync_file = sync_file_create(submit->out_fence);
> if (!sync_file) {
> ret = -ENOMEM;
> - goto err_submit_objects;
> + goto err_submit_job;
> }
> fd_install(out_fence_fd, sync_file->file);
> }
> @@ -603,7 +612,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
> args->fence_fd = out_fence_fd;
> args->fence = submit->out_fence_id;
>
> -err_submit_objects:
> +err_submit_job:
> + drm_sched_job_cleanup(&submit->sched_job);
> +err_submit_put:
> etnaviv_submit_put(submit);
>
> err_submit_ww_acquire:
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> index a8452ce10e3a..72e2553fbc98 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> @@ -17,48 +17,6 @@ module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
> static int etnaviv_hw_jobs_limit = 4;
> module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int , 0444);
>
> -static struct dma_fence *
> -etnaviv_sched_dependency(struct drm_sched_job *sched_job,
> - struct drm_sched_entity *entity)
> -{
> - struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
> - struct dma_fence *fence;
> - int i;
> -
> - if (unlikely(submit->in_fence)) {
> - fence = submit->in_fence;
> - submit->in_fence = NULL;
> -
> - if (!dma_fence_is_signaled(fence))
> - return fence;
> -
> - dma_fence_put(fence);
> - }
> -
> - for (i = 0; i < submit->nr_bos; i++) {
> - struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
> - int j;
> -
> - for (j = 0; j < bo->nr_fences; j++) {
> - if (!bo->fences[j])
> - continue;
> -
> - fence = bo->fences[j];
> - bo->fences[j] = NULL;
> -
> - if (!dma_fence_is_signaled(fence))
> - return fence;
> -
> - dma_fence_put(fence);
> - }
> - kfree(bo->fences);
> - bo->nr_fences = 0;
> - bo->fences = NULL;
> - }
> -
> - return NULL;
> -}
> -
> static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
> {
> struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
> @@ -132,29 +90,22 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
> }
>
> static const struct drm_sched_backend_ops etnaviv_sched_ops = {
> - .dependency = etnaviv_sched_dependency,
> .run_job = etnaviv_sched_run_job,
> .timedout_job = etnaviv_sched_timedout_job,
> .free_job = etnaviv_sched_free_job,
> };
>
> -int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
> - struct etnaviv_gem_submit *submit)
> +int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
> {
> int ret = 0;
>
> /*
> * Hold the fence lock across the whole operation to avoid jobs being
> * pushed out of order with regard to their sched fence seqnos as
> - * allocated in drm_sched_job_init.
> + * allocated in drm_sched_job_arm.
> */
> mutex_lock(&submit->gpu->fence_lock);
>
> - ret = drm_sched_job_init(&submit->sched_job, sched_entity,
> - submit->ctx);
> - if (ret)
> - goto out_unlock;
> -
> drm_sched_job_arm(&submit->sched_job);
>
> submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.h b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
> index c0a6796e22c9..baebfa069afc 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
> @@ -18,7 +18,6 @@ struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
>
> int etnaviv_sched_init(struct etnaviv_gpu *gpu);
> void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
> -int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
> - struct etnaviv_gem_submit *submit);
> +int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit);
>
> #endif /* __ETNAVIV_SCHED_H__ */
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 1/4] drm/etnaviv: Use scheduler dependency handling
2022-03-31 20:46 ` [PATCH 1/4] drm/etnaviv: Use scheduler dependency handling Daniel Vetter
2022-04-01 7:49 ` Christian König
@ 2022-04-04 9:12 ` Lucas Stach
2022-04-28 23:59 ` Michael Walle
2 siblings, 0 replies; 8+ messages in thread
From: Lucas Stach @ 2022-04-04 9:12 UTC (permalink / raw)
To: Daniel Vetter, DRI Development
Cc: etnaviv, Christian König, linaro-mm-sig, Christian Gmeiner,
Russell King, Daniel Vetter, linux-media, Sumit Semwal
Am Donnerstag, dem 31.03.2022 um 22:46 +0200 schrieb Daniel Vetter:
> We need to pull the drm_sched_job_init much earlier, but that's very
> minor surgery.
>
> v2: Actually fix up cleanup paths by calling drm_sched_job_init, which
> I wanted to to in the previous round (and did, for all other drivers).
> Spotted by Lucas.
>
> v3: Rebase over renamed functions to add dependencies.
>
> v4: Rebase over patches from Christian.
>
> v5: More rebasing over work from Christian.
>
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Lucas Stach <l.stach@pengutronix.de>
> Cc: Russell King <linux+etnaviv@armlinux.org.uk>
> Cc: Christian Gmeiner <christian.gmeiner@gmail.com>
> Cc: Sumit Semwal <sumit.semwal@linaro.org>
> Cc: "Christian König" <christian.koenig@amd.com>
> Cc: etnaviv@lists.freedesktop.org
> Cc: linux-media@vger.kernel.org
> Cc: linaro-mm-sig@lists.linaro.org
Acked-by: Lucas Stach <l.stach@pengutronix.de>
> ---
> drivers/gpu/drm/etnaviv/etnaviv_gem.h | 4 +-
> drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 51 +++++++++++--------
> drivers/gpu/drm/etnaviv/etnaviv_sched.c | 53 +-------------------
> drivers/gpu/drm/etnaviv/etnaviv_sched.h | 3 +-
> 4 files changed, 35 insertions(+), 76 deletions(-)
>
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> index 8983a0ef383e..63688e6e4580 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> @@ -80,8 +80,6 @@ struct etnaviv_gem_submit_bo {
> u64 va;
> struct etnaviv_gem_object *obj;
> struct etnaviv_vram_mapping *mapping;
> - unsigned int nr_fences;
> - struct dma_fence **fences;
> };
>
> /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
> @@ -94,7 +92,7 @@ struct etnaviv_gem_submit {
> struct etnaviv_file_private *ctx;
> struct etnaviv_gpu *gpu;
> struct etnaviv_iommu_context *mmu_context, *prev_mmu_context;
> - struct dma_fence *out_fence, *in_fence;
> + struct dma_fence *out_fence;
> int out_fence_id;
> struct list_head node; /* GPU active submit list */
> struct etnaviv_cmdbuf cmdbuf;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index 592cbb38609a..5f502c49aec2 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -188,9 +188,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
> if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
> continue;
>
> - ret = dma_resv_get_fences(robj,
> - bo->flags & ETNA_SUBMIT_BO_WRITE,
> - &bo->nr_fences, &bo->fences);
> + ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
> + &bo->obj->base,
> + bo->flags & ETNA_SUBMIT_BO_WRITE);
> if (ret)
> return ret;
> }
> @@ -398,8 +398,6 @@ static void submit_cleanup(struct kref *kref)
>
> wake_up_all(&submit->gpu->fence_event);
>
> - if (submit->in_fence)
> - dma_fence_put(submit->in_fence);
> if (submit->out_fence) {
> /* first remove from IDR, so fence can not be found anymore */
> mutex_lock(&submit->gpu->fence_lock);
> @@ -530,58 +528,69 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
> ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
> ALIGN(args->stream_size, 8) + 8);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_put;
>
> submit->ctx = file->driver_priv;
> submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
> submit->exec_state = args->exec_state;
> submit->flags = args->flags;
>
> + ret = drm_sched_job_init(&submit->sched_job,
> + &ctx->sched_entity[args->pipe],
> + submit->ctx);
> + if (ret)
> + goto err_submit_put;
> +
> ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
> !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
> relocs, args->nr_relocs)) {
> ret = -EINVAL;
> - goto err_submit_objects;
> + goto err_submit_job;
> }
>
> if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
> - submit->in_fence = sync_file_get_fence(args->fence_fd);
> - if (!submit->in_fence) {
> + struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);
> + if (!in_fence) {
> ret = -EINVAL;
> - goto err_submit_objects;
> + goto err_submit_job;
> }
> +
> + ret = drm_sched_job_add_dependency(&submit->sched_job,
> + in_fence);
> + if (ret)
> + goto err_submit_job;
> }
>
> ret = submit_pin_objects(submit);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> ret = submit_reloc(submit, stream, args->stream_size / 4,
> relocs, args->nr_relocs);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
>
> ret = submit_lock_objects(submit, &ticket);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> ret = submit_fence_sync(submit);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> - ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
> + ret = etnaviv_sched_push_job(submit);
> if (ret)
> - goto err_submit_objects;
> + goto err_submit_job;
>
> submit_attach_object_fences(submit);
>
> @@ -595,7 +604,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
> sync_file = sync_file_create(submit->out_fence);
> if (!sync_file) {
> ret = -ENOMEM;
> - goto err_submit_objects;
> + goto err_submit_job;
> }
> fd_install(out_fence_fd, sync_file->file);
> }
> @@ -603,7 +612,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
> args->fence_fd = out_fence_fd;
> args->fence = submit->out_fence_id;
>
> -err_submit_objects:
> +err_submit_job:
> + drm_sched_job_cleanup(&submit->sched_job);
> +err_submit_put:
> etnaviv_submit_put(submit);
>
> err_submit_ww_acquire:
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> index a8452ce10e3a..72e2553fbc98 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> @@ -17,48 +17,6 @@ module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
> static int etnaviv_hw_jobs_limit = 4;
> module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int , 0444);
>
> -static struct dma_fence *
> -etnaviv_sched_dependency(struct drm_sched_job *sched_job,
> - struct drm_sched_entity *entity)
> -{
> - struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
> - struct dma_fence *fence;
> - int i;
> -
> - if (unlikely(submit->in_fence)) {
> - fence = submit->in_fence;
> - submit->in_fence = NULL;
> -
> - if (!dma_fence_is_signaled(fence))
> - return fence;
> -
> - dma_fence_put(fence);
> - }
> -
> - for (i = 0; i < submit->nr_bos; i++) {
> - struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
> - int j;
> -
> - for (j = 0; j < bo->nr_fences; j++) {
> - if (!bo->fences[j])
> - continue;
> -
> - fence = bo->fences[j];
> - bo->fences[j] = NULL;
> -
> - if (!dma_fence_is_signaled(fence))
> - return fence;
> -
> - dma_fence_put(fence);
> - }
> - kfree(bo->fences);
> - bo->nr_fences = 0;
> - bo->fences = NULL;
> - }
> -
> - return NULL;
> -}
> -
> static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
> {
> struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
> @@ -132,29 +90,22 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
> }
>
> static const struct drm_sched_backend_ops etnaviv_sched_ops = {
> - .dependency = etnaviv_sched_dependency,
> .run_job = etnaviv_sched_run_job,
> .timedout_job = etnaviv_sched_timedout_job,
> .free_job = etnaviv_sched_free_job,
> };
>
> -int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
> - struct etnaviv_gem_submit *submit)
> +int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
> {
> int ret = 0;
>
> /*
> * Hold the fence lock across the whole operation to avoid jobs being
> * pushed out of order with regard to their sched fence seqnos as
> - * allocated in drm_sched_job_init.
> + * allocated in drm_sched_job_arm.
> */
> mutex_lock(&submit->gpu->fence_lock);
>
> - ret = drm_sched_job_init(&submit->sched_job, sched_entity,
> - submit->ctx);
> - if (ret)
> - goto out_unlock;
> -
> drm_sched_job_arm(&submit->sched_job);
>
> submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.h b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
> index c0a6796e22c9..baebfa069afc 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
> @@ -18,7 +18,6 @@ struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
>
> int etnaviv_sched_init(struct etnaviv_gpu *gpu);
> void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
> -int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
> - struct etnaviv_gem_submit *submit);
> +int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit);
>
> #endif /* __ETNAVIV_SCHED_H__ */
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 2/4] drm/gem: Delete gem array fencing helpers
2022-03-31 20:46 ` [PATCH 2/4] drm/gem: Delete gem array fencing helpers Daniel Vetter
@ 2022-04-04 13:15 ` Daniel Vetter
2022-04-04 13:30 ` Christian König
0 siblings, 1 reply; 8+ messages in thread
From: Daniel Vetter @ 2022-04-04 13:15 UTC (permalink / raw)
To: DRI Development
Cc: Daniel Vetter, Daniel Vetter, Maarten Lankhorst, Maxime Ripard,
Thomas Zimmermann, David Airlie, Daniel Vetter, Sumit Semwal,
Christian König, linux-media, linaro-mm-sig
On Thu, Mar 31, 2022 at 10:46:49PM +0200, Daniel Vetter wrote:
> Integrated into the scheduler now and all users converted over.
>
> v2: Rebased over changes from König.
>
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
> Cc: Maxime Ripard <mripard@kernel.org>
> Cc: Thomas Zimmermann <tzimmermann@suse.de>
> Cc: David Airlie <airlied@linux.ie>
> Cc: Daniel Vetter <daniel@ffwll.ch>
> Cc: Sumit Semwal <sumit.semwal@linaro.org>
> Cc: "Christian König" <christian.koenig@amd.com>
> Cc: linux-media@vger.kernel.org
> Cc: linaro-mm-sig@lists.linaro.org
Anyone up for an ack for this one here?
Thanks, Daniel
> ---
> drivers/gpu/drm/drm_gem.c | 80 ---------------------------------------
> include/drm/drm_gem.h | 5 ---
> 2 files changed, 85 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index 56fb87885146..133dfae06fab 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -1273,83 +1273,3 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
> ww_acquire_fini(acquire_ctx);
> }
> EXPORT_SYMBOL(drm_gem_unlock_reservations);
> -
> -/**
> - * drm_gem_fence_array_add - Adds the fence to an array of fences to be
> - * waited on, deduplicating fences from the same context.
> - *
> - * @fence_array: array of dma_fence * for the job to block on.
> - * @fence: the dma_fence to add to the list of dependencies.
> - *
> - * This function consumes the reference for @fence both on success and error
> - * cases.
> - *
> - * Returns:
> - * 0 on success, or an error on failing to expand the array.
> - */
> -int drm_gem_fence_array_add(struct xarray *fence_array,
> - struct dma_fence *fence)
> -{
> - struct dma_fence *entry;
> - unsigned long index;
> - u32 id = 0;
> - int ret;
> -
> - if (!fence)
> - return 0;
> -
> - /* Deduplicate if we already depend on a fence from the same context.
> - * This lets the size of the array of deps scale with the number of
> - * engines involved, rather than the number of BOs.
> - */
> - xa_for_each(fence_array, index, entry) {
> - if (entry->context != fence->context)
> - continue;
> -
> - if (dma_fence_is_later(fence, entry)) {
> - dma_fence_put(entry);
> - xa_store(fence_array, index, fence, GFP_KERNEL);
> - } else {
> - dma_fence_put(fence);
> - }
> - return 0;
> - }
> -
> - ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
> - if (ret != 0)
> - dma_fence_put(fence);
> -
> - return ret;
> -}
> -EXPORT_SYMBOL(drm_gem_fence_array_add);
> -
> -/**
> - * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
> - * in the GEM object's reservation object to an array of dma_fences for use in
> - * scheduling a rendering job.
> - *
> - * This should be called after drm_gem_lock_reservations() on your array of
> - * GEM objects used in the job but before updating the reservations with your
> - * own fences.
> - *
> - * @fence_array: array of dma_fence * for the job to block on.
> - * @obj: the gem object to add new dependencies from.
> - * @write: whether the job might write the object (so we need to depend on
> - * shared fences in the reservation object).
> - */
> -int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
> - struct drm_gem_object *obj,
> - bool write)
> -{
> - struct dma_resv_iter cursor;
> - struct dma_fence *fence;
> - int ret = 0;
> -
> - dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
> - ret = drm_gem_fence_array_add(fence_array, fence);
> - if (ret)
> - break;
> - }
> - return ret;
> -}
> -EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
> diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
> index e2941cee14b6..9d7c61a122dc 100644
> --- a/include/drm/drm_gem.h
> +++ b/include/drm/drm_gem.h
> @@ -407,11 +407,6 @@ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
> struct ww_acquire_ctx *acquire_ctx);
> void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
> struct ww_acquire_ctx *acquire_ctx);
> -int drm_gem_fence_array_add(struct xarray *fence_array,
> - struct dma_fence *fence);
> -int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
> - struct drm_gem_object *obj,
> - bool write);
> int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
> u32 handle, u64 *offset);
>
> --
> 2.34.1
>
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 2/4] drm/gem: Delete gem array fencing helpers
2022-04-04 13:15 ` Daniel Vetter
@ 2022-04-04 13:30 ` Christian König
2022-04-04 15:35 ` Daniel Vetter
0 siblings, 1 reply; 8+ messages in thread
From: Christian König @ 2022-04-04 13:30 UTC (permalink / raw)
To: Daniel Vetter, DRI Development
Cc: Daniel Vetter, Daniel Vetter, Maarten Lankhorst, Maxime Ripard,
Thomas Zimmermann, David Airlie, Sumit Semwal, linux-media,
linaro-mm-sig
Am 04.04.22 um 15:15 schrieb Daniel Vetter:
> On Thu, Mar 31, 2022 at 10:46:49PM +0200, Daniel Vetter wrote:
>> Integrated into the scheduler now and all users converted over.
>>
>> v2: Rebased over changes from König.
>>
>> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
>> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
>> Cc: Maxime Ripard <mripard@kernel.org>
>> Cc: Thomas Zimmermann <tzimmermann@suse.de>
>> Cc: David Airlie <airlied@linux.ie>
>> Cc: Daniel Vetter <daniel@ffwll.ch>
>> Cc: Sumit Semwal <sumit.semwal@linaro.org>
>> Cc: "Christian König" <christian.koenig@amd.com>
>> Cc: linux-media@vger.kernel.org
>> Cc: linaro-mm-sig@lists.linaro.org
> Anyone up for an ack for this one here?
Acked-by: Christian König <christian.koenig@amd.com>
Please land that ASAP so that I can rebase on top.
Thanks,
Christian.
>
> Thanks, Daniel
>
>> ---
>> drivers/gpu/drm/drm_gem.c | 80 ---------------------------------------
>> include/drm/drm_gem.h | 5 ---
>> 2 files changed, 85 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
>> index 56fb87885146..133dfae06fab 100644
>> --- a/drivers/gpu/drm/drm_gem.c
>> +++ b/drivers/gpu/drm/drm_gem.c
>> @@ -1273,83 +1273,3 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
>> ww_acquire_fini(acquire_ctx);
>> }
>> EXPORT_SYMBOL(drm_gem_unlock_reservations);
>> -
>> -/**
>> - * drm_gem_fence_array_add - Adds the fence to an array of fences to be
>> - * waited on, deduplicating fences from the same context.
>> - *
>> - * @fence_array: array of dma_fence * for the job to block on.
>> - * @fence: the dma_fence to add to the list of dependencies.
>> - *
>> - * This function consumes the reference for @fence both on success and error
>> - * cases.
>> - *
>> - * Returns:
>> - * 0 on success, or an error on failing to expand the array.
>> - */
>> -int drm_gem_fence_array_add(struct xarray *fence_array,
>> - struct dma_fence *fence)
>> -{
>> - struct dma_fence *entry;
>> - unsigned long index;
>> - u32 id = 0;
>> - int ret;
>> -
>> - if (!fence)
>> - return 0;
>> -
>> - /* Deduplicate if we already depend on a fence from the same context.
>> - * This lets the size of the array of deps scale with the number of
>> - * engines involved, rather than the number of BOs.
>> - */
>> - xa_for_each(fence_array, index, entry) {
>> - if (entry->context != fence->context)
>> - continue;
>> -
>> - if (dma_fence_is_later(fence, entry)) {
>> - dma_fence_put(entry);
>> - xa_store(fence_array, index, fence, GFP_KERNEL);
>> - } else {
>> - dma_fence_put(fence);
>> - }
>> - return 0;
>> - }
>> -
>> - ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
>> - if (ret != 0)
>> - dma_fence_put(fence);
>> -
>> - return ret;
>> -}
>> -EXPORT_SYMBOL(drm_gem_fence_array_add);
>> -
>> -/**
>> - * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
>> - * in the GEM object's reservation object to an array of dma_fences for use in
>> - * scheduling a rendering job.
>> - *
>> - * This should be called after drm_gem_lock_reservations() on your array of
>> - * GEM objects used in the job but before updating the reservations with your
>> - * own fences.
>> - *
>> - * @fence_array: array of dma_fence * for the job to block on.
>> - * @obj: the gem object to add new dependencies from.
>> - * @write: whether the job might write the object (so we need to depend on
>> - * shared fences in the reservation object).
>> - */
>> -int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
>> - struct drm_gem_object *obj,
>> - bool write)
>> -{
>> - struct dma_resv_iter cursor;
>> - struct dma_fence *fence;
>> - int ret = 0;
>> -
>> - dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
>> - ret = drm_gem_fence_array_add(fence_array, fence);
>> - if (ret)
>> - break;
>> - }
>> - return ret;
>> -}
>> -EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
>> diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
>> index e2941cee14b6..9d7c61a122dc 100644
>> --- a/include/drm/drm_gem.h
>> +++ b/include/drm/drm_gem.h
>> @@ -407,11 +407,6 @@ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
>> struct ww_acquire_ctx *acquire_ctx);
>> void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
>> struct ww_acquire_ctx *acquire_ctx);
>> -int drm_gem_fence_array_add(struct xarray *fence_array,
>> - struct dma_fence *fence);
>> -int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
>> - struct drm_gem_object *obj,
>> - bool write);
>> int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
>> u32 handle, u64 *offset);
>>
>> --
>> 2.34.1
>>
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 2/4] drm/gem: Delete gem array fencing helpers
2022-04-04 13:30 ` Christian König
@ 2022-04-04 15:35 ` Daniel Vetter
0 siblings, 0 replies; 8+ messages in thread
From: Daniel Vetter @ 2022-04-04 15:35 UTC (permalink / raw)
To: Christian König
Cc: Daniel Vetter, DRI Development, Daniel Vetter, Daniel Vetter,
Maarten Lankhorst, Maxime Ripard, Thomas Zimmermann, David Airlie,
Sumit Semwal, linux-media, linaro-mm-sig
On Mon, Apr 04, 2022 at 03:30:59PM +0200, Christian König wrote:
> Am 04.04.22 um 15:15 schrieb Daniel Vetter:
> > On Thu, Mar 31, 2022 at 10:46:49PM +0200, Daniel Vetter wrote:
> > > Integrated into the scheduler now and all users converted over.
> > >
> > > v2: Rebased over changes from König.
> > >
> > > Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> > > Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
> > > Cc: Maxime Ripard <mripard@kernel.org>
> > > Cc: Thomas Zimmermann <tzimmermann@suse.de>
> > > Cc: David Airlie <airlied@linux.ie>
> > > Cc: Daniel Vetter <daniel@ffwll.ch>
> > > Cc: Sumit Semwal <sumit.semwal@linaro.org>
> > > Cc: "Christian König" <christian.koenig@amd.com>
> > > Cc: linux-media@vger.kernel.org
> > > Cc: linaro-mm-sig@lists.linaro.org
> > Anyone up for an ack for this one here?
>
> Acked-by: Christian König <christian.koenig@amd.com>
>
> Please land that ASAP so that I can rebase on top.
First 3 patches pushed, I'll drop the fourth.
-Daniel
>
> Thanks,
> Christian.
>
> >
> > Thanks, Daniel
> >
> > > ---
> > > drivers/gpu/drm/drm_gem.c | 80 ---------------------------------------
> > > include/drm/drm_gem.h | 5 ---
> > > 2 files changed, 85 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> > > index 56fb87885146..133dfae06fab 100644
> > > --- a/drivers/gpu/drm/drm_gem.c
> > > +++ b/drivers/gpu/drm/drm_gem.c
> > > @@ -1273,83 +1273,3 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
> > > ww_acquire_fini(acquire_ctx);
> > > }
> > > EXPORT_SYMBOL(drm_gem_unlock_reservations);
> > > -
> > > -/**
> > > - * drm_gem_fence_array_add - Adds the fence to an array of fences to be
> > > - * waited on, deduplicating fences from the same context.
> > > - *
> > > - * @fence_array: array of dma_fence * for the job to block on.
> > > - * @fence: the dma_fence to add to the list of dependencies.
> > > - *
> > > - * This function consumes the reference for @fence both on success and error
> > > - * cases.
> > > - *
> > > - * Returns:
> > > - * 0 on success, or an error on failing to expand the array.
> > > - */
> > > -int drm_gem_fence_array_add(struct xarray *fence_array,
> > > - struct dma_fence *fence)
> > > -{
> > > - struct dma_fence *entry;
> > > - unsigned long index;
> > > - u32 id = 0;
> > > - int ret;
> > > -
> > > - if (!fence)
> > > - return 0;
> > > -
> > > - /* Deduplicate if we already depend on a fence from the same context.
> > > - * This lets the size of the array of deps scale with the number of
> > > - * engines involved, rather than the number of BOs.
> > > - */
> > > - xa_for_each(fence_array, index, entry) {
> > > - if (entry->context != fence->context)
> > > - continue;
> > > -
> > > - if (dma_fence_is_later(fence, entry)) {
> > > - dma_fence_put(entry);
> > > - xa_store(fence_array, index, fence, GFP_KERNEL);
> > > - } else {
> > > - dma_fence_put(fence);
> > > - }
> > > - return 0;
> > > - }
> > > -
> > > - ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
> > > - if (ret != 0)
> > > - dma_fence_put(fence);
> > > -
> > > - return ret;
> > > -}
> > > -EXPORT_SYMBOL(drm_gem_fence_array_add);
> > > -
> > > -/**
> > > - * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
> > > - * in the GEM object's reservation object to an array of dma_fences for use in
> > > - * scheduling a rendering job.
> > > - *
> > > - * This should be called after drm_gem_lock_reservations() on your array of
> > > - * GEM objects used in the job but before updating the reservations with your
> > > - * own fences.
> > > - *
> > > - * @fence_array: array of dma_fence * for the job to block on.
> > > - * @obj: the gem object to add new dependencies from.
> > > - * @write: whether the job might write the object (so we need to depend on
> > > - * shared fences in the reservation object).
> > > - */
> > > -int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
> > > - struct drm_gem_object *obj,
> > > - bool write)
> > > -{
> > > - struct dma_resv_iter cursor;
> > > - struct dma_fence *fence;
> > > - int ret = 0;
> > > -
> > > - dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
> > > - ret = drm_gem_fence_array_add(fence_array, fence);
> > > - if (ret)
> > > - break;
> > > - }
> > > - return ret;
> > > -}
> > > -EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
> > > diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
> > > index e2941cee14b6..9d7c61a122dc 100644
> > > --- a/include/drm/drm_gem.h
> > > +++ b/include/drm/drm_gem.h
> > > @@ -407,11 +407,6 @@ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
> > > struct ww_acquire_ctx *acquire_ctx);
> > > void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
> > > struct ww_acquire_ctx *acquire_ctx);
> > > -int drm_gem_fence_array_add(struct xarray *fence_array,
> > > - struct dma_fence *fence);
> > > -int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
> > > - struct drm_gem_object *obj,
> > > - bool write);
> > > int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
> > > u32 handle, u64 *offset);
> > > --
> > > 2.34.1
> > >
>
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 1/4] drm/etnaviv: Use scheduler dependency handling
2022-03-31 20:46 ` [PATCH 1/4] drm/etnaviv: Use scheduler dependency handling Daniel Vetter
2022-04-01 7:49 ` Christian König
2022-04-04 9:12 ` Lucas Stach
@ 2022-04-28 23:59 ` Michael Walle
2 siblings, 0 replies; 8+ messages in thread
From: Michael Walle @ 2022-04-28 23:59 UTC (permalink / raw)
To: daniel.vetter
Cc: christian.gmeiner, christian.koenig, daniel.vetter, dri-devel,
etnaviv, l.stach, linaro-mm-sig, linux+etnaviv, linux-media,
sumit.semwal, Michael Walle
> We need to pull the drm_sched_job_init much earlier, but that's very
> minor surgery.
This patch breaks the GC7000 on the LS1028A:
[ 35.671102] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000078
[ 35.680925] Mem abort info:
[ 35.685127] ESR = 0x96000004
[ 35.689583] EC = 0x25: DABT (current EL), IL = 32 bits
[ 35.696312] SET = 0, FnV = 0
[ 35.700766] EA = 0, S1PTW = 0
[ 35.705315] FSC = 0x04: level 0 translation fault
[ 35.711616] Data abort info:
[ 35.715916] ISV = 0, ISS = 0x00000004
[ 35.721170] CM = 0, WnR = 0
[ 35.725552] user pgtable: 4k pages, 48-bit VAs, pgdp=0000002083f59000
[ 35.733420] [0000000000000078] pgd=0000000000000000, p4d=0000000000000000
[ 35.741627] Internal error: Oops: 96000004 [#1] SMP
[ 35.747902] Modules linked in:
[ 35.750963] CPU: 0 PID: 44 Comm: f0c0000.gpu Not tainted 5.18.0-rc2-00894-gde6a1d7294f5 #24
[ 35.759345] Hardware name: Kontron KBox A-230-LS (DT)
[ 35.764409] pstate: 00000005 (nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[ 35.771394] pc : drm_sched_entity_select_rq+0x314/0x380
[ 35.776645] lr : drm_sched_entity_pop_job+0x4c/0x480
[ 35.781625] sp : ffff80000949bdb0
[ 35.784943] x29: ffff80000949bdb0 x28: 0000000000000000 x27: 0000000000000000
[ 35.792107] x26: ffff002003f09008 x25: ffff00200231d130 x24: ffff800008c13008
[ 35.799270] x23: ffff8000086af900 x22: ffff800008c13008 x21: ffff002003fb6e00
[ 35.806432] x20: 0000000000000040 x19: ffff002003f09008 x18: ffffffffffffffff
[ 35.813594] x17: 0000000000000000 x16: 0000000000000000 x15: ffff800009ee3d48
[ 35.820756] x14: 0000000000000000 x13: 0000000000000000 x12: ffffffff00000008
[ 35.827918] x11: 0000aaaadb063d30 x10: 0000000000000960 x9 : ffff8000086afedc
[ 35.835080] x8 : ffff00200186a900 x7 : 0000000000000000 x6 : 0000000000000000
[ 35.842242] x5 : ffff00200231d2b0 x4 : 0000000000000000 x3 : 0000000000000000
[ 35.849403] x2 : 0000000000000000 x1 : 0000000000000078 x0 : 0000000000000078
[ 35.856565] Call trace:
[ 35.859013] drm_sched_entity_select_rq+0x314/0x380
[ 35.863906] drm_sched_main+0x1b0/0x49c
[ 35.867752] kthread+0xe4/0xf0
[ 35.870814] ret_from_fork+0x10/0x20
[ 35.874401] Code: 8805fc24 35ffffa5 17fffef9 f9800031 (885f7c22)
[ 35.880513] ---[ end trace 0000000000000000 ]---
# glmark2-es2-drm
=======================================================
glmark2 2021.02
=======================================================
OpenGL Information
GL_VENDOR: etnaviv
GL_RENDERER: Vivante GC7000 rev 6202
GL_VERSION: OpenGL ES 2.0 Mesa 22.1.0-devel
Mesa is Lucas' latest MR branch: lynxeye/etnaviv-gc7000-r6204.
Reverting this patch on drm-next will make the oops go away. Any idea
what's going wrong here?
-michael
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2022-04-28 23:59 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
[not found] <20220331204651.2699107-1-daniel.vetter@ffwll.ch>
2022-03-31 20:46 ` [PATCH 1/4] drm/etnaviv: Use scheduler dependency handling Daniel Vetter
2022-04-01 7:49 ` Christian König
2022-04-04 9:12 ` Lucas Stach
2022-04-28 23:59 ` Michael Walle
2022-03-31 20:46 ` [PATCH 2/4] drm/gem: Delete gem array fencing helpers Daniel Vetter
2022-04-04 13:15 ` Daniel Vetter
2022-04-04 13:30 ` Christian König
2022-04-04 15:35 ` Daniel Vetter
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).