From: Luben Tuikov <luben.tuikov@amd.com>
To: "Christian König" <ckoenig.leichtzumerken@gmail.com>,
dri-devel@lists.freedesktop.org, amd-gfx@lists.freedesktop.org
Cc: "Christian König" <christian.koenig@amd.com>
Subject: Re: [PATCH 05/13] drm/amdgpu: drop amdgpu_sync from amdgpu_vmid_grab
Date: Sat, 22 Oct 2022 21:25:21 -0400 [thread overview]
Message-ID: <808ed8b2-ac3f-0476-df64-1a8d1749de0f@amd.com> (raw)
In-Reply-To: <20221014084641.128280-6-christian.koenig@amd.com>
On 2022-10-14 04:46, Christian König wrote:
> Instead return the fence directly. Avoids memory allocation to store the
> fence.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 42 +++++++++++++------------
> drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 2 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 12 +++----
> 3 files changed, 28 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index b76294d4275b..2a9a2593dc18 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -170,26 +170,27 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
> *
> * @vm: vm to allocate id for
> * @ring: ring we want to submit job to
> - * @sync: sync object where we add dependencies
> * @idle: resulting idle VMID
> + * @fence: fence to wait for if no id could be grabbed
> *
> * Try to find an idle VMID, if none is idle add a fence to wait to the sync
> * object. Returns -ENOMEM when we are out of memory.
> */
> static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
> struct amdgpu_ring *ring,
> - struct amdgpu_sync *sync,
> - struct amdgpu_vmid **idle)
> + struct amdgpu_vmid **idle,
> + struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> unsigned vmhub = ring->funcs->vmhub;
> struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
> struct dma_fence **fences;
> unsigned i;
> - int r;
>
> - if (!dma_fence_is_signaled(ring->vmid_wait))
> - return amdgpu_sync_fence(sync, ring->vmid_wait);
> + if (!dma_fence_is_signaled(ring->vmid_wait)) {
> + *fence = dma_fence_get(ring->vmid_wait);
> + return 0;
> + }
>
> fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
> if (!fences)
> @@ -228,10 +229,10 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
> return -ENOMEM;
> }
>
> - r = amdgpu_sync_fence(sync, &array->base);
> + *fence = dma_fence_get(&array->base);
> dma_fence_put(ring->vmid_wait);
> ring->vmid_wait = &array->base;
> - return r;
> + return 0;
> }
> kfree(fences);
>
> @@ -243,17 +244,17 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
> *
> * @vm: vm to allocate id for
> * @ring: ring we want to submit job to
> - * @sync: sync object where we add dependencies
> * @job: job who wants to use the VMID
> * @id: resulting VMID
> + * @fence: fence to wait for if no id could be grabbed
> *
> * Try to assign a reserved VMID.
> */
> static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
> struct amdgpu_ring *ring,
> - struct amdgpu_sync *sync,
> struct amdgpu_job *job,
> - struct amdgpu_vmid **id)
> + struct amdgpu_vmid **id,
> + struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> unsigned vmhub = ring->funcs->vmhub;
> @@ -280,7 +281,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
> tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
> if (tmp) {
> *id = NULL;
> - return amdgpu_sync_fence(sync, tmp);
> + *fence = dma_fence_get(tmp);
> + return 0;
> }
> needs_flush = true;
> }
> @@ -302,17 +304,17 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
> *
> * @vm: vm to allocate id for
> * @ring: ring we want to submit job to
> - * @sync: sync object where we add dependencies
> * @job: job who wants to use the VMID
> * @id: resulting VMID
> + * @fence: fence to wait for if no id could be grabbed
> *
> * Try to reuse a VMID for this submission.
> */
> static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
> struct amdgpu_ring *ring,
> - struct amdgpu_sync *sync,
> struct amdgpu_job *job,
> - struct amdgpu_vmid **id)
> + struct amdgpu_vmid **id,
> + struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> unsigned vmhub = ring->funcs->vmhub;
> @@ -367,13 +369,13 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
> *
> * @vm: vm to allocate id for
> * @ring: ring we want to submit job to
> - * @sync: sync object where we add dependencies
> * @job: job who wants to use the VMID
> + * @fence: fence to wait for if no id could be grabbed
> *
> * Allocate an id for the vm, adding fences to the sync obj as necessary.
> */
> int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> - struct amdgpu_sync *sync, struct amdgpu_job *job)
> + struct amdgpu_job *job, struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> unsigned vmhub = ring->funcs->vmhub;
> @@ -383,16 +385,16 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> int r = 0;
>
> mutex_lock(&id_mgr->lock);
> - r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
> + r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
> if (r || !idle)
> goto error;
>
> if (vm->reserved_vmid[vmhub]) {
> - r = amdgpu_vmid_grab_reserved(vm, ring, sync, job, &id);
> + r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
> if (r || !id)
> goto error;
> } else {
> - r = amdgpu_vmid_grab_used(vm, ring, sync, job, &id);
> + r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
> if (r)
> goto error;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
> index 1b1e7d04655c..57efe61dceed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
> @@ -84,7 +84,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
> struct amdgpu_vm *vm,
> unsigned vmhub);
> int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> - struct amdgpu_sync *sync, struct amdgpu_job *job);
> + struct amdgpu_job *job, struct dma_fence **fence);
> void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
> unsigned vmid);
> void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 13b752687b30..e187dc0ab898 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -238,12 +238,12 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
> return 0;
> }
>
> -static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
> - struct drm_sched_entity *s_entity)
> +static struct dma_fence *
> +amdgpu_job_dependency(struct drm_sched_job *sched_job,
> + struct drm_sched_entity *s_entity)
> {
> struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
> struct amdgpu_job *job = to_amdgpu_job(sched_job);
> - struct amdgpu_vm *vm = job->vm;
> struct dma_fence *fence;
> int r;
>
> @@ -254,12 +254,10 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
> DRM_ERROR("Error adding fence (%d)\n", r);
> }
>
> - while (fence == NULL && vm && !job->vmid) {
> - r = amdgpu_vmid_grab(vm, ring, &job->sync, job);
> + while (fence == NULL && job->vm && !job->vmid) {
In a preliminary application of the patch series, checkpatch.pl complains about the comparison to NULL
and wants !fence instead:
while (!fence && job->vm && !job->vmid) {
I can see that it had been like this before, and I know it's out of scope for this series,
but we should fix this at some point.
Regards,
Luben
next prev parent reply other threads:[~2022-10-23 1:25 UTC|newest]
Thread overview: 44+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-10-14 8:46 Fixes for scheduler hang when killing a process Christian König
2022-10-14 8:46 ` [PATCH 01/13] drm/scheduler: fix fence ref counting Christian König
2022-10-25 3:23 ` Luna Nova
2022-10-25 11:35 ` Christian König
2022-10-14 8:46 ` [PATCH 02/13] drm/scheduler: add drm_sched_job_add_resv_dependencies Christian König
2022-10-14 8:46 ` [PATCH 03/13] drm/amdgpu: use drm_sched_job_add_resv_dependencies for moves Christian König
2022-10-14 8:46 ` [PATCH 04/13] drm/amdgpu: drop the fence argument from amdgpu_vmid_grab Christian König
2022-10-14 8:46 ` [PATCH 05/13] drm/amdgpu: drop amdgpu_sync " Christian König
2022-10-23 1:25 ` Luben Tuikov [this message]
2022-10-24 10:54 ` Christian König
2022-10-14 8:46 ` [PATCH 06/13] drm/amdgpu: cleanup scheduler job initialization Christian König
2022-10-23 1:50 ` Luben Tuikov
2022-10-14 8:46 ` [PATCH 07/13] drm/amdgpu: move explicit sync check into the CS Christian König
2022-10-14 8:46 ` [PATCH 08/13] drm/amdgpu: use scheduler depenencies for VM updates Christian König
2022-10-24 5:50 ` Luben Tuikov
2022-10-14 8:46 ` [PATCH 09/13] drm/amdgpu: use scheduler depenencies for UVD msgs Christian König
2022-10-24 5:53 ` Luben Tuikov
2022-10-14 8:46 ` [PATCH 10/13] drm/amdgpu: use scheduler depenencies for CS Christian König
2022-10-24 5:55 ` Luben Tuikov
2022-12-21 15:34 ` Mike Lothian
2022-12-21 15:47 ` Mike Lothian
2022-12-21 15:52 ` Luben Tuikov
2022-12-21 15:55 ` Mike Lothian
2022-10-14 8:46 ` [PATCH 11/13] drm/scheduler: remove drm_sched_dependency_optimized Christian König
2022-10-14 8:46 ` [PATCH 12/13] drm/scheduler: rework entity flush, kill and fini Christian König
2022-11-17 2:36 ` Dmitry Osipenko
2022-11-17 9:53 ` Christian König
2022-11-17 12:47 ` Dmitry Osipenko
2022-11-17 12:55 ` Christian König
2022-11-17 12:59 ` Dmitry Osipenko
2022-11-17 13:00 ` Dmitry Osipenko
2022-11-17 13:11 ` Christian König
2022-11-17 14:41 ` Dmitry Osipenko
2022-11-17 15:09 ` Christian König
2022-11-17 15:11 ` Dmitry Osipenko
2022-12-28 16:27 ` Rob Clark
2022-12-28 16:52 ` Rob Clark
2023-01-01 18:29 ` youling257
2023-01-02 9:24 ` Dmitry Osipenko
2023-01-02 14:17 ` youling 257
2023-01-02 15:08 ` Dmitry Osipenko
2022-10-14 8:46 ` [PATCH 13/13] drm/scheduler: rename dependency callback into prepare_job Christian König
2022-10-23 1:35 ` Fixes for scheduler hang when killing a process Luben Tuikov
2022-10-24 7:00 ` Luben Tuikov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=808ed8b2-ac3f-0476-df64-1a8d1749de0f@amd.com \
--to=luben.tuikov@amd.com \
--cc=amd-gfx@lists.freedesktop.org \
--cc=christian.koenig@amd.com \
--cc=ckoenig.leichtzumerken@gmail.com \
--cc=dri-devel@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox