Linux kernel -stable discussions
 help / color / mirror / Atom feed
From: Lizhi Hou <lizhi.hou@amd.com>
To: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>,
	<dri-devel@lists.freedesktop.org>
Cc: <jeff.hugo@oss.qualcomm.com>, <stable@vger.kernel.org>
Subject: Re: [PATCH] accel/ivpu: Use dma_resv_lock() instead of a custom mutex
Date: Thu, 5 Jun 2025 02:00:28 -0700	[thread overview]
Message-ID: <6a4f4df9-0305-a89d-0ed7-8fedf0e31ffc@amd.com> (raw)
In-Reply-To: <a52588be-b487-433a-a74f-eaa1d7a88654@linux.intel.com>


On 6/2/25 05:58, Jacek Lawrynowicz wrote:
> Hi,
>
> On 5/28/2025 7:50 PM, Lizhi Hou wrote:
>> On 5/28/25 08:43, Jacek Lawrynowicz wrote:
>>> This fixes potential race conditions in:
>>>    - ivpu_bo_unbind_locked() where we modified the shmem->sgt without
>>>      holding the dma_resv_lock().
>>>    - ivpu_bo_print_info() where we read the shmem->pages without
>>>      holding the dma_resv_lock().
>>>
>>> Using dma_resv_lock() also protects against future synchronisation
>>> issues that may arise when accessing drm_gem_shmem_object or
>>> drm_gem_object members.
>>>
>>> Fixes: 42328003ecb6 ("accel/ivpu: Refactor BO creation functions")
>>> Cc: <stable@vger.kernel.org> # v6.9+
>>> Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
>>> ---
>>>    drivers/accel/ivpu/ivpu_gem.c | 63 +++++++++++++++++++----------------
>>>    drivers/accel/ivpu/ivpu_gem.h |  1 -
>>>    2 files changed, 34 insertions(+), 30 deletions(-)
>>>
>>> diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
>>> index c193a80241f5f..5908268ca45e9 100644
>>> --- a/drivers/accel/ivpu/ivpu_gem.c
>>> +++ b/drivers/accel/ivpu/ivpu_gem.c
>>> @@ -33,6 +33,16 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
>>>             (bool)bo->base.base.import_attach);
>>>    }
>>>    +static inline int ivpu_bo_lock(struct ivpu_bo *bo)
>>> +{
>>> +    return dma_resv_lock(bo->base.base.resv, NULL);
>>> +}
>>> +
>>> +static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
>>> +{
>>> +    dma_resv_unlock(bo->base.base.resv);
>>> +}
>>> +
>>>    /*
>>>     * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
>>>     *
>>> @@ -43,22 +53,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
>>>    int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
>>>    {
>>>        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
>>> +    struct sg_table *sgt;
>>>        int ret = 0;
>>>    -    mutex_lock(&bo->lock);
>>> -
>>>        ivpu_dbg_bo(vdev, bo, "pin");
>>> -    drm_WARN_ON(&vdev->drm, !bo->ctx);
>>>    -    if (!bo->mmu_mapped) {
>>> -        struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
>>> +    sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
>>> +    if (IS_ERR(sgt)) {
>>> +        ret = PTR_ERR(sgt);
>>> +        ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
>>> +        return ret;
>>> +    }
>>>    -        if (IS_ERR(sgt)) {
>>> -            ret = PTR_ERR(sgt);
>>> -            ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
>>> -            goto unlock;
>>> -        }
>>> +    ivpu_bo_lock(bo);
>>>    +    if (!bo->mmu_mapped) {
>>> +        drm_WARN_ON(&vdev->drm, !bo->ctx);
>>>            ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
>>>                               ivpu_bo_is_snooped(bo));
>>>            if (ret) {
>>> @@ -69,7 +79,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
>>>        }
>>>      unlock:
>>> -    mutex_unlock(&bo->lock);
>>> +    ivpu_bo_unlock(bo);
>>>          return ret;
>>>    }
>>> @@ -84,7 +94,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
>>>        if (!drm_dev_enter(&vdev->drm, &idx))
>>>            return -ENODEV;
>>>    -    mutex_lock(&bo->lock);
>>> +    ivpu_bo_lock(bo);
>>>          ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
>>>        if (!ret) {
>>> @@ -94,7 +104,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
>>>            ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
>>>        }
>>>    -    mutex_unlock(&bo->lock);
>>> +    ivpu_bo_unlock(bo);
>>>          drm_dev_exit(idx);
>>>    @@ -105,7 +115,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
>>>    {
>>>        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
>>>    -    lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
>>> +    lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
>>>          if (bo->mmu_mapped) {
>>>            drm_WARN_ON(&vdev->drm, !bo->ctx);
>>> @@ -123,14 +133,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
>>>        if (bo->base.base.import_attach)
>>>            return;
>>>    -    dma_resv_lock(bo->base.base.resv, NULL);
>>>        if (bo->base.sgt) {
>>>            dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
>>>            sg_free_table(bo->base.sgt);
>>>            kfree(bo->base.sgt);
>>>            bo->base.sgt = NULL;
>> Maybe not directly modify sgt but use drm_gem_shmem_purge()?
> drm_gem_shmem_purge() also removes user mapping and backing pages.
> In ivpu_bo_unbind_locked() we only want to unmap the buffer from the device as it being turned off.
> We don't want to crash the user process in this case, and that would probably be the result of calling drm_gem_shmem_purge() on an mmapped buffer.
>
>> Will it potentially memleak without calling drm_gem_shmem_put_pages()? (if the bo is mmap, vmap etc)
> There is no memory leak. We are calling drm_gem_shmem_get_pages_sgt() only once per object and drm_gem_shmem_free() frees all backing memory.
Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>
>
> Regards,
> Jacek

  reply	other threads:[~2025-06-05  9:00 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-05-28 15:43 [PATCH] accel/ivpu: Use dma_resv_lock() instead of a custom mutex Jacek Lawrynowicz
2025-05-28 17:50 ` Lizhi Hou
2025-06-02 12:58   ` Jacek Lawrynowicz
2025-06-05  9:00     ` Lizhi Hou [this message]
2025-06-05 12:44 ` Jacek Lawrynowicz

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=6a4f4df9-0305-a89d-0ed7-8fedf0e31ffc@amd.com \
    --to=lizhi.hou@amd.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=jacek.lawrynowicz@linux.intel.com \
    --cc=jeff.hugo@oss.qualcomm.com \
    --cc=stable@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox