From: Matthew Auld <matthew.auld@intel.com>
To: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: paulo.r.zanoni@intel.com, jani.nikula@intel.com,
intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
thomas.hellstrom@intel.com, daniel.vetter@intel.com,
christian.koenig@amd.com
Subject: Re: [Intel-gfx] [PATCH v7 20/20] drm/i915/vm_bind: Async vm_unbind support
Date: Tue, 15 Nov 2022 16:20:54 +0000 [thread overview]
Message-ID: <d8b028ea-4179-768a-7154-5b0bc3641cb1@intel.com> (raw)
In-Reply-To: <Y3O7NCxiZHjjmyN7@nvishwa1-DESK>
On 15/11/2022 16:15, Niranjana Vishwanathapura wrote:
> On Tue, Nov 15, 2022 at 11:05:21AM +0000, Matthew Auld wrote:
>> On 13/11/2022 07:57, Niranjana Vishwanathapura wrote:
>>> Asynchronously unbind the vma upon vm_unbind call.
>>> Fall back to synchronous unbind if backend doesn't support
>>> async unbind or if async unbind fails.
>>>
>>> No need for vm_unbind out fence support as i915 will internally
>>> handle all sequencing and user need not try to sequence any
>>> operation with the unbind completion.
>>>
>>> v2: use i915_vma_destroy_async in vm_unbind ioctl
>>>
>>> Signed-off-by: Niranjana Vishwanathapura
>>> <niranjana.vishwanathapura@intel.com>
>>
>> This only does it for non-partial vma, right? Or was that changed
>> somewhere?
>>
>
> No, it applies to any vma (partial or non-partial).
> It was so from the beginning.
Doesn't __i915_vma_unbind_async() return an error when mm.pages !=
vma->pages? IIRC this was discussed before. Just trying to think about
the consequences of this change.
>
> Niranjana
>
>> Reviewed-by: Matthew Auld <matthew.auld@intel.com>
>>
>>> ---
>>> .../drm/i915/gem/i915_gem_vm_bind_object.c | 2 +-
>>> drivers/gpu/drm/i915/i915_vma.c | 51 +++++++++++++++++--
>>> drivers/gpu/drm/i915/i915_vma.h | 1 +
>>> include/uapi/drm/i915_drm.h | 3 +-
>>> 4 files changed, 51 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
>>> b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
>>> index d87d1210365b..36651b447966 100644
>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_vm_bind_object.c
>>> @@ -210,7 +210,7 @@ static int i915_gem_vm_unbind_vma(struct
>>> i915_address_space *vm,
>>> */
>>> obj = vma->obj;
>>> i915_gem_object_lock(obj, NULL);
>>> - i915_vma_destroy(vma);
>>> + i915_vma_destroy_async(vma);
>>> i915_gem_object_unlock(obj);
>>> i915_gem_object_put(obj);
>>> diff --git a/drivers/gpu/drm/i915/i915_vma.c
>>> b/drivers/gpu/drm/i915/i915_vma.c
>>> index 7cf77c67d755..483d25f2425c 100644
>>> --- a/drivers/gpu/drm/i915/i915_vma.c
>>> +++ b/drivers/gpu/drm/i915/i915_vma.c
>>> @@ -42,6 +42,8 @@
>>> #include "i915_vma.h"
>>> #include "i915_vma_resource.h"
>>> +static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma);
>>> +
>>> static inline void assert_vma_held_evict(const struct i915_vma *vma)
>>> {
>>> /*
>>> @@ -1713,7 +1715,7 @@ void i915_vma_reopen(struct i915_vma *vma)
>>> spin_unlock_irq(&gt->closed_lock);
>>> }
>>> -static void force_unbind(struct i915_vma *vma)
>>> +static void force_unbind(struct i915_vma *vma, bool async)
>>> {
>>> if (!drm_mm_node_allocated(&vma->node))
>>> return;
>>> @@ -1727,7 +1729,21 @@ static void force_unbind(struct i915_vma *vma)
>>> i915_vma_set_purged(vma);
>>> atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
>>> - WARN_ON(__i915_vma_unbind(vma));
>>> + if (async) {
>>> + struct dma_fence *fence;
>>> +
>>> + fence = __i915_vma_unbind_async(vma);
>>> + if (IS_ERR_OR_NULL(fence)) {
>>> + async = false;
>>> + } else {
>>> + dma_resv_add_fence(vma->obj->base.resv, fence,
>>> + DMA_RESV_USAGE_READ);
>>> + dma_fence_put(fence);
>>> + }
>>> + }
>>> +
>>> + if (!async)
>>> + WARN_ON(__i915_vma_unbind(vma));
>>> GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
>>> }
>>> @@ -1787,7 +1803,7 @@ void i915_vma_destroy_locked(struct i915_vma *vma)
>>> {
>>> lockdep_assert_held(&vma->vm->mutex);
>>> - force_unbind(vma);
>>> + force_unbind(vma, false);
>>> list_del_init(&vma->vm_link);
>>> release_references(vma, vma->vm->gt, false);
>>> }
>>> @@ -1798,7 +1814,34 @@ void i915_vma_destroy(struct i915_vma *vma)
>>> bool vm_ddestroy;
>>> mutex_lock(&vma->vm->mutex);
>>> - force_unbind(vma);
>>> + force_unbind(vma, false);
>>> + list_del_init(&vma->vm_link);
>>> + vm_ddestroy = vma->vm_ddestroy;
>>> + vma->vm_ddestroy = false;
>>> +
>>> + /* vma->vm may be freed when releasing vma->vm->mutex. */
>>> + gt = vma->vm->gt;
>>> + mutex_unlock(&vma->vm->mutex);
>>> + release_references(vma, gt, vm_ddestroy);
>>> +}
>>> +
>>> +void i915_vma_destroy_async(struct i915_vma *vma)
>>> +{
>>> + bool vm_ddestroy, async = vma->obj->mm.rsgt;
>>> + struct intel_gt *gt;
>>> +
>>> + if (dma_resv_reserve_fences(vma->obj->base.resv, 1))
>>> + async = false;
>>> +
>>> + mutex_lock(&vma->vm->mutex);
>>> + /*
>>> + * Ensure any asynchronous binding is complete while using
>>> + * async unbind as we will be releasing the vma here.
>>> + */
>>> + if (async && i915_active_wait(&vma->active))
>>> + async = false;
>>> +
>>> + force_unbind(vma, async);
>>> list_del_init(&vma->vm_link);
>>> vm_ddestroy = vma->vm_ddestroy;
>>> vma->vm_ddestroy = false;
>>> diff --git a/drivers/gpu/drm/i915/i915_vma.h
>>> b/drivers/gpu/drm/i915/i915_vma.h
>>> index 737ef310d046..25f15965dab8 100644
>>> --- a/drivers/gpu/drm/i915/i915_vma.h
>>> +++ b/drivers/gpu/drm/i915/i915_vma.h
>>> @@ -272,6 +272,7 @@ void i915_vma_reopen(struct i915_vma *vma);
>>> void i915_vma_destroy_locked(struct i915_vma *vma);
>>> void i915_vma_destroy(struct i915_vma *vma);
>>> +void i915_vma_destroy_async(struct i915_vma *vma);
>>> #define assert_vma_held(vma)
>>> dma_resv_assert_held((vma)->obj->base.resv)
>>> diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
>>> index e5600f358a15..431d40bb1dee 100644
>>> --- a/include/uapi/drm/i915_drm.h
>>> +++ b/include/uapi/drm/i915_drm.h
>>> @@ -3969,7 +3969,8 @@ struct drm_i915_gem_vm_bind {
>>> * any error.
>>> *
>>> * VM_BIND/UNBIND ioctl calls executed on different CPU threads
>>> concurrently
>>> - * are not ordered.
>>> + * are not ordered. Furthermore, parts of the VM_UNBIND operation
>>> can be done
>>> + * asynchronously.
>>> */
>>> struct drm_i915_gem_vm_unbind {
>>> /** @vm_id: VM (address space) id to bind */
next prev parent reply other threads:[~2022-11-15 16:21 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-11-13 7:57 [Intel-gfx] [PATCH v7 00/20] drm/i915/vm_bind: Add VM_BIND functionality Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 01/20] drm/i915/vm_bind: Expose vm lookup function Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 02/20] drm/i915/vm_bind: Add __i915_sw_fence_await_reservation() Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 03/20] drm/i915/vm_bind: Expose i915_gem_object_max_page_size() Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 04/20] drm/i915/vm_bind: Add support to create persistent vma Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 05/20] drm/i915/vm_bind: Implement bind and unbind of object Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 06/20] drm/i915/vm_bind: Support for VM private BOs Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 07/20] drm/i915/vm_bind: Add support to handle object evictions Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 08/20] drm/i915/vm_bind: Support persistent vma activeness tracking Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 09/20] drm/i915/vm_bind: Add out fence support Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 10/20] drm/i915/vm_bind: Abstract out common execbuf functions Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 11/20] drm/i915/vm_bind: Use common execbuf functions in execbuf path Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 12/20] drm/i915/vm_bind: Implement I915_GEM_EXECBUFFER3 ioctl Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 13/20] drm/i915/vm_bind: Update i915_vma_verify_bind_complete() Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 14/20] drm/i915/vm_bind: Expose i915_request_await_bind() Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 15/20] drm/i915/vm_bind: Handle persistent vmas in execbuf3 Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 16/20] drm/i915/vm_bind: userptr dma-resv changes Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 17/20] drm/i915/vm_bind: Limit vm_bind mode to non-recoverable contexts Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 18/20] drm/i915/vm_bind: Add uapi for user to enable vm_bind_mode Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 19/20] drm/i915/vm_bind: Render VM_BIND documentation Niranjana Vishwanathapura
2022-11-13 7:57 ` [Intel-gfx] [PATCH v7 20/20] drm/i915/vm_bind: Async vm_unbind support Niranjana Vishwanathapura
2022-11-15 11:05 ` Matthew Auld
2022-11-15 16:15 ` Niranjana Vishwanathapura
2022-11-15 16:20 ` Matthew Auld [this message]
2022-11-15 16:33 ` Niranjana Vishwanathapura
2022-11-15 23:15 ` Niranjana Vishwanathapura
2022-11-16 0:37 ` Niranjana Vishwanathapura
2022-11-23 11:42 ` Matthew Auld
2022-11-29 23:26 ` Niranjana Vishwanathapura
2022-12-01 10:10 ` Matthew Auld
2022-12-01 15:11 ` Niranjana Vishwanathapura
2022-11-15 15:58 ` Andi Shyti
2022-11-15 16:20 ` Niranjana Vishwanathapura
2022-11-13 8:39 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915/vm_bind: Add VM_BIND functionality (rev10) Patchwork
2022-11-13 8:39 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2022-11-13 9:29 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2022-11-13 11:50 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
2022-11-18 23:53 ` [Intel-gfx] [PATCH v7 00/20] drm/i915/vm_bind: Add VM_BIND functionality Zanoni, Paulo R
2022-11-30 3:53 ` Niranjana Vishwanathapura
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=d8b028ea-4179-768a-7154-5b0bc3641cb1@intel.com \
--to=matthew.auld@intel.com \
--cc=christian.koenig@amd.com \
--cc=daniel.vetter@intel.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-gfx@lists.freedesktop.org \
--cc=jani.nikula@intel.com \
--cc=niranjana.vishwanathapura@intel.com \
--cc=paulo.r.zanoni@intel.com \
--cc=thomas.hellstrom@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox