From: Boris Brezillon <boris.brezillon@collabora.com>
To: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Cc: kernel@collabora.com, "Thomas Zimmermann" <tzimmermann@suse.de>,
	"Emma Anholt" <emma@anholt.net>,
	"Christian König" <christian.koenig@amd.com>,
	dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org,
	"Maxime Ripard" <mripard@kernel.org>,
	"Gurchetan Singh" <gurchetansingh@chromium.org>,
	"Melissa Wen" <mwen@igalia.com>,
	"Gerd Hoffmann" <kraxel@redhat.com>,
	"Steven Price" <steven.price@arm.com>,
	virtualization@lists.linux-foundation.org,
	"Qiang Yu" <yuq825@gmail.com>
Subject: Re: [PATCH v18 24/26] drm/virtio: Attach shmem BOs dynamically
Date: Mon, 13 Nov 2023 10:57:45 +0100
Message-ID: <20231113105745.7543e8dd@collabora.com>
In-Reply-To: <20231029230205.93277-25-dmitry.osipenko@collabora.com>

On Mon, 30 Oct 2023 02:02:03 +0300
Dmitry Osipenko <dmitry.osipenko@collabora.com> wrote:

> Prepare for the addition of memory shrinker support by attaching shmem
> pages to the host dynamically, on first use. Previously the attachment
> vq command wasn't fenced and no vq kick was made in the BO creation
> code path, so the attachment effectively already happened dynamically,
> just implicitly. Making the attachment explicitly dynamic will allow
> more code to be simplified and reused once the shrinker is added.
> virtio_gpu_object_shmem_init() now runs with the reservation lock held,
> which the shrinker will need in order to avoid moving pages while they
> are in active use by the driver.

Ah, this commit might actually help get rid of the workaround
introduced in "drm/shmem-helper: Add common memory shrinker".
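
For anyone else following the series: as I read it, non-blob BOs are
now created with bo->detached = true and nothing is sent to the host
until first use, at which point the new helpers re-attach the pages
under the reservation lock. A minimal sketch of what a hypothetical
extra user of the unlocked helper would look like (example_touch_bo()
is made up, error handling trimmed):

	static int example_touch_bo(struct virtio_gpu_object *bo)
	{
		int ret;

		/* Takes the resv lock, builds the sg table and emits
		 * ATTACH_BACKING if bo->detached, then unlocks. */
		ret = virtio_gpu_reattach_shmem_object(bo);
		if (ret)
			return ret;

		/* bo->detached is now false: the pages are attached
		 * to the host until something detaches them again. */
		return 0;
	}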

> 
> Acked-by: Gerd Hoffmann <kraxel@redhat.com>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
> ---
>  drivers/gpu/drm/virtio/virtgpu_drv.h    |  7 +++
>  drivers/gpu/drm/virtio/virtgpu_gem.c    | 26 +++++++++
>  drivers/gpu/drm/virtio/virtgpu_ioctl.c  | 32 +++++++----
>  drivers/gpu/drm/virtio/virtgpu_object.c | 73 ++++++++++++++++++++-----
>  drivers/gpu/drm/virtio/virtgpu_submit.c | 15 ++++-
>  5 files changed, 125 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
> index 56269814fb6d..421f524ae1de 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> @@ -89,6 +89,7 @@ struct virtio_gpu_object {
>  	uint32_t hw_res_handle;
>  	bool dumb;
>  	bool created;
> +	bool detached;
>  	bool host3d_blob, guest_blob;
>  	uint32_t blob_mem, blob_flags;
>  
> @@ -313,6 +314,8 @@ void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
>  void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
>  				       struct virtio_gpu_object_array *objs);
>  void virtio_gpu_array_put_free_work(struct work_struct *work);
> +int virtio_gpu_array_prepare(struct virtio_gpu_device *vgdev,
> +			     struct virtio_gpu_object_array *objs);
>  int virtio_gpu_gem_pin(struct virtio_gpu_object *bo);
>  void virtio_gpu_gem_unpin(struct virtio_gpu_object *bo);
>  
> @@ -453,6 +456,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
>  
>  bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
>  
> +int virtio_gpu_reattach_shmem_object_locked(struct virtio_gpu_object *bo);
> +
> +int virtio_gpu_reattach_shmem_object(struct virtio_gpu_object *bo);
> +
>  int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
>  			       uint32_t *resid);
>  /* virtgpu_prime.c */
> diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
> index 625c05d625bf..97e67064c97e 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_gem.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
> @@ -295,6 +295,26 @@ void virtio_gpu_array_put_free_work(struct work_struct *work)
>  	spin_unlock(&vgdev->obj_free_lock);
>  }
>  
> +int virtio_gpu_array_prepare(struct virtio_gpu_device *vgdev,
> +			     struct virtio_gpu_object_array *objs)
> +{
> +	struct virtio_gpu_object *bo;
> +	int ret = 0;
> +	u32 i;
> +
> +	for (i = 0; i < objs->nents; i++) {
> +		bo = gem_to_virtio_gpu_obj(objs->objs[i]);
> +
> +		if (virtio_gpu_is_shmem(bo) && bo->detached) {
> +			ret = virtio_gpu_reattach_shmem_object_locked(bo);
> +			if (ret)
> +				break;
> +		}
> +	}
> +
> +	return ret;
> +}
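
Maybe worth a comment making it explicit that this helper expects the
resv locks of all objects in the array to be held, since it goes
through the _locked() reattach path. FWIW all the call sites follow
the same pattern (sketch mirroring the ioctl hunks below):

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	/* Re-attach any detached shmem BOs before fencing/queueing. */
	ret = virtio_gpu_array_prepare(vgdev, objs);
	if (ret)
		goto err_unlock;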
> +
>  int virtio_gpu_gem_pin(struct virtio_gpu_object *bo)
>  {
>  	int err;
> @@ -303,6 +323,12 @@ int virtio_gpu_gem_pin(struct virtio_gpu_object *bo)
>  		err = drm_gem_shmem_pin(&bo->base);
>  		if (err)
>  			return err;
> +
> +		err = virtio_gpu_reattach_shmem_object(bo);
> +		if (err) {
> +			drm_gem_shmem_unpin(&bo->base);
> +			return err;
> +		}
>  	}
>  
>  	return 0;
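
The ordering in the pin path makes sense to me: pinning first
guarantees the pages can't be released while the sg table is built,
and the unpin cleanly unwinds things if the re-attach fails.
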
> diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> index b24b11f25197..070c29cea26a 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> @@ -246,6 +246,10 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
>  	if (ret != 0)
>  		goto err_put_free;
>  
> +	ret = virtio_gpu_array_prepare(vgdev, objs);
> +	if (ret)
> +		goto err_unlock;
> +
>  	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
>  	if (!fence) {
>  		ret = -ENOMEM;
> @@ -288,11 +292,25 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
>  		goto err_put_free;
>  	}
>  
> +	ret = virtio_gpu_array_lock_resv(objs);
> +	if (ret != 0)
> +		goto err_put_free;
> +
> +	ret = virtio_gpu_array_prepare(vgdev, objs);
> +	if (ret)
> +		goto err_unlock;
> +
> +	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
> +	if (!fence) {
> +		ret = -ENOMEM;
> +		goto err_unlock;
> +	}
> +
>  	if (!vgdev->has_virgl_3d) {
>  		virtio_gpu_cmd_transfer_to_host_2d
>  			(vgdev, offset,
>  			 args->box.w, args->box.h, args->box.x, args->box.y,
> -			 objs, NULL);
> +			 objs, fence);
>  	} else {
>  		virtio_gpu_create_context(dev, file);
>  
> @@ -301,23 +319,13 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
>  			goto err_put_free;
>  		}
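
Nit: now that the resv locks and the fence are acquired before the
has_virgl_3d branch, the goto err_put_free that remains in this branch
looks like it would leak both the locks and the fence. Something along
these lines instead, maybe (untested sketch, condition elided):

		if (...) {
			ret = -EINVAL;
			dma_fence_put(&fence->f);
			goto err_unlock;
		}
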
>  
> -		ret = virtio_gpu_array_lock_resv(objs);
> -		if (ret != 0)
> -			goto err_put_free;
> -
> -		ret = -ENOMEM;
> -		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
> -					       0);
> -		if (!fence)
> -			goto err_unlock;
> -
>  		virtio_gpu_cmd_transfer_to_host_3d
>  			(vgdev,
>  			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
>  			 args->stride, args->layer_stride, &args->box, objs,
>  			 fence);
> -		dma_fence_put(&fence->f);
>  	}
> +	dma_fence_put(&fence->f);
>  	virtio_gpu_notify(vgdev);
>  	return 0;
>  
> diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
> index 998f8b05ceb1..000bb7955a57 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_object.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
> @@ -143,7 +143,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
>  	struct sg_table *pages;
>  	int si;
>  
> -	pages = drm_gem_shmem_get_pages_sgt(&bo->base);
> +	pages = drm_gem_shmem_get_pages_sgt_locked(&bo->base);
>  	if (IS_ERR(pages))
>  		return PTR_ERR(pages);
>  
> @@ -177,6 +177,40 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
>  	return 0;
>  }
>  
> +int virtio_gpu_reattach_shmem_object_locked(struct virtio_gpu_object *bo)
> +{
> +	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
> +	struct virtio_gpu_mem_entry *ents;
> +	unsigned int nents;
> +	int err;
> +
> +	if (!bo->detached)
> +		return 0;
> +
> +	err = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
> +	if (err)
> +		return err;
> +
> +	virtio_gpu_object_attach(vgdev, bo, ents, nents);
> +
> +	bo->detached = false;
> +
> +	return 0;
> +}
> +
> +int virtio_gpu_reattach_shmem_object(struct virtio_gpu_object *bo)
> +{
> +	int ret;
> +
> +	ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
> +	if (ret)
> +		return ret;
> +	ret = virtio_gpu_reattach_shmem_object_locked(bo);
> +	dma_resv_unlock(bo->base.base.resv);
> +
> +	return ret;
> +}
> +
>  int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
>  			     struct virtio_gpu_object_params *params,
>  			     struct virtio_gpu_object **bo_ptr,
> @@ -207,45 +241,56 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
>  
>  	bo->dumb = params->dumb;
>  
> -	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
> -	if (ret != 0)
> -		goto err_put_id;
> +	if (bo->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
> +		bo->guest_blob = true;
>  
>  	if (fence) {
>  		ret = -ENOMEM;
>  		objs = virtio_gpu_array_alloc(1);
>  		if (!objs)
> -			goto err_free_entry;
> +			goto err_put_id;
>  		virtio_gpu_array_add_obj(objs, &bo->base.base);
>  
>  		ret = virtio_gpu_array_lock_resv(objs);
>  		if (ret != 0)
>  			goto err_put_objs;
> +	} else {
> +		ret = dma_resv_lock(bo->base.base.resv, NULL);
> +		if (ret)
> +			goto err_put_id;
>  	}
>  
>  	if (params->blob) {
> -		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
> -			bo->guest_blob = true;
> +		ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
> +		if (ret)
> +			goto err_unlock_objs;
> +	} else {
> +		bo->detached = true;
> +	}
>  
> +	if (params->blob)
>  		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
>  						    ents, nents);
> -	} else if (params->virgl) {
> +	else if (params->virgl)
>  		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
>  						  objs, fence);
> -		virtio_gpu_object_attach(vgdev, bo, ents, nents);
> -	} else {
> +	else
>  		virtio_gpu_cmd_create_resource(vgdev, bo, params,
>  					       objs, fence);
> -		virtio_gpu_object_attach(vgdev, bo, ents, nents);
> -	}
> +
> +	if (!fence)
> +		dma_resv_unlock(bo->base.base.resv);
>  
>  	*bo_ptr = bo;
>  	return 0;
>  
> +err_unlock_objs:
> +	if (fence)
> +		virtio_gpu_array_unlock_resv(objs);
> +	else
> +		dma_resv_unlock(bo->base.base.resv);
>  err_put_objs:
>  	virtio_gpu_array_put_free(objs);
> -err_free_entry:
> -	kvfree(ents);
>  err_put_id:
>  	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
>  err_put_pages:
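
The object_create() changes look good to me: taking the resv lock in
the !fence case too means virtio_gpu_object_shmem_init() now always
runs locked, which lines up with the switch to
drm_gem_shmem_get_pages_sgt_locked() above.
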
> diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
> index 5c514946bbad..6e4ef2593e8f 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_submit.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
> @@ -464,8 +464,19 @@ static void virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit)
>  
>  static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit)
>  {
> -	if (submit->buflist)
> -		return virtio_gpu_array_lock_resv(submit->buflist);
> +	int err;
> +
> +	if (submit->buflist) {
> +		err = virtio_gpu_array_lock_resv(submit->buflist);
> +		if (err)
> +			return err;
> +
> +		err = virtio_gpu_array_prepare(submit->vgdev, submit->buflist);
> +		if (err) {
> +			virtio_gpu_array_unlock_resv(submit->buflist);
> +			return err;
> +		}
> +	}
>  
>  	return 0;
>  }


