linux-doc.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Karol Wachowski <karol.wachowski@linux.intel.com>
To: Thomas Zimmermann <tzimmermann@suse.de>,
	boris.brezillon@collabora.com, simona@ffwll.ch,
	airlied@gmail.com, mripard@kernel.org,
	maarten.lankhorst@linux.intel.com, ogabbay@kernel.org,
	mamin506@gmail.com, lizhi.hou@amd.com,
	maciej.falkowski@linux.intel.com, tomeu@tomeuvizoso.net,
	frank.binns@imgtec.com, matt.coster@imgtec.com, yuq825@gmail.com,
	robh@kernel.org, steven.price@arm.com,
	adrian.larumbe@collabora.com, liviu.dudau@arm.com,
	mwen@igalia.com, kraxel@redhat.com,
	dmitry.osipenko@collabora.com, gurchetansingh@chromium.org,
	olvaffe@gmail.com, corbet@lwn.net
Cc: dri-devel@lists.freedesktop.org, lima@lists.freedesktop.org,
	virtualization@lists.linux.dev, linux-doc@vger.kernel.org
Subject: Re: [PATCH 12/13] accel/ivpu: Use GEM-UMA helpers for memory management
Date: Tue, 9 Dec 2025 15:25:06 +0100	[thread overview]
Message-ID: <a8d376f6-bd0f-419a-8709-8f4a0a1a0014@linux.intel.com> (raw)
In-Reply-To: <20251209140141.94407-13-tzimmermann@suse.de>

On 12/9/2025 2:42 PM, Thomas Zimmermann wrote:
> Convert ivpu from GEM-SHMEM to GEM-UMA. The latter is just a copy,
> so this change is merely renaming symbols. No functional changes.
> 
> GEM-SHMEM will become more self-contained for drivers without specific
> memory management. GEM-UMA's interfaces will remain flexible for drivers
> with UMA hardware, such as ivpu.
> 
> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
> ---
>  drivers/accel/ivpu/Kconfig    |  2 +-
>  drivers/accel/ivpu/ivpu_gem.c | 36 +++++++++++++++++------------------
>  drivers/accel/ivpu/ivpu_gem.h |  4 ++--
>  3 files changed, 21 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
> index 9e055b5ce03d..49ca139a9d31 100644
> --- a/drivers/accel/ivpu/Kconfig
> +++ b/drivers/accel/ivpu/Kconfig
> @@ -5,8 +5,8 @@ config DRM_ACCEL_IVPU
>  	depends on DRM_ACCEL
>  	depends on X86_64 && !UML
>  	depends on PCI && PCI_MSI
> +	select DRM_GEM_UMA_HELPER
>  	select FW_LOADER
> -	select DRM_GEM_SHMEM_HELPER
>  	select GENERIC_ALLOCATOR
>  	select WANT_DEV_COREDUMP
>  	help
> diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
> index ece68f570b7e..7f4aeb482efb 100644
> --- a/drivers/accel/ivpu/ivpu_gem.c
> +++ b/drivers/accel/ivpu/ivpu_gem.c
> @@ -84,7 +84,7 @@ int __must_check ivpu_bo_bind(struct ivpu_bo *bo)
>  	if (bo->base.base.import_attach)
>  		sgt = ivpu_bo_map_attachment(vdev, bo);
>  	else
> -		sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
> +		sgt = drm_gem_uma_get_pages_sgt(&bo->base);
>  	if (IS_ERR(sgt)) {
>  		ret = PTR_ERR(sgt);
>  		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
> @@ -223,7 +223,7 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
>  
>  	get_dma_buf(dma_buf);
>  
> -	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL);
> +	obj = drm_gem_uma_prime_import_sg_table(dev, attach, NULL);
>  	if (IS_ERR(obj)) {
>  		ret = PTR_ERR(obj);
>  		goto fail_detach;
> @@ -251,7 +251,7 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
>  
>  static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
>  {
> -	struct drm_gem_shmem_object *shmem;
> +	struct drm_gem_uma_object *uma;
>  	struct ivpu_bo *bo;
>  
>  	switch (flags & DRM_IVPU_BO_CACHE_MASK) {
> @@ -262,11 +262,11 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
>  		return ERR_PTR(-EINVAL);
>  	}
>  
> -	shmem = drm_gem_shmem_create(&vdev->drm, size);
> -	if (IS_ERR(shmem))
> -		return ERR_CAST(shmem);
> +	uma = drm_gem_uma_create(&vdev->drm, size);
> +	if (IS_ERR(uma))
> +		return ERR_CAST(uma);
>  
> -	bo = to_ivpu_bo(&shmem->base);
> +	bo = to_ivpu_bo(&uma->base);
>  	bo->base.map_wc = flags & DRM_IVPU_BO_WC;
>  	bo->flags = flags;
>  
> @@ -330,7 +330,7 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
>  
>  	drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
>  	drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node);
> -	drm_gem_shmem_free(&bo->base);
> +	drm_gem_uma_free(&bo->base);
>  }
>  
>  static enum drm_gem_object_status ivpu_gem_status(struct drm_gem_object *obj)
> @@ -347,15 +347,15 @@ static enum drm_gem_object_status ivpu_gem_status(struct drm_gem_object *obj)
>  static const struct drm_gem_object_funcs ivpu_gem_funcs = {
>  	.free = ivpu_gem_bo_free,
>  	.open = ivpu_gem_bo_open,
> -	.print_info = drm_gem_shmem_object_print_info,
> -	.pin = drm_gem_shmem_object_pin,
> -	.unpin = drm_gem_shmem_object_unpin,
> -	.get_sg_table = drm_gem_shmem_object_get_sg_table,
> -	.vmap = drm_gem_shmem_object_vmap,
> -	.vunmap = drm_gem_shmem_object_vunmap,
> -	.mmap = drm_gem_shmem_object_mmap,
> +	.print_info = drm_gem_uma_object_print_info,
> +	.pin = drm_gem_uma_object_pin,
> +	.unpin = drm_gem_uma_object_unpin,
> +	.get_sg_table = drm_gem_uma_object_get_sg_table,
> +	.vmap = drm_gem_uma_object_vmap,
> +	.vunmap = drm_gem_uma_object_vunmap,
> +	.mmap = drm_gem_uma_object_mmap,
>  	.status = ivpu_gem_status,
> -	.vm_ops = &drm_gem_shmem_vm_ops,
> +	.vm_ops = &drm_gem_uma_vm_ops,
>  };
>  
>  int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> @@ -435,7 +435,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
>  
>  	if (flags & DRM_IVPU_BO_MAPPABLE) {
>  		ivpu_bo_lock(bo);
> -		ret = drm_gem_shmem_vmap_locked(&bo->base, &map);
> +		ret = drm_gem_uma_vmap_locked(&bo->base, &map);
>  		ivpu_bo_unlock(bo);
>  
>  		if (ret)
> @@ -475,7 +475,7 @@ void ivpu_bo_free(struct ivpu_bo *bo)
>  
>  	if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
>  		ivpu_bo_lock(bo);
> -		drm_gem_shmem_vunmap_locked(&bo->base, &map);
> +		drm_gem_uma_vunmap_locked(&bo->base, &map);
>  		ivpu_bo_unlock(bo);
>  	}
>  
> diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
> index 0c3350f22b55..3e5d1a64deab 100644
> --- a/drivers/accel/ivpu/ivpu_gem.h
> +++ b/drivers/accel/ivpu/ivpu_gem.h
> @@ -6,13 +6,13 @@
>  #define __IVPU_GEM_H__
>  
>  #include <drm/drm_gem.h>
> -#include <drm/drm_gem_shmem_helper.h>
> +#include <drm/drm_gem_uma_helper.h>
>  #include <drm/drm_mm.h>
>  
>  struct ivpu_file_priv;
>  
>  struct ivpu_bo {
> -	struct drm_gem_shmem_object base;
> +	struct drm_gem_uma_object base;
>  	struct ivpu_mmu_context *ctx;
>  	struct list_head bo_list_node;
>  	struct drm_mm_node mm_node;
Reviewed-by: Karol Wachowski <karol.wachowski@linux.intel.com>

  parent reply	other threads:[~2025-12-09 14:25 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-12-09 13:41 [RFC][PATCH 00/13] drm: Introduce GEM-UMA memory management Thomas Zimmermann
2025-12-09 13:41 ` [PATCH 01/13] drm/gem-shmem: Fix typos in documentation Thomas Zimmermann
2025-12-11 10:00   ` Boris Brezillon
2025-12-11 12:03   ` Thomas Zimmermann
2025-12-09 13:41 ` [PATCH 02/13] drm/gem-shmem: Fix the MODULE_LICENSE() string Thomas Zimmermann
2025-12-11 10:01   ` Boris Brezillon
2025-12-11 12:04   ` Thomas Zimmermann
2025-12-09 13:42 ` [PATCH 03/13] drm: Add GEM-UMA helpers for memory management Thomas Zimmermann
2025-12-09 13:42 ` [PATCH 04/13] drm/gem-uma: Remove unused interfaces Thomas Zimmermann
2025-12-09 13:42 ` [PATCH 05/13] drm/imagination: Use GEM-UMA helpers for memory management Thomas Zimmermann
2025-12-09 13:42 ` [PATCH 06/13] drm/lima: " Thomas Zimmermann
2025-12-09 13:42 ` [PATCH 07/13] drm/panfrost: " Thomas Zimmermann
2025-12-09 13:42 ` [PATCH 08/13] drm/panthor: " Thomas Zimmermann
2025-12-09 13:42 ` [PATCH 09/13] drm/v3d: " Thomas Zimmermann
2025-12-09 13:42 ` [PATCH 10/13] drm/virtgpu: " Thomas Zimmermann
2025-12-09 13:42 ` [PATCH 11/13] accel/amdxdna: " Thomas Zimmermann
2025-12-09 13:42 ` [PATCH 12/13] accel/ivpu: " Thomas Zimmermann
2025-12-09 14:24   ` Karol Wachowski
2025-12-09 14:25   ` Karol Wachowski [this message]
2025-12-09 13:42 ` [PATCH 13/13] accel/rocket: " Thomas Zimmermann
2025-12-09 14:27 ` [RFC][PATCH 00/13] drm: Introduce GEM-UMA " Boris Brezillon
2025-12-09 14:51   ` Thomas Zimmermann
2025-12-09 15:30     ` Boris Brezillon
2025-12-10  7:34       ` Thomas Zimmermann
2025-12-10  9:21         ` Boris Brezillon
2025-12-10  9:57           ` Thomas Zimmermann

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=a8d376f6-bd0f-419a-8709-8f4a0a1a0014@linux.intel.com \
    --to=karol.wachowski@linux.intel.com \
    --cc=adrian.larumbe@collabora.com \
    --cc=airlied@gmail.com \
    --cc=boris.brezillon@collabora.com \
    --cc=corbet@lwn.net \
    --cc=dmitry.osipenko@collabora.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=frank.binns@imgtec.com \
    --cc=gurchetansingh@chromium.org \
    --cc=kraxel@redhat.com \
    --cc=lima@lists.freedesktop.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=liviu.dudau@arm.com \
    --cc=lizhi.hou@amd.com \
    --cc=maarten.lankhorst@linux.intel.com \
    --cc=maciej.falkowski@linux.intel.com \
    --cc=mamin506@gmail.com \
    --cc=matt.coster@imgtec.com \
    --cc=mripard@kernel.org \
    --cc=mwen@igalia.com \
    --cc=ogabbay@kernel.org \
    --cc=olvaffe@gmail.com \
    --cc=robh@kernel.org \
    --cc=simona@ffwll.ch \
    --cc=steven.price@arm.com \
    --cc=tomeu@tomeuvizoso.net \
    --cc=tzimmermann@suse.de \
    --cc=virtualization@lists.linux.dev \
    --cc=yuq825@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).