Intel-XE Archive on lore.kernel.org
From: Leon Romanovsky <leonro@nvidia.com>
To: Matthew Brost <matthew.brost@intel.com>
Cc: <intel-xe@lists.freedesktop.org>,
	<dri-devel@lists.freedesktop.org>, <francois.dugast@intel.com>,
	<thomas.hellstrom@linux.intel.com>,
	<himal.prasad.ghimiray@intel.com>, <jgg@ziepe.ca>
Subject: Re: [RFC PATCH v3 01/11] drm/pagemap: Add helper to access zone_device_data
Date: Wed, 28 Jan 2026 15:53:45 +0200
Message-ID: <20260128135345.GE40916@unreal>
In-Reply-To: <20260128004841.2436896-2-matthew.brost@intel.com>

On Tue, Jan 27, 2026 at 04:48:31PM -0800, Matthew Brost wrote:
> From: Francois Dugast <francois.dugast@intel.com>
> 
> This new helper ensures that all accesses to zone_device_data go
> through the correct API, whether or not the page is part of a folio.
> 
> v2:
> - Move to drm_pagemap.h, stick to folio_zone_device_data (Matthew Brost)
> - Return struct drm_pagemap_zdd * (Matthew Brost)

The common practice is to put the changelog under the --- trailer, as it
doesn't belong in the commit history. There is no value in this
changelog when running git log ...
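
For example, with this patch's own notes moved below the trailer (a
sketch; when the patch is applied, git am discards everything between
the first "---" line and the diff):

    Signed-off-by: Francois Dugast <francois.dugast@intel.com>
    ---
    v2:
    - Move to drm_pagemap.h, stick to folio_zone_device_data (Matthew Brost)
    - Return struct drm_pagemap_zdd * (Matthew Brost)

     drivers/gpu/drm/drm_gpusvm.c  |  7 +++++--
     drivers/gpu/drm/drm_pagemap.c | 21 ++++++++++++---------
     include/drm/drm_pagemap.h     | 14 ++++++++++++++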

Thanks

> 
> Suggested-by: Matthew Brost <matthew.brost@intel.com>
> Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> Signed-off-by: Francois Dugast <francois.dugast@intel.com>
> ---
>  drivers/gpu/drm/drm_gpusvm.c  |  7 +++++--
>  drivers/gpu/drm/drm_pagemap.c | 21 ++++++++++++---------
>  include/drm/drm_pagemap.h     | 14 ++++++++++++++
>  3 files changed, 31 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
> index aa9a0b60e727..585d913d3d19 100644
> --- a/drivers/gpu/drm/drm_gpusvm.c
> +++ b/drivers/gpu/drm/drm_gpusvm.c
> @@ -1488,12 +1488,15 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
>  		order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
>  		if (is_device_private_page(page) ||
>  		    is_device_coherent_page(page)) {
> +			struct drm_pagemap_zdd *__zdd =
> +				drm_pagemap_page_zone_device_data(page);
> +
>  			if (!ctx->allow_mixed &&
> -			    zdd != page->zone_device_data && i > 0) {
> +			    zdd != __zdd && i > 0) {
>  				err = -EOPNOTSUPP;
>  				goto err_unmap;
>  			}
> -			zdd = page->zone_device_data;
> +			zdd = __zdd;
>  			if (pagemap != page_pgmap(page)) {
>  				if (i > 0) {
>  					err = -EOPNOTSUPP;
> diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
> index 03ee39a761a4..2c67aabd8d65 100644
> --- a/drivers/gpu/drm/drm_pagemap.c
> +++ b/drivers/gpu/drm/drm_pagemap.c
> @@ -244,7 +244,7 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
>  		order = folio_order(folio);
>  
>  		if (is_device_private_page(page)) {
> -			struct drm_pagemap_zdd *zdd = page->zone_device_data;
> +			struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
>  			struct drm_pagemap *dpagemap = zdd->dpagemap;
>  			struct drm_pagemap_addr addr;
>  
> @@ -315,7 +315,7 @@ static void drm_pagemap_migrate_unmap_pages(struct device *dev,
>  			goto next;
>  
>  		if (is_zone_device_page(page)) {
> -			struct drm_pagemap_zdd *zdd = page->zone_device_data;
> +			struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
>  			struct drm_pagemap *dpagemap = zdd->dpagemap;
>  
>  			dpagemap->ops->device_unmap(dpagemap, dev, pagemap_addr[i]);
> @@ -603,7 +603,8 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
>  
>  		pages[i] = NULL;
>  		if (src_page && is_device_private_page(src_page)) {
> -			struct drm_pagemap_zdd *src_zdd = src_page->zone_device_data;
> +			struct drm_pagemap_zdd *src_zdd =
> +				drm_pagemap_page_zone_device_data(src_page);
>  
>  			if (page_pgmap(src_page) == pagemap &&
>  			    !mdetails->can_migrate_same_pagemap) {
> @@ -725,8 +726,8 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
>  			goto next;
>  
>  		if (fault_page) {
> -			if (src_page->zone_device_data !=
> -			    fault_page->zone_device_data)
> +			if (drm_pagemap_page_zone_device_data(src_page) !=
> +			    drm_pagemap_page_zone_device_data(fault_page))
>  				goto next;
>  		}
>  
> @@ -1067,7 +1068,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
>  	void *buf;
>  	int i, err = 0;
>  
> -	zdd = page->zone_device_data;
> +	zdd = drm_pagemap_page_zone_device_data(page);
>  	if (time_before64(get_jiffies_64(), zdd->devmem_allocation->timeslice_expiration))
>  		return 0;
>  
> @@ -1150,7 +1151,9 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
>   */
>  static void drm_pagemap_folio_free(struct folio *folio)
>  {
> -	drm_pagemap_zdd_put(folio->page.zone_device_data);
> +	struct page *page = folio_page(folio, 0);
> +
> +	drm_pagemap_zdd_put(drm_pagemap_page_zone_device_data(page));
>  }
>  
>  /**
> @@ -1166,7 +1169,7 @@ static void drm_pagemap_folio_free(struct folio *folio)
>   */
>  static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
>  {
> -	struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
> +	struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(vmf->page);
>  	int err;
>  
>  	err = __drm_pagemap_migrate_to_ram(vmf->vma,
> @@ -1232,7 +1235,7 @@ EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
>   */
>  struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
>  {
> -	struct drm_pagemap_zdd *zdd = page->zone_device_data;
> +	struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
>  
>  	return zdd->devmem_allocation->dpagemap;
>  }
> diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
> index 2baf0861f78f..14e1db564c25 100644
> --- a/include/drm/drm_pagemap.h
> +++ b/include/drm/drm_pagemap.h
> @@ -4,6 +4,7 @@
>  
>  #include <linux/dma-direction.h>
>  #include <linux/hmm.h>
> +#include <linux/memremap.h>
>  #include <linux/types.h>
>  
>  #define NR_PAGES(order) (1U << (order))
> @@ -341,6 +342,19 @@ struct drm_pagemap_migrate_details {
>  	u32 source_peer_migrates : 1;
>  };
>  
> +/**
> + * drm_pagemap_page_zone_device_data() - Page to zone_device_data
> + * @page: Pointer to the page
> + *
> + * Return: Page's zone_device_data
> + */
> +static inline struct drm_pagemap_zdd *drm_pagemap_page_zone_device_data(struct page *page)
> +{
> +	struct folio *folio = page_folio(page);
> +
> +	return folio_zone_device_data(folio);
> +}
> +
>  #if IS_ENABLED(CONFIG_ZONE_DEVICE)
>  
>  int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
> -- 
> 2.34.1
> 
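
In short, every hunk above replaces a direct zone_device_data field
access with a call through the new folio-aware helper. A one-place
before/after summary in C, restating the patch rather than adding new
code:

	/* Before (removed): read the field straight off the page,
	 * bypassing the folio that may own it.
	 */
	struct drm_pagemap_zdd *zdd = page->zone_device_data;

	/* After: resolve the owning folio, then read the data via the
	 * folio_zone_device_data() accessor, so folio-backed and
	 * standalone pages behave the same way.
	 */
	struct drm_pagemap_zdd *zdd =
		drm_pagemap_page_zone_device_data(page);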

Thread overview: 30+ messages
2026-01-28  0:48 [RFC PATCH v3 00/11] Use new dma-map IOVA alloc, link, and sync API in GPU SVM and DRM pagemap Matthew Brost
2026-01-28  0:48 ` [RFC PATCH v3 01/11] drm/pagemap: Add helper to access zone_device_data Matthew Brost
2026-01-28 13:53   ` Leon Romanovsky [this message]
2026-01-28  0:48 ` [RFC PATCH v3 02/11] drm/gpusvm: Use dma-map IOVA alloc, link, and sync API in GPU SVM Matthew Brost
2026-01-28 14:04   ` Leon Romanovsky
2026-01-28  0:48 ` [RFC PATCH v3 03/11] drm/pagemap: Split drm_pagemap_migrate_map_pages into device / system Matthew Brost
2026-01-28  0:48 ` [RFC PATCH v3 04/11] drm/pagemap: Use dma-map IOVA alloc, link, and sync API for DRM pagemap Matthew Brost
2026-01-28 14:28   ` Leon Romanovsky
2026-01-28 17:46     ` Matthew Brost
     [not found]       ` <20260128175531.GR1641016@ziepe.ca>
2026-01-28 19:29         ` Matthew Brost
2026-01-28 19:45           ` Leon Romanovsky
2026-01-28 21:04             ` Matthew Brost
2026-01-29 10:14               ` Leon Romanovsky
2026-01-29 18:22                 ` Matthew Brost
2026-01-28  0:48 ` [RFC PATCH v3 05/11] drm/pagemap: Reduce number of IOVA link calls Matthew Brost
2026-01-28  0:48 ` [RFC PATCH v3 06/11] drm/pagemap: Add IOVA interface to DRM pagemap Matthew Brost
     [not found]   ` <20260128151458.GJ1641016@ziepe.ca>
2026-01-28 18:42     ` Matthew Brost
2026-01-28 19:41       ` Matthew Brost
     [not found]       ` <20260128193509.GU1641016@ziepe.ca>
2026-01-28 20:24         ` Matthew Brost
2026-01-29 18:57           ` Jason Gunthorpe
2026-01-29 19:28             ` Matthew Brost
2026-01-29 19:32               ` Jason Gunthorpe
2026-01-28  0:48 ` [RFC PATCH v3 07/11] drm/xe: Stub out DRM pagemap IOVA alloc implementation Matthew Brost
2026-01-28  0:48 ` [RFC PATCH v3 08/11] drm/pagemap: Use device-to-device IOVA alloc, link, and sync API for DRM pagemap Matthew Brost
2026-01-28  0:48 ` [RFC PATCH v3 09/11] drm/xe: Drop BO dma-resv lock during SVM migrate-to-device Matthew Brost
2026-01-28  0:48 ` [RFC PATCH v3 10/11] drm/xe: Implement DRM pagemap IOVA vfuncs Matthew Brost
2026-01-28  0:48 ` [RFC PATCH v3 11/11] drm/gpusvm: Use device-to-device IOVA alloc, link, and sync API in GPU SVM Matthew Brost
2026-01-28  0:59 ` ✗ CI.checkpatch: warning for Use new dma-map IOVA alloc, link, and sync API in GPU SVM and DRM pagemap (rev3) Patchwork
2026-01-28  1:01 ` ✓ CI.KUnit: success " Patchwork
2026-01-28  1:42 ` ✓ Xe.CI.BAT: " Patchwork
