public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings
@ 2026-03-31 17:26 Lizhi Hou
  2026-03-31 17:52 ` Mario Limonciello
  2026-04-01  4:57 ` Matthew Brost
  0 siblings, 2 replies; 8+ messages in thread
From: Lizhi Hou @ 2026-03-31 17:26 UTC (permalink / raw)
  To: ogabbay, quic_jhugo, dri-devel, mario.limonciello,
	maciej.falkowski
  Cc: Max Zhen, linux-kernel, sonal.santan, Lizhi Hou

From: Max Zhen <max.zhen@amd.com>

Update the amdxdna user-pointer (ubuf) BO path to support creating buffer
objects from read-only user mappings.

Detect read-only VMAs by checking VMA permissions across all user virtual
address ranges associated with the BO. When all entries are read-only, pin
user pages without FOLL_WRITE and export the resulting dmabuf as read-only
(O_RDONLY).

This allows userptr BOs backed by read-only mappings to be safely imported
and used without requiring write access, which was previously rejected due
to unconditional FOLL_WRITE usage.

Signed-off-by: Max Zhen <max.zhen@amd.com>
Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
---
 drivers/accel/amdxdna/amdxdna_ubuf.c | 29 ++++++++++++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)

diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
index 4c0647057759..3769210c55cc 100644
--- a/drivers/accel/amdxdna/amdxdna_ubuf.c
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -125,6 +125,26 @@ static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
 	.vunmap = amdxdna_ubuf_vunmap,
 };
 
+static int readonly_va_entry(struct amdxdna_drm_va_entry *va_ent)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int ret;
+
+	mmap_read_lock(mm);
+
+	vma = find_vma(mm, va_ent->vaddr);
+	if (!vma ||
+	    vma->vm_start > va_ent->vaddr ||
+	    vma->vm_end - va_ent->vaddr < va_ent->len)
+		ret = -ENOENT;
+	else
+		ret = vma->vm_flags & VM_WRITE ? 0 : 1;
+
+	mmap_read_unlock(mm);
+	return ret;
+}
+
 struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 				 u32 num_entries, void __user *va_entries)
 {
@@ -134,6 +154,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 	struct amdxdna_ubuf_priv *ubuf;
 	u32 npages, start = 0;
 	struct dma_buf *dbuf;
+	bool readonly = true;
 	int i, ret;
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
@@ -172,6 +193,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 			ret = -EINVAL;
 			goto free_ent;
 		}
+
+		/* Pin pages as writable as long as not all entries are read-only. */
+		if (readonly && readonly_va_entry(&va_ent[i]) != 1)
+			readonly = false;
 	}
 
 	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
@@ -194,7 +219,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 		npages = va_ent[i].len >> PAGE_SHIFT;
 
 		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
-					  FOLL_WRITE | FOLL_LONGTERM,
+					  (readonly ? 0 : FOLL_WRITE) | FOLL_LONGTERM,
 					  &ubuf->pages[start]);
 		if (ret >= 0) {
 			start += ret;
@@ -211,7 +236,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 
 	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
 	exp_info.priv = ubuf;
-	exp_info.flags = O_RDWR | O_CLOEXEC;
+	exp_info.flags = (readonly ? O_RDONLY : O_RDWR) | O_CLOEXEC;
 
 	dbuf = dma_buf_export(&exp_info);
 	if (IS_ERR(dbuf)) {
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings
  2026-03-31 17:26 [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings Lizhi Hou
@ 2026-03-31 17:52 ` Mario Limonciello
  2026-04-01  4:57 ` Matthew Brost
  1 sibling, 0 replies; 8+ messages in thread
From: Mario Limonciello @ 2026-03-31 17:52 UTC (permalink / raw)
  To: Lizhi Hou, ogabbay, quic_jhugo, dri-devel, maciej.falkowski
  Cc: Max Zhen, linux-kernel, sonal.santan



On 3/31/26 12:26, Lizhi Hou wrote:
> From: Max Zhen <max.zhen@amd.com>
> 
> Update the amdxdna user-pointer (ubuf) BO path to support creating buffer
> objects from read-only user mappings.
> 
> Detect read-only VMAs by checking VMA permissions across all user virtual
> address ranges associated with the BO. When all entries are read-only, pin
> user pages without FOLL_WRITE and export the resulting dmabuf as read-only
> (O_RDONLY).
> 
> This allows userptr BOs backed by read-only mappings to be safely imported
> and used without requiring write access, which was previously rejected due
> to unconditional FOLL_WRITE usage.
> 
> Signed-off-by: Max Zhen <max.zhen@amd.com>
> Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
> ---
>   drivers/accel/amdxdna/amdxdna_ubuf.c | 29 ++++++++++++++++++++++++++--
>   1 file changed, 27 insertions(+), 2 deletions(-)
Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
> 
> diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
> index 4c0647057759..3769210c55cc 100644
> --- a/drivers/accel/amdxdna/amdxdna_ubuf.c
> +++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
> @@ -125,6 +125,26 @@ static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
>   	.vunmap = amdxdna_ubuf_vunmap,
>   };
>   
> +static int readonly_va_entry(struct amdxdna_drm_va_entry *va_ent)
> +{
> +	struct mm_struct *mm = current->mm;
> +	struct vm_area_struct *vma;
> +	int ret;
> +
> +	mmap_read_lock(mm);
> +
> +	vma = find_vma(mm, va_ent->vaddr);
> +	if (!vma ||
> +	    vma->vm_start > va_ent->vaddr ||
> +	    vma->vm_end - va_ent->vaddr < va_ent->len)
> +		ret = -ENOENT;
> +	else
> +		ret = vma->vm_flags & VM_WRITE ? 0 : 1;
> +
> +	mmap_read_unlock(mm);
> +	return ret;
> +}
> +
>   struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>   				 u32 num_entries, void __user *va_entries)
>   {
> @@ -134,6 +154,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>   	struct amdxdna_ubuf_priv *ubuf;
>   	u32 npages, start = 0;
>   	struct dma_buf *dbuf;
> +	bool readonly = true;
>   	int i, ret;
>   	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
>   
> @@ -172,6 +193,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>   			ret = -EINVAL;
>   			goto free_ent;
>   		}
> +
> +		/* Pin pages as writable as long as not all entries are read-only. */
> +		if (readonly && readonly_va_entry(&va_ent[i]) != 1)
> +			readonly = false;
>   	}
>   
>   	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
> @@ -194,7 +219,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>   		npages = va_ent[i].len >> PAGE_SHIFT;
>   
>   		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
> -					  FOLL_WRITE | FOLL_LONGTERM,
> +					  (readonly ? 0 : FOLL_WRITE) | FOLL_LONGTERM,
>   					  &ubuf->pages[start]);
>   		if (ret >= 0) {
>   			start += ret;
> @@ -211,7 +236,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>   
>   	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
>   	exp_info.priv = ubuf;
> -	exp_info.flags = O_RDWR | O_CLOEXEC;
> +	exp_info.flags = (readonly ? O_RDONLY : O_RDWR) | O_CLOEXEC;
>   
>   	dbuf = dma_buf_export(&exp_info);
>   	if (IS_ERR(dbuf)) {


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings
  2026-03-31 17:26 [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings Lizhi Hou
  2026-03-31 17:52 ` Mario Limonciello
@ 2026-04-01  4:57 ` Matthew Brost
  2026-04-01  5:00   ` Matthew Brost
  2026-04-01 16:51   ` Lizhi Hou
  1 sibling, 2 replies; 8+ messages in thread
From: Matthew Brost @ 2026-04-01  4:57 UTC (permalink / raw)
  To: Lizhi Hou
  Cc: ogabbay, quic_jhugo, dri-devel, mario.limonciello,
	maciej.falkowski, Max Zhen, linux-kernel, sonal.santan

On Tue, Mar 31, 2026 at 10:26:35AM -0700, Lizhi Hou wrote:
> From: Max Zhen <max.zhen@amd.com>
> 
> Update the amdxdna user-pointer (ubuf) BO path to support creating buffer
> objects from read-only user mappings.
> 
> Detect read-only VMAs by checking VMA permissions across all user virtual
> address ranges associated with the BO. When all entries are read-only, pin
> user pages without FOLL_WRITE and export the resulting dmabuf as read-only
> (O_RDONLY).
> 
> This allows userptr BOs backed by read-only mappings to be safely imported
> and used without requiring write access, which was previously rejected due
> to unconditional FOLL_WRITE usage.
> 
> Signed-off-by: Max Zhen <max.zhen@amd.com>
> Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
> ---
>  drivers/accel/amdxdna/amdxdna_ubuf.c | 29 ++++++++++++++++++++++++++--
>  1 file changed, 27 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
> index 4c0647057759..3769210c55cc 100644
> --- a/drivers/accel/amdxdna/amdxdna_ubuf.c
> +++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
> @@ -125,6 +125,26 @@ static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
>  	.vunmap = amdxdna_ubuf_vunmap,
>  };
>  
> +static int readonly_va_entry(struct amdxdna_drm_va_entry *va_ent)
> +{
> +	struct mm_struct *mm = current->mm;
> +	struct vm_area_struct *vma;
> +	int ret;
> +
> +	mmap_read_lock(mm);
> +
> +	vma = find_vma(mm, va_ent->vaddr);
> +	if (!vma ||
> +	    vma->vm_start > va_ent->vaddr ||
> +	    vma->vm_end - va_ent->vaddr < va_ent->len)
> +		ret = -ENOENT;
> +	else
> +		ret = vma->vm_flags & VM_WRITE ? 0 : 1;
> +
> +	mmap_read_unlock(mm);


This looks highly questionable. Drivers should be reaching into the core
MM to create primitives.

I also glanced at the userptr implementation here — it’s quite
questionable as well, especially regarding whether notifier locking /
hmm_range_fault interaction is needed on the driver side.

I’m fairly certain that, with a bit of thought and some extensions to
DRM GPUSVM, amdxdna could build userptr on that layer (Xe does this
wihtout SVM). That would isolate core MM interactions to the common DRM
layer, which I believe the core MM folks would appreciate.

The biggest issue I see is that get_pages() in GPUSVM also performs a
DMA map, which amdxdna doesn’t appear to need. That should be easy
enough to split out. But amdxdna does need locking semantics, notifiers,
etc., which GPUSVM already provides.

I’d rather see GPUSVM expanded for the amdxdna use case so future
drivers can use it as well.

Happy to work with you on this.

Matt

> +	return ret;
> +}
> +
>  struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>  				 u32 num_entries, void __user *va_entries)
>  {
> @@ -134,6 +154,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>  	struct amdxdna_ubuf_priv *ubuf;
>  	u32 npages, start = 0;
>  	struct dma_buf *dbuf;
> +	bool readonly = true;
>  	int i, ret;
>  	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
>  
> @@ -172,6 +193,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>  			ret = -EINVAL;
>  			goto free_ent;
>  		}
> +
> +		/* Pin pages as writable as long as not all entries are read-only. */
> +		if (readonly && readonly_va_entry(&va_ent[i]) != 1)
> +			readonly = false;
>  	}
>  
>  	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
> @@ -194,7 +219,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>  		npages = va_ent[i].len >> PAGE_SHIFT;
>  
>  		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
> -					  FOLL_WRITE | FOLL_LONGTERM,
> +					  (readonly ? 0 : FOLL_WRITE) | FOLL_LONGTERM,
>  					  &ubuf->pages[start]);
>  		if (ret >= 0) {
>  			start += ret;
> @@ -211,7 +236,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>  
>  	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
>  	exp_info.priv = ubuf;
> -	exp_info.flags = O_RDWR | O_CLOEXEC;
> +	exp_info.flags = (readonly ? O_RDONLY : O_RDWR) | O_CLOEXEC;
>  
>  	dbuf = dma_buf_export(&exp_info);
>  	if (IS_ERR(dbuf)) {
> -- 
> 2.34.1
> 

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings
  2026-04-01  4:57 ` Matthew Brost
@ 2026-04-01  5:00   ` Matthew Brost
  2026-04-01 16:51   ` Lizhi Hou
  1 sibling, 0 replies; 8+ messages in thread
From: Matthew Brost @ 2026-04-01  5:00 UTC (permalink / raw)
  To: Lizhi Hou
  Cc: ogabbay, quic_jhugo, dri-devel, mario.limonciello,
	maciej.falkowski, Max Zhen, linux-kernel, sonal.santan

On Tue, Mar 31, 2026 at 09:57:12PM -0700, Matthew Brost wrote:
> On Tue, Mar 31, 2026 at 10:26:35AM -0700, Lizhi Hou wrote:
> > From: Max Zhen <max.zhen@amd.com>
> > 
> > Update the amdxdna user-pointer (ubuf) BO path to support creating buffer
> > objects from read-only user mappings.
> > 
> > Detect read-only VMAs by checking VMA permissions across all user virtual
> > address ranges associated with the BO. When all entries are read-only, pin
> > user pages without FOLL_WRITE and export the resulting dmabuf as read-only
> > (O_RDONLY).
> > 
> > This allows userptr BOs backed by read-only mappings to be safely imported
> > and used without requiring write access, which was previously rejected due
> > to unconditional FOLL_WRITE usage.
> > 
> > Signed-off-by: Max Zhen <max.zhen@amd.com>
> > Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
> > ---
> >  drivers/accel/amdxdna/amdxdna_ubuf.c | 29 ++++++++++++++++++++++++++--
> >  1 file changed, 27 insertions(+), 2 deletions(-)
> > 
> > diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
> > index 4c0647057759..3769210c55cc 100644
> > --- a/drivers/accel/amdxdna/amdxdna_ubuf.c
> > +++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
> > @@ -125,6 +125,26 @@ static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
> >  	.vunmap = amdxdna_ubuf_vunmap,
> >  };
> >  
> > +static int readonly_va_entry(struct amdxdna_drm_va_entry *va_ent)
> > +{
> > +	struct mm_struct *mm = current->mm;
> > +	struct vm_area_struct *vma;
> > +	int ret;
> > +
> > +	mmap_read_lock(mm);
> > +
> > +	vma = find_vma(mm, va_ent->vaddr);
> > +	if (!vma ||
> > +	    vma->vm_start > va_ent->vaddr ||
> > +	    vma->vm_end - va_ent->vaddr < va_ent->len)
> > +		ret = -ENOENT;
> > +	else
> > +		ret = vma->vm_flags & VM_WRITE ? 0 : 1;
> > +
> > +	mmap_read_unlock(mm);
> 
> 
> This looks highly questionable. Drivers should be reaching into the core

s/should/shouldn't

Matt

> MM to create primitives.
> 
> I also glanced at the userptr implementation here — it’s quite
> questionable as well, especially regarding whether notifier locking /
> hmm_range_fault interaction is needed on the driver side.
> 
> I’m fairly certain that, with a bit of thought and some extensions to
> DRM GPUSVM, amdxdna could build userptr on that layer (Xe does this
> without SVM). That would isolate core MM interactions to the common DRM
> layer, which I believe the core MM folks would appreciate.
> 
> The biggest issue I see is that get_pages() in GPUSVM also performs a
> DMA map, which amdxdna doesn’t appear to need. That should be easy
> enough to split out. But amdxdna does need locking semantics, notifiers,
> etc., which GPUSVM already provides.
> 
> I’d rather see GPUSVM expanded for the amdxdna use case so future
> drivers can use it as well.
> 
> Happy to work with you on this.
> 
> Matt
> 
> > +	return ret;
> > +}
> > +
> >  struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
> >  				 u32 num_entries, void __user *va_entries)
> >  {
> > @@ -134,6 +154,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
> >  	struct amdxdna_ubuf_priv *ubuf;
> >  	u32 npages, start = 0;
> >  	struct dma_buf *dbuf;
> > +	bool readonly = true;
> >  	int i, ret;
> >  	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
> >  
> > @@ -172,6 +193,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
> >  			ret = -EINVAL;
> >  			goto free_ent;
> >  		}
> > +
> > +		/* Pin pages as writable as long as not all entries are read-only. */
> > +		if (readonly && readonly_va_entry(&va_ent[i]) != 1)
> > +			readonly = false;
> >  	}
> >  
> >  	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
> > @@ -194,7 +219,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
> >  		npages = va_ent[i].len >> PAGE_SHIFT;
> >  
> >  		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
> > -					  FOLL_WRITE | FOLL_LONGTERM,
> > +					  (readonly ? 0 : FOLL_WRITE) | FOLL_LONGTERM,
> >  					  &ubuf->pages[start]);
> >  		if (ret >= 0) {
> >  			start += ret;
> > @@ -211,7 +236,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
> >  
> >  	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
> >  	exp_info.priv = ubuf;
> > -	exp_info.flags = O_RDWR | O_CLOEXEC;
> > +	exp_info.flags = (readonly ? O_RDONLY : O_RDWR) | O_CLOEXEC;
> >  
> >  	dbuf = dma_buf_export(&exp_info);
> >  	if (IS_ERR(dbuf)) {
> > -- 
> > 2.34.1
> > 

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings
  2026-04-01  4:57 ` Matthew Brost
  2026-04-01  5:00   ` Matthew Brost
@ 2026-04-01 16:51   ` Lizhi Hou
  2026-04-01 20:09     ` Matthew Brost
  1 sibling, 1 reply; 8+ messages in thread
From: Lizhi Hou @ 2026-04-01 16:51 UTC (permalink / raw)
  To: Matthew Brost
  Cc: ogabbay, quic_jhugo, dri-devel, mario.limonciello,
	maciej.falkowski, Max Zhen, linux-kernel, sonal.santan


On 3/31/26 21:57, Matthew Brost wrote:
> On Tue, Mar 31, 2026 at 10:26:35AM -0700, Lizhi Hou wrote:
>> From: Max Zhen <max.zhen@amd.com>
>>
>> Update the amdxdna user-pointer (ubuf) BO path to support creating buffer
>> objects from read-only user mappings.
>>
>> Detect read-only VMAs by checking VMA permissions across all user virtual
>> address ranges associated with the BO. When all entries are read-only, pin
>> user pages without FOLL_WRITE and export the resulting dmabuf as read-only
>> (O_RDONLY).
>>
>> This allows userptr BOs backed by read-only mappings to be safely imported
>> and used without requiring write access, which was previously rejected due
>> to unconditional FOLL_WRITE usage.
>>
>> Signed-off-by: Max Zhen <max.zhen@amd.com>
>> Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
>> ---
>>   drivers/accel/amdxdna/amdxdna_ubuf.c | 29 ++++++++++++++++++++++++++--
>>   1 file changed, 27 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
>> index 4c0647057759..3769210c55cc 100644
>> --- a/drivers/accel/amdxdna/amdxdna_ubuf.c
>> +++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
>> @@ -125,6 +125,26 @@ static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
>>   	.vunmap = amdxdna_ubuf_vunmap,
>>   };
>>   
>> +static int readonly_va_entry(struct amdxdna_drm_va_entry *va_ent)
>> +{
>> +	struct mm_struct *mm = current->mm;
>> +	struct vm_area_struct *vma;
>> +	int ret;
>> +
>> +	mmap_read_lock(mm);
>> +
>> +	vma = find_vma(mm, va_ent->vaddr);
>> +	if (!vma ||
>> +	    vma->vm_start > va_ent->vaddr ||
>> +	    vma->vm_end - va_ent->vaddr < va_ent->len)
>> +		ret = -ENOENT;
>> +	else
>> +		ret = vma->vm_flags & VM_WRITE ? 0 : 1;
>> +
>> +	mmap_read_unlock(mm);
>
> This looks highly questionable. Drivers should be reaching into the core
> MM to create primitives.
>
> I also glanced at the userptr implementation here — it’s quite
> questionable as well, especially regarding whether notifier locking /
> hmm_range_fault interaction is needed on the driver side.

We implemented hmm_range_fault and notifier logic in the amdxdna driver.

https://github.com/torvalds/linux/blob/master/drivers/accel/amdxdna/aie2_ctx.c#L921

The code in amdxdna_ubuf.c is just getting the pages from a user-allocated
buffer. The buffer pointer will not be used for device access.

Instead, user space will do a mmap to get a different buffer pointer and 
register to notifier for device access.

>
> I’m fairly certain that, with a bit of thought and some extensions to
> DRM GPUSVM, amdxdna could build userptr on that layer (Xe does this
> wihtout SVM). That would isolate core MM interactions to the common DRM
> layer, which I believe the core MM folks would appreciate.
>
> The biggest issue I see is that get_pages() in GPUSVM also performs a
> DMA map, which amdxdna doesn’t appear to need. That should be easy
> enough to split out. But amdxdna does need locking semantics, notifiers,
> etc., which GPUSVM already provides.
>
> I’d rather see GPUSVM expanded for the amdxdna use case so future
> drivers can use it as well.
>
> Happy to work with you on this.

It is wonderful to use a common drm layer to handle all the similar 
requests. It will simplify the driver for sure. I will start to work on 
switching to GPUSVM. It might take some time.

In the meanwhile, are you ok to merge this patch for now? This is a 
required feature for our product.


Thanks,

Lizhi

>
> Matt
>
>> +	return ret;
>> +}
>> +
>>   struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>   				 u32 num_entries, void __user *va_entries)
>>   {
>> @@ -134,6 +154,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>   	struct amdxdna_ubuf_priv *ubuf;
>>   	u32 npages, start = 0;
>>   	struct dma_buf *dbuf;
>> +	bool readonly = true;
>>   	int i, ret;
>>   	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
>>   
>> @@ -172,6 +193,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>   			ret = -EINVAL;
>>   			goto free_ent;
>>   		}
>> +
>> +		/* Pin pages as writable as long as not all entries are read-only. */
>> +		if (readonly && readonly_va_entry(&va_ent[i]) != 1)
>> +			readonly = false;
>>   	}
>>   
>>   	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
>> @@ -194,7 +219,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>   		npages = va_ent[i].len >> PAGE_SHIFT;
>>   
>>   		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
>> -					  FOLL_WRITE | FOLL_LONGTERM,
>> +					  (readonly ? 0 : FOLL_WRITE) | FOLL_LONGTERM,
>>   					  &ubuf->pages[start]);
>>   		if (ret >= 0) {
>>   			start += ret;
>> @@ -211,7 +236,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>   
>>   	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
>>   	exp_info.priv = ubuf;
>> -	exp_info.flags = O_RDWR | O_CLOEXEC;
>> +	exp_info.flags = (readonly ? O_RDONLY : O_RDWR) | O_CLOEXEC;
>>   
>>   	dbuf = dma_buf_export(&exp_info);
>>   	if (IS_ERR(dbuf)) {
>> -- 
>> 2.34.1
>>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings
  2026-04-01 16:51   ` Lizhi Hou
@ 2026-04-01 20:09     ` Matthew Brost
  2026-04-01 20:30       ` Lizhi Hou
  2026-04-02  5:24       ` Lizhi Hou
  0 siblings, 2 replies; 8+ messages in thread
From: Matthew Brost @ 2026-04-01 20:09 UTC (permalink / raw)
  To: Lizhi Hou
  Cc: ogabbay, quic_jhugo, dri-devel, mario.limonciello,
	maciej.falkowski, Max Zhen, linux-kernel, sonal.santan

On Wed, Apr 01, 2026 at 09:51:39AM -0700, Lizhi Hou wrote:
> 
> On 3/31/26 21:57, Matthew Brost wrote:
> > On Tue, Mar 31, 2026 at 10:26:35AM -0700, Lizhi Hou wrote:
> > > From: Max Zhen <max.zhen@amd.com>
> > > 
> > > Update the amdxdna user-pointer (ubuf) BO path to support creating buffer
> > > objects from read-only user mappings.
> > > 
> > > Detect read-only VMAs by checking VMA permissions across all user virtual
> > > address ranges associated with the BO. When all entries are read-only, pin
> > > user pages without FOLL_WRITE and export the resulting dmabuf as read-only
> > > (O_RDONLY).
> > > 
> > > This allows userptr BOs backed by read-only mappings to be safely imported
> > > and used without requiring write access, which was previously rejected due
> > > to unconditional FOLL_WRITE usage.
> > > 
> > > Signed-off-by: Max Zhen <max.zhen@amd.com>
> > > Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
> > > ---
> > >   drivers/accel/amdxdna/amdxdna_ubuf.c | 29 ++++++++++++++++++++++++++--
> > >   1 file changed, 27 insertions(+), 2 deletions(-)
> > > 
> > > diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
> > > index 4c0647057759..3769210c55cc 100644
> > > --- a/drivers/accel/amdxdna/amdxdna_ubuf.c
> > > +++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
> > > @@ -125,6 +125,26 @@ static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
> > >   	.vunmap = amdxdna_ubuf_vunmap,
> > >   };
> > > +static int readonly_va_entry(struct amdxdna_drm_va_entry *va_ent)
> > > +{
> > > +	struct mm_struct *mm = current->mm;
> > > +	struct vm_area_struct *vma;
> > > +	int ret;
> > > +
> > > +	mmap_read_lock(mm);
> > > +
> > > +	vma = find_vma(mm, va_ent->vaddr);
> > > +	if (!vma ||
> > > +	    vma->vm_start > va_ent->vaddr ||
> > > +	    vma->vm_end - va_ent->vaddr < va_ent->len)
> > > +		ret = -ENOENT;
> > > +	else
> > > +		ret = vma->vm_flags & VM_WRITE ? 0 : 1;
> > > +
> > > +	mmap_read_unlock(mm);
> > 
> > This looks highly questionable. Drivers should be reaching into the core
> > MM to create primitives.
> > 
> > I also glanced at the userptr implementation here — it’s quite
> > questionable as well, especially regarding whether notifier locking /
> > hmm_range_fault interaction is needed on the driver side.
> 
> We implemented hmm_range_fault and notifier logic in amdxdna driver.
> 
> https://github.com/torvalds/linux/blob/master/drivers/accel/amdxdna/aie2_ctx.c#L921
> 
> The code in amdxdna_ubuf.c is just getting the pages from user allocated
> buffer. The buffer pointer will not be used for device access.
> 
> Instead, user space will do a mmap to get a different buffer pointer and
> register to notifier for device access.
> 
> > 
> > I’m fairly certain that, with a bit of thought and some extensions to
> > DRM GPUSVM, amdxdna could build userptr on that layer (Xe does this
> > wihtout SVM). That would isolate core MM interactions to the common DRM
> > layer, which I believe the core MM folks would appreciate.
> > 
> > The biggest issue I see is that get_pages() in GPUSVM also performs a
> > DMA map, which amdxdna doesn’t appear to need. That should be easy
> > enough to split out. But amdxdna does need locking semantics, notifiers,
> > etc., which GPUSVM already provides.
> > 
> > I’d rather see GPUSVM expanded for the amdxdna use case so future
> > drivers can use it as well.
> > 
> > Happy to work with you on this.
> 
> It is wonderful to use a common drm layer to handle all the similar
> requests. It will simplify the driver for sure. I will start to work on
> switching to GPUSVM. It might be taking some time.
> 
> In the meanwhile, are you ok to merge this patch for now? This is a required
> feature for our product.
> 

It’s not my intent to hold up merging anything required for your driver
or product timelines — rather, I want the DRM driver/accel stack to
converge on a common implementation for SVM/userptr, since this is a
really tricky area to get right.

I’m sure we’re still missing some pieces in GPUSVM to make this feasible
for all drivers, but we’re very open to refactors that would enable it.
For example, AMDGPU is also looking into using this framework, and we’ve
already discussed some refactoring there.

Let me know if you have any questions about GPUSVM or about the
refactors your driver would need to make use of it.

Matt

> 
> Thanks,
> 
> Lizhi
> 
> > 
> > Matt
> > 
> > > +	return ret;
> > > +}
> > > +
> > >   struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
> > >   				 u32 num_entries, void __user *va_entries)
> > >   {
> > > @@ -134,6 +154,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
> > >   	struct amdxdna_ubuf_priv *ubuf;
> > >   	u32 npages, start = 0;
> > >   	struct dma_buf *dbuf;
> > > +	bool readonly = true;
> > >   	int i, ret;
> > >   	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
> > > @@ -172,6 +193,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
> > >   			ret = -EINVAL;
> > >   			goto free_ent;
> > >   		}
> > > +
> > > +		/* Pin pages as writable as long as not all entries are read-only. */
> > > +		if (readonly && readonly_va_entry(&va_ent[i]) != 1)
> > > +			readonly = false;
> > >   	}
> > >   	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
> > > @@ -194,7 +219,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
> > >   		npages = va_ent[i].len >> PAGE_SHIFT;
> > >   		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
> > > -					  FOLL_WRITE | FOLL_LONGTERM,
> > > +					  (readonly ? 0 : FOLL_WRITE) | FOLL_LONGTERM,
> > >   					  &ubuf->pages[start]);
> > >   		if (ret >= 0) {
> > >   			start += ret;
> > > @@ -211,7 +236,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
> > >   	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
> > >   	exp_info.priv = ubuf;
> > > -	exp_info.flags = O_RDWR | O_CLOEXEC;
> > > +	exp_info.flags = (readonly ? O_RDONLY : O_RDWR) | O_CLOEXEC;
> > >   	dbuf = dma_buf_export(&exp_info);
> > >   	if (IS_ERR(dbuf)) {
> > > -- 
> > > 2.34.1
> > > 

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings
  2026-04-01 20:09     ` Matthew Brost
@ 2026-04-01 20:30       ` Lizhi Hou
  2026-04-02  5:24       ` Lizhi Hou
  1 sibling, 0 replies; 8+ messages in thread
From: Lizhi Hou @ 2026-04-01 20:30 UTC (permalink / raw)
  To: Matthew Brost
  Cc: ogabbay, quic_jhugo, dri-devel, mario.limonciello,
	maciej.falkowski, Max Zhen, linux-kernel, sonal.santan


On 4/1/26 13:09, Matthew Brost wrote:
> On Wed, Apr 01, 2026 at 09:51:39AM -0700, Lizhi Hou wrote:
>> On 3/31/26 21:57, Matthew Brost wrote:
>>> On Tue, Mar 31, 2026 at 10:26:35AM -0700, Lizhi Hou wrote:
>>>> From: Max Zhen <max.zhen@amd.com>
>>>>
>>>> Update the amdxdna user-pointer (ubuf) BO path to support creating buffer
>>>> objects from read-only user mappings.
>>>>
>>>> Detect read-only VMAs by checking VMA permissions across all user virtual
>>>> address ranges associated with the BO. When all entries are read-only, pin
>>>> user pages without FOLL_WRITE and export the resulting dmabuf as read-only
>>>> (O_RDONLY).
>>>>
>>>> This allows userptr BOs backed by read-only mappings to be safely imported
>>>> and used without requiring write access, which was previously rejected due
>>>> to unconditional FOLL_WRITE usage.
>>>>
>>>> Signed-off-by: Max Zhen <max.zhen@amd.com>
>>>> Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
>>>> ---
>>>>    drivers/accel/amdxdna/amdxdna_ubuf.c | 29 ++++++++++++++++++++++++++--
>>>>    1 file changed, 27 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
>>>> index 4c0647057759..3769210c55cc 100644
>>>> --- a/drivers/accel/amdxdna/amdxdna_ubuf.c
>>>> +++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
>>>> @@ -125,6 +125,26 @@ static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
>>>>    	.vunmap = amdxdna_ubuf_vunmap,
>>>>    };
>>>> +static int readonly_va_entry(struct amdxdna_drm_va_entry *va_ent)
>>>> +{
>>>> +	struct mm_struct *mm = current->mm;
>>>> +	struct vm_area_struct *vma;
>>>> +	int ret;
>>>> +
>>>> +	mmap_read_lock(mm);
>>>> +
>>>> +	vma = find_vma(mm, va_ent->vaddr);
>>>> +	if (!vma ||
>>>> +	    vma->vm_start > va_ent->vaddr ||
>>>> +	    vma->vm_end - va_ent->vaddr < va_ent->len)
>>>> +		ret = -ENOENT;
>>>> +	else
>>>> +		ret = vma->vm_flags & VM_WRITE ? 0 : 1;
>>>> +
>>>> +	mmap_read_unlock(mm);
>>> This looks highly questionable. Drivers shouldn't be reaching into the core
>>> MM to create primitives.
>>>
>>> I also glanced at the userptr implementation here — it’s quite
>>> questionable as well, especially regarding whether notifier locking /
>>> hmm_range_fault interaction is needed on the driver side.
>> We implemented hmm_range_fault and notifier logic in amdxdna driver.
>>
>> https://github.com/torvalds/linux/blob/master/drivers/accel/amdxdna/aie2_ctx.c#L921
>>
>> The code in amdxdna_ubuf.c is just getting the pages from user allocated
>> buffer. The buffer pointer will not be used for device access.
>>
>> Instead, user space will do a mmap to get a different buffer pointer and
>> register to notifier for device access.
>>
>>> I’m fairly certain that, with a bit of thought and some extensions to
>>> DRM GPUSVM, amdxdna could build userptr on that layer (Xe does this
>>> without SVM). That would isolate core MM interactions to the common DRM
>>> layer, which I believe the core MM folks would appreciate.
>>>
>>> The biggest issue I see is that get_pages() in GPUSVM also performs a
>>> DMA map, which amdxdna doesn’t appear to need. That should be easy
>>> enough to split out. But amdxdna does need locking semantics, notifiers,
>>> etc., which GPUSVM already provides.
>>>
>>> I’d rather see GPUSVM expanded for the amdxdna use case so future
>>> drivers can use it as well.
>>>
>>> Happy to work with you on this.
>> It is wonderful to use a common drm layer to handle all the similar
>> requests. It will simplify the driver for sure. I will start to work on
>> switching to GPUSVM. It might take some time.
>>
>> In the meanwhile, are you ok to merge this patch for now? This is a required
>> feature for our product.
>>
> It’s not my intent to hold up merging anything required for your driver
> or product timelines — rather, I want the DRM driver/accel stack to
> converge on a common implementation for SVM/userptr, since this is a
> really tricky area to get right.
>
> I’m sure we’re still missing some pieces in GPUSVM to make this feasible
> for all drivers, but we’re very open to refactors that would enable it.
> For example, AMDGPU is also looking into using this framework, and we’ve
> already discussed some refactoring there.
>
> Let me know if you have any questions about GPUSVM or about the
> refactors your driver would need to make use of it.

Cool. Thanks a lot. :) Yes, it is a tricky area and I am glad to know it
would be handled in the drm layer.

Lizhi

>
> Matt
>
>> Thanks,
>>
>> Lizhi
>>
>>> Matt
>>>
>>>> +	return ret;
>>>> +}
>>>> +
>>>>    struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>>>    				 u32 num_entries, void __user *va_entries)
>>>>    {
>>>> @@ -134,6 +154,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>>>    	struct amdxdna_ubuf_priv *ubuf;
>>>>    	u32 npages, start = 0;
>>>>    	struct dma_buf *dbuf;
>>>> +	bool readonly = true;
>>>>    	int i, ret;
>>>>    	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
>>>> @@ -172,6 +193,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>>>    			ret = -EINVAL;
>>>>    			goto free_ent;
>>>>    		}
>>>> +
>>>> +		/* Pin pages as writable as long as not all entries are read-only. */
>>>> +		if (readonly && readonly_va_entry(&va_ent[i]) != 1)
>>>> +			readonly = false;
>>>>    	}
>>>>    	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
>>>> @@ -194,7 +219,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>>>    		npages = va_ent[i].len >> PAGE_SHIFT;
>>>>    		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
>>>> -					  FOLL_WRITE | FOLL_LONGTERM,
>>>> +					  (readonly ? 0 : FOLL_WRITE) | FOLL_LONGTERM,
>>>>    					  &ubuf->pages[start]);
>>>>    		if (ret >= 0) {
>>>>    			start += ret;
>>>> @@ -211,7 +236,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>>>    	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
>>>>    	exp_info.priv = ubuf;
>>>> -	exp_info.flags = O_RDWR | O_CLOEXEC;
>>>> +	exp_info.flags = (readonly ? O_RDONLY : O_RDWR) | O_CLOEXEC;
>>>>    	dbuf = dma_buf_export(&exp_info);
>>>>    	if (IS_ERR(dbuf)) {
>>>> -- 
>>>> 2.34.1
>>>>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings
  2026-04-01 20:09     ` Matthew Brost
  2026-04-01 20:30       ` Lizhi Hou
@ 2026-04-02  5:24       ` Lizhi Hou
  1 sibling, 0 replies; 8+ messages in thread
From: Lizhi Hou @ 2026-04-02  5:24 UTC (permalink / raw)
  To: Matthew Brost
  Cc: ogabbay, quic_jhugo, dri-devel, mario.limonciello,
	maciej.falkowski, Max Zhen, linux-kernel, sonal.santan

Applied to drm-misc-next

On 4/1/26 13:09, Matthew Brost wrote:
> On Wed, Apr 01, 2026 at 09:51:39AM -0700, Lizhi Hou wrote:
>> On 3/31/26 21:57, Matthew Brost wrote:
>>> On Tue, Mar 31, 2026 at 10:26:35AM -0700, Lizhi Hou wrote:
>>>> From: Max Zhen <max.zhen@amd.com>
>>>>
>>>> Update the amdxdna user-pointer (ubuf) BO path to support creating buffer
>>>> objects from read-only user mappings.
>>>>
>>>> Detect read-only VMAs by checking VMA permissions across all user virtual
>>>> address ranges associated with the BO. When all entries are read-only, pin
>>>> user pages without FOLL_WRITE and export the resulting dmabuf as read-only
>>>> (O_RDONLY).
>>>>
>>>> This allows userptr BOs backed by read-only mappings to be safely imported
>>>> and used without requiring write access, which was previously rejected due
>>>> to unconditional FOLL_WRITE usage.
>>>>
>>>> Signed-off-by: Max Zhen <max.zhen@amd.com>
>>>> Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
>>>> ---
>>>>    drivers/accel/amdxdna/amdxdna_ubuf.c | 29 ++++++++++++++++++++++++++--
>>>>    1 file changed, 27 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
>>>> index 4c0647057759..3769210c55cc 100644
>>>> --- a/drivers/accel/amdxdna/amdxdna_ubuf.c
>>>> +++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
>>>> @@ -125,6 +125,26 @@ static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
>>>>    	.vunmap = amdxdna_ubuf_vunmap,
>>>>    };
>>>> +static int readonly_va_entry(struct amdxdna_drm_va_entry *va_ent)
>>>> +{
>>>> +	struct mm_struct *mm = current->mm;
>>>> +	struct vm_area_struct *vma;
>>>> +	int ret;
>>>> +
>>>> +	mmap_read_lock(mm);
>>>> +
>>>> +	vma = find_vma(mm, va_ent->vaddr);
>>>> +	if (!vma ||
>>>> +	    vma->vm_start > va_ent->vaddr ||
>>>> +	    vma->vm_end - va_ent->vaddr < va_ent->len)
>>>> +		ret = -ENOENT;
>>>> +	else
>>>> +		ret = vma->vm_flags & VM_WRITE ? 0 : 1;
>>>> +
>>>> +	mmap_read_unlock(mm);
>>> This looks highly questionable. Drivers shouldn't be reaching into the core
>>> MM to create primitives.
>>>
>>> I also glanced at the userptr implementation here — it’s quite
>>> questionable as well, especially regarding whether notifier locking /
>>> hmm_range_fault interaction is needed on the driver side.
>> We implemented hmm_range_fault and notifier logic in amdxdna driver.
>>
>> https://github.com/torvalds/linux/blob/master/drivers/accel/amdxdna/aie2_ctx.c#L921
>>
>> The code in amdxdna_ubuf.c is just getting the pages from user allocated
>> buffer. The buffer pointer will not be used for device access.
>>
>> Instead, user space will do a mmap to get a different buffer pointer and
>> register to notifier for device access.
>>
>>> I’m fairly certain that, with a bit of thought and some extensions to
>>> DRM GPUSVM, amdxdna could build userptr on that layer (Xe does this
>>> without SVM). That would isolate core MM interactions to the common DRM
>>> layer, which I believe the core MM folks would appreciate.
>>>
>>> The biggest issue I see is that get_pages() in GPUSVM also performs a
>>> DMA map, which amdxdna doesn’t appear to need. That should be easy
>>> enough to split out. But amdxdna does need locking semantics, notifiers,
>>> etc., which GPUSVM already provides.
>>>
>>> I’d rather see GPUSVM expanded for the amdxdna use case so future
>>> drivers can use it as well.
>>>
>>> Happy to work with you on this.
>> It is wonderful to use a common drm layer to handle all the similar
>> requests. It will simplify the driver for sure. I will start to work on
>> switching to GPUSVM. It might take some time.
>>
>> In the meanwhile, are you ok to merge this patch for now? This is a required
>> feature for our product.
>>
> It’s not my intent to hold up merging anything required for your driver
> or product timelines — rather, I want the DRM driver/accel stack to
> converge on a common implementation for SVM/userptr, since this is a
> really tricky area to get right.
>
> I’m sure we’re still missing some pieces in GPUSVM to make this feasible
> for all drivers, but we’re very open to refactors that would enable it.
> For example, AMDGPU is also looking into using this framework, and we’ve
> already discussed some refactoring there.
>
> Let me know if you have any questions about GPUSVM or about the
> refactors your driver would need to make use of it.
>
> Matt
>
>> Thanks,
>>
>> Lizhi
>>
>>> Matt
>>>
>>>> +	return ret;
>>>> +}
>>>> +
>>>>    struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>>>    				 u32 num_entries, void __user *va_entries)
>>>>    {
>>>> @@ -134,6 +154,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>>>    	struct amdxdna_ubuf_priv *ubuf;
>>>>    	u32 npages, start = 0;
>>>>    	struct dma_buf *dbuf;
>>>> +	bool readonly = true;
>>>>    	int i, ret;
>>>>    	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
>>>> @@ -172,6 +193,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>>>    			ret = -EINVAL;
>>>>    			goto free_ent;
>>>>    		}
>>>> +
>>>> +		/* Pin pages as writable as long as not all entries are read-only. */
>>>> +		if (readonly && readonly_va_entry(&va_ent[i]) != 1)
>>>> +			readonly = false;
>>>>    	}
>>>>    	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
>>>> @@ -194,7 +219,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>>>    		npages = va_ent[i].len >> PAGE_SHIFT;
>>>>    		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
>>>> -					  FOLL_WRITE | FOLL_LONGTERM,
>>>> +					  (readonly ? 0 : FOLL_WRITE) | FOLL_LONGTERM,
>>>>    					  &ubuf->pages[start]);
>>>>    		if (ret >= 0) {
>>>>    			start += ret;
>>>> @@ -211,7 +236,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>>>>    	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
>>>>    	exp_info.priv = ubuf;
>>>> -	exp_info.flags = O_RDWR | O_CLOEXEC;
>>>> +	exp_info.flags = (readonly ? O_RDONLY : O_RDWR) | O_CLOEXEC;
>>>>    	dbuf = dma_buf_export(&exp_info);
>>>>    	if (IS_ERR(dbuf)) {
>>>> -- 
>>>> 2.34.1
>>>>

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2026-04-02  5:24 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-03-31 17:26 [PATCH V2] accel/amdxdna: Support read-only user-pointer BO mappings Lizhi Hou
2026-03-31 17:52 ` Mario Limonciello
2026-04-01  4:57 ` Matthew Brost
2026-04-01  5:00   ` Matthew Brost
2026-04-01 16:51   ` Lizhi Hou
2026-04-01 20:09     ` Matthew Brost
2026-04-01 20:30       ` Lizhi Hou
2026-04-02  5:24       ` Lizhi Hou

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox