public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/amdxdna: fix pinned_vm accounting and rlimit rollback
@ 2026-05-02  3:17 Vineet Agarwal
  2026-05-03  4:49 ` [PATCH] drm/amdxdna: fix pinned_vm accounting and error handling in user buffer pinning Vineet Agarwal
  2026-05-03  5:50 ` [PATCH v3] " Vineet Agarwal
  0 siblings, 2 replies; 4+ messages in thread
From: Vineet Agarwal @ 2026-05-02  3:17 UTC (permalink / raw)
  To: mamin506, lizhi.hou, ogabbay; +Cc: dri-devel, linux-kernel, Vineet Agarwal

amdxdna_get_ubuf() incorrectly accounted mm->pinned_vm using the requested
number of pages instead of the actual number of pages successfully pinned
by pin_user_pages_fast().

Since pin_user_pages_fast() can return partial success, this led to incorrect
mm->pinned_vm tracking and a potential imbalance between pinned and unpinned
memory state.

Fix this by:
  - tracking the actual number of successfully pinned pages (pinned_total)
  - updating mm->pinned_vm only after successful pinning completes
  - adding proper rollback on rlimit failure path to maintain symmetry

This ensures mm->pinned_vm always reflects actual pinned memory usage and
keeps accounting consistent with other kernel subsystems such as RDMA,
vdpa, and iommufd.

Signed-off-by: Vineet Agarwal <agarwal.vineet2006@gmail.com>
---
 drivers/accel/amdxdna/amdxdna_ubuf.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
index fb999aa25318..ad609846ea30 100644
--- a/drivers/accel/amdxdna/amdxdna_ubuf.c
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -132,7 +132,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 	unsigned long lock_limit, new_pinned;
 	struct amdxdna_drm_va_entry *va_ent;
 	struct amdxdna_ubuf_priv *ubuf;
-	u32 npages, start = 0;
+	u32 npages, start = 0, pinned_total = 0;
 	struct dma_buf *dbuf;
 	int i, ret;
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -176,13 +176,6 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 
 	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
-	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
-		XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
-			 new_pinned, lock_limit, capable(CAP_IPC_LOCK));
-		ret = -ENOMEM;
-		goto sub_pin_cnt;
-	}
 
 	ubuf->pages = kvmalloc_objs(*ubuf->pages, ubuf->nr_pages);
 	if (!ubuf->pages) {
@@ -203,6 +196,16 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 		}
 
 		start += ret;
+		pinned_total += ret;
+	}
+
+	new_pinned = atomic64_add_return(pinned_total,
+				&ubuf->mm->pinned_vm);
+
+	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+		atomic64_sub(pinned_total, &ubuf->mm->pinned_vm);
+		ret = -ENOMEM;
+		goto destroy_pages;
 	}
 
 	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
-- 
2.54.0


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH] drm/amdxdna: fix pinned_vm accounting and error handling in user buffer pinning
  2026-05-02  3:17 [PATCH] drm/amdxdna: fix pinned_vm accounting and rlimit rollback Vineet Agarwal
@ 2026-05-03  4:49 ` Vineet Agarwal
  2026-05-03  5:50 ` [PATCH v3] " Vineet Agarwal
  1 sibling, 0 replies; 4+ messages in thread
From: Vineet Agarwal @ 2026-05-03  4:49 UTC (permalink / raw)
  To: mamin506, lizhi.hou, ogabbay; +Cc: dri-devel, linux-kernel, Vineet Agarwal

amdxdna_get_ubuf() incorrectly accounted mm->pinned_vm using the
requested number of pages before pin_user_pages_fast() completed.

Additionally, the RLIMIT_MEMLOCK check was performed after pinning,
and error handling for partial pinning could lead to inconsistent
cleanup.

Fix this by:
  - checking RLIMIT_MEMLOCK before attempting to pin pages
  - ensuring partially pinned pages are properly tracked and released
  - accounting mm->pinned_vm only after successful pinning
  - removing incorrect error-path accounting and double-subtraction

The RLIMIT_MEMLOCK check is performed conservatively against the
requested number of pages to prevent excessive pinning attempts.

This ensures mm->pinned_vm remains consistent and aligns the driver
with expected GUP and memory accounting semantics.

Signed-off-by: Vineet Agarwal <agarwal.vineet2006@gmail.com>

Changes in v2:
- Move RLIMIT_MEMLOCK check before pinning
- Fix partial pin handling and cleanup paths
- Remove incorrect pinned_vm accounting in error paths
- Ensure symmetric accounting between pin and release
- Drop unused pinned_total variable

---
 drivers/accel/amdxdna/amdxdna_ubuf.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
index fb999aa25318..571708f78cfe 100644
--- a/drivers/accel/amdxdna/amdxdna_ubuf.c
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -129,7 +129,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 				 u32 num_entries, void __user *va_entries)
 {
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
-	unsigned long lock_limit, new_pinned;
+	unsigned long lock_limit;
 	struct amdxdna_drm_va_entry *va_ent;
 	struct amdxdna_ubuf_priv *ubuf;
 	u32 npages, start = 0;
@@ -176,18 +176,17 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 
 	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
-	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
-		XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
-			 new_pinned, lock_limit, capable(CAP_IPC_LOCK));
+
+	if (ubuf->nr_pages + atomic64_read(&ubuf->mm->pinned_vm) > lock_limit &&
+		!capable(CAP_IPC_LOCK)) {
 		ret = -ENOMEM;
-		goto sub_pin_cnt;
+		goto free_ent;
 	}
 
 	ubuf->pages = kvmalloc_objs(*ubuf->pages, ubuf->nr_pages);
 	if (!ubuf->pages) {
 		ret = -ENOMEM;
-		goto sub_pin_cnt;
+		goto free_ent;
 	}
 
 	for (i = 0; i < num_entries; i++) {
@@ -196,15 +195,17 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
 					  FOLL_WRITE | FOLL_LONGTERM,
 					  &ubuf->pages[start]);
-		if (ret < 0 || ret != npages) {
+		if (ret < 0)
+			goto destroy_pages;
+		start += ret;
+		if (ret != npages) {
 			ret = -ENOMEM;
-			XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
 			goto destroy_pages;
 		}
-
-		start += ret;
 	}
 
+	atomic64_add(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+
 	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
 	exp_info.priv = ubuf;
 	exp_info.flags = O_RDWR | O_CLOEXEC;
@@ -222,8 +223,6 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 	if (start)
 		unpin_user_pages(ubuf->pages, start);
 	kvfree(ubuf->pages);
-sub_pin_cnt:
-	atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
 free_ent:
 	kvfree(va_ent);
 free_ubuf:
-- 
2.54.0


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH v3] drm/amdxdna: fix pinned_vm accounting and error handling in user buffer pinning
  2026-05-02  3:17 [PATCH] drm/amdxdna: fix pinned_vm accounting and rlimit rollback Vineet Agarwal
  2026-05-03  4:49 ` [PATCH] drm/amdxdna: fix pinned_vm accounting and error handling in user buffer pinning Vineet Agarwal
@ 2026-05-03  5:50 ` Vineet Agarwal
  2026-05-04 16:05   ` Lizhi Hou
  1 sibling, 1 reply; 4+ messages in thread
From: Vineet Agarwal @ 2026-05-03  5:50 UTC (permalink / raw)
  To: mamin506, lizhi.hou, ogabbay; +Cc: dri-devel, linux-kernel, Vineet Agarwal

amdxdna_get_ubuf() incorrectly accounted mm->pinned_vm using the
requested number of pages before pin_user_pages_fast() completed.

Since pin_user_pages_fast() can return partial success, this could
lead to incorrect accounting and inconsistent cleanup on failure.

Additionally, the RLIMIT_MEMLOCK check was performed after pinning,
allowing excessive pin attempts before validation.

Fix this by:
  - checking RLIMIT_MEMLOCK before attempting to pin pages
  - handling partial pinning correctly and ensuring proper cleanup
  - updating mm->pinned_vm only after all pages are successfully pinned
  - removing incorrect error-path accounting and double-subtraction

Also fix missing rollback when dma_buf_export() fails, which could
leave mm->pinned_vm incremented without a corresponding release.

This ensures correct pinned memory accounting and consistent error
handling aligned with other subsystems using GUP.

Signed-off-by: Vineet Agarwal <agarwal.vineet2006@gmail.com>

Changes in v3:
- Fix missing pinned_vm rollback when dma_buf_export() fails
---
 drivers/accel/amdxdna/amdxdna_ubuf.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
index fb999aa25318..efce6b94fb0c 100644
--- a/drivers/accel/amdxdna/amdxdna_ubuf.c
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -129,7 +129,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 				 u32 num_entries, void __user *va_entries)
 {
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
-	unsigned long lock_limit, new_pinned;
+	unsigned long lock_limit;
 	struct amdxdna_drm_va_entry *va_ent;
 	struct amdxdna_ubuf_priv *ubuf;
 	u32 npages, start = 0;
@@ -176,18 +176,17 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 
 	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
-	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
-		XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
-			 new_pinned, lock_limit, capable(CAP_IPC_LOCK));
+
+	if (ubuf->nr_pages + atomic64_read(&ubuf->mm->pinned_vm) > lock_limit &&
+		!capable(CAP_IPC_LOCK)) {
 		ret = -ENOMEM;
-		goto sub_pin_cnt;
+		goto free_ent;
 	}
 
 	ubuf->pages = kvmalloc_objs(*ubuf->pages, ubuf->nr_pages);
 	if (!ubuf->pages) {
 		ret = -ENOMEM;
-		goto sub_pin_cnt;
+		goto free_ent;
 	}
 
 	for (i = 0; i < num_entries; i++) {
@@ -196,15 +195,17 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
 					  FOLL_WRITE | FOLL_LONGTERM,
 					  &ubuf->pages[start]);
-		if (ret < 0 || ret != npages) {
+		if (ret < 0)
+			goto destroy_pages;
+		start += ret;
+		if (ret != npages) {
 			ret = -ENOMEM;
-			XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
 			goto destroy_pages;
 		}
-
-		start += ret;
 	}
 
+	atomic64_add(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+
 	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
 	exp_info.priv = ubuf;
 	exp_info.flags = O_RDWR | O_CLOEXEC;
@@ -212,6 +213,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 	dbuf = dma_buf_export(&exp_info);
 	if (IS_ERR(dbuf)) {
 		ret = PTR_ERR(dbuf);
+		atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
 		goto destroy_pages;
 	}
 	kvfree(va_ent);
@@ -222,8 +224,6 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 	if (start)
 		unpin_user_pages(ubuf->pages, start);
 	kvfree(ubuf->pages);
-sub_pin_cnt:
-	atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
 free_ent:
 	kvfree(va_ent);
 free_ubuf:
-- 
2.54.0


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH v3] drm/amdxdna: fix pinned_vm accounting and error handling in user buffer pinning
  2026-05-03  5:50 ` [PATCH v3] " Vineet Agarwal
@ 2026-05-04 16:05   ` Lizhi Hou
  0 siblings, 0 replies; 4+ messages in thread
From: Lizhi Hou @ 2026-05-04 16:05 UTC (permalink / raw)
  To: Vineet Agarwal, mamin506, ogabbay; +Cc: dri-devel, linux-kernel


On 5/2/26 22:50, Vineet Agarwal wrote:
> [You don't often get email from agarwal.vineet2006@gmail.com. Learn why this is important at https://aka.ms/LearnAboutSenderIdentification ]
>
> amdxdna_get_ubuf() incorrectly accounted mm->pinned_vm using the
> requested number of pages before pin_user_pages_fast() completed.
>
> Since pin_user_pages_fast() can return partial success, this could
> lead to incorrect accounting and inconsistent cleanup on failure.

If pin_user_pages_fast() returns partial success, amdxdna_get_ubuf 
returns and pinned_vm will be restored. I do not think it is an issue here.

>
> Additionally, the RLIMIT_MEMLOCK check was performed after pinning,
> allowing excessive pin attempts before validation.

The RLIMIT_MEMLOCK check is on the line

    if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))

which is before pin_user_pages_fast. I do not see anything to fix.

>
> Fix this by:
>    - checking RLIMIT_MEMLOCK before attempting to pin pages
>    - handling partial pinning correctly and ensuring proper cleanup
>    - updating mm->pinned_vm only after all pages are successfully pinned
>    - removing incorrect error-path accounting and double-subtraction
>
> Also fix missing rollback when dma_buf_export() fails, which could
> leave mm->pinned_vm incremented without a corresponding release.

When dma_buf_export() fails, it jumps to destroy_pages, which cleans up
everything, including restoring pinned_vm.


Lizhi

>
> This ensures correct pinned memory accounting and consistent error
> handling aligned with other subsystems using GUP.
>
> Signed-off-by: Vineet Agarwal <agarwal.vineet2006@gmail.com>
>
> Changes in v3:
> - Fix missing pinned_vm rollback when dma_buf_export() fails
> ---
>   drivers/accel/amdxdna/amdxdna_ubuf.c | 26 +++++++++++++-------------
>   1 file changed, 13 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
> index fb999aa25318..efce6b94fb0c 100644
> --- a/drivers/accel/amdxdna/amdxdna_ubuf.c
> +++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
> @@ -129,7 +129,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>                                   u32 num_entries, void __user *va_entries)
>   {
>          struct amdxdna_dev *xdna = to_xdna_dev(dev);
> -       unsigned long lock_limit, new_pinned;
> +       unsigned long lock_limit;
>          struct amdxdna_drm_va_entry *va_ent;
>          struct amdxdna_ubuf_priv *ubuf;
>          u32 npages, start = 0;
> @@ -176,18 +176,17 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>
>          ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
>          lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> -       new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
> -       if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
> -               XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
> -                        new_pinned, lock_limit, capable(CAP_IPC_LOCK));
> +
> +       if (ubuf->nr_pages + atomic64_read(&ubuf->mm->pinned_vm) > lock_limit &&
> +               !capable(CAP_IPC_LOCK)) {
>                  ret = -ENOMEM;
> -               goto sub_pin_cnt;
> +               goto free_ent;
>          }
>
>          ubuf->pages = kvmalloc_objs(*ubuf->pages, ubuf->nr_pages);
>          if (!ubuf->pages) {
>                  ret = -ENOMEM;
> -               goto sub_pin_cnt;
> +               goto free_ent;
>          }
>
>          for (i = 0; i < num_entries; i++) {
> @@ -196,15 +195,17 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>                  ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
>                                            FOLL_WRITE | FOLL_LONGTERM,
>                                            &ubuf->pages[start]);
> -               if (ret < 0 || ret != npages) {
> +               if (ret < 0)
> +                       goto destroy_pages;
> +               start += ret;
> +               if (ret != npages) {
>                          ret = -ENOMEM;
> -                       XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
>                          goto destroy_pages;
>                  }
> -
> -               start += ret;
>          }
>
> +       atomic64_add(ubuf->nr_pages, &ubuf->mm->pinned_vm);
> +
>          exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
>          exp_info.priv = ubuf;
>          exp_info.flags = O_RDWR | O_CLOEXEC;
> @@ -212,6 +213,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>          dbuf = dma_buf_export(&exp_info);
>          if (IS_ERR(dbuf)) {
>                  ret = PTR_ERR(dbuf);
> +               atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
>                  goto destroy_pages;
>          }
>          kvfree(va_ent);
> @@ -222,8 +224,6 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
>          if (start)
>                  unpin_user_pages(ubuf->pages, start);
>          kvfree(ubuf->pages);
> -sub_pin_cnt:
> -       atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
>   free_ent:
>          kvfree(va_ent);
>   free_ubuf:
> --
> 2.54.0
>

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-05-04 16:05 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-05-02  3:17 [PATCH] drm/amdxdna: fix pinned_vm accounting and rlimit rollback Vineet Agarwal
2026-05-03  4:49 ` [PATCH] drm/amdxdna: fix pinned_vm accounting and error handling in user buffer pinning Vineet Agarwal
2026-05-03  5:50 ` [PATCH v3] " Vineet Agarwal
2026-05-04 16:05   ` Lizhi Hou

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox