AMD-GFX Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v4] drm/amdkfd: Use huge page size to check split svm range alignment
@ 2025-11-20 15:50 Xiaogang.Chen
  2025-11-24 14:55 ` Chen, Xiaogang
  0 siblings, 1 reply; 3+ messages in thread
From: Xiaogang.Chen @ 2025-11-20 15:50 UTC (permalink / raw)
  To: amd-gfx; +Cc: Xiaogang Chen

From: Xiaogang Chen <xiaogang.chen@amd.com>

Fixes: 7ef6b2d4b7e5 ("drm/amdkfd: remap unaligned svm ranges that have split")

When splitting svm ranges that have been mapped using huge pages, we should use
the huge page size (2MB) to check the split range alignment, not
prange->granularity, which is the migration granularity.

Signed-off-by: Xiaogang Chen <xiaogang.chen@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 46 +++++++++++++++++++---------
 1 file changed, 32 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 521c14c7a789..7fe9d569d416 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1144,30 +1144,48 @@ static int
 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
 		     struct list_head *insert_list, struct list_head *remap_list)
 {
+	unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
+	unsigned long start_align = ALIGN(prange->start, 512);
+	bool huge_page_mapping = last_align_down > start_align;
 	struct svm_range *tail = NULL;
-	int r = svm_range_split(prange, prange->start, new_last, &tail);
+	int r;
 
-	if (!r) {
-		list_add(&tail->list, insert_list);
-		if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
-			list_add(&tail->update_list, remap_list);
-	}
-	return r;
+	r = svm_range_split(prange, prange->start, new_last, &tail);
+
+	if (r)
+		return r;
+
+	list_add(&tail->list, insert_list);
+
+	if (huge_page_mapping && tail->start > start_align &&
+	    tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512)))
+		list_add(&tail->update_list, remap_list);
+
+	return 0;
 }
 
 static int
 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
 		     struct list_head *insert_list, struct list_head *remap_list)
 {
+	unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
+	unsigned long start_align = ALIGN(prange->start, 512);
+	bool huge_page_mapping = last_align_down > start_align;
 	struct svm_range *head = NULL;
-	int r = svm_range_split(prange, new_start, prange->last, &head);
+	int r;
 
-	if (!r) {
-		list_add(&head->list, insert_list);
-		if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
-			list_add(&head->update_list, remap_list);
-	}
-	return r;
+	r = svm_range_split(prange, new_start, prange->last, &head);
+
+	if (r)
+		return r;
+
+	list_add(&head->list, insert_list);
+
+	if (huge_page_mapping && head->last > start_align &&
+	    head->last < last_align_down && (!IS_ALIGNED(head->last, 512)))
+		list_add(&head->update_list, remap_list);
+
+	return 0;
 }
 
 static void
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* RE: [PATCH v4] drm/amdkfd: Use huge page size to check split svm range alignment
  2025-11-20 15:50 [PATCH v4] drm/amdkfd: Use huge page size to check split svm range alignment Xiaogang.Chen
@ 2025-11-24 14:55 ` Chen, Xiaogang
  2025-11-24 18:45   ` Philip Yang
  0 siblings, 1 reply; 3+ messages in thread
From: Chen, Xiaogang @ 2025-11-24 14:55 UTC (permalink / raw)
  To: Chen, Xiaogang, amd-gfx@lists.freedesktop.org

[AMD Official Use Only - AMD Internal Distribution Only]

ping

-----Original Message-----
From: Xiaogang.Chen <xiaogang.chen@amd.com>
Sent: Thursday, November 20, 2025 9:51 AM
To: amd-gfx@lists.freedesktop.org
Cc: Chen, Xiaogang <Xiaogang.Chen@amd.com>
Subject: [PATCH v4] drm/amdkfd: Use huge page size to check split svm range alignment

From: Xiaogang Chen <xiaogang.chen@amd.com>

Fixes: 7ef6b2d4b7e5 ("drm/amdkfd: remap unaligned svm ranges that have split")

When splitting svm ranges that have been mapped using huge pages, we should use the huge page size (2MB) to check the split range alignment, not prange->granularity, which is the migration granularity.

Signed-off-by: Xiaogang Chen <xiaogang.chen@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 46 +++++++++++++++++++---------
 1 file changed, 32 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 521c14c7a789..7fe9d569d416 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1144,30 +1144,48 @@ static int
 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
                     struct list_head *insert_list, struct list_head *remap_list)  {
+       unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
+       unsigned long start_align = ALIGN(prange->start, 512);
+       bool huge_page_mapping = last_align_down > start_align;
        struct svm_range *tail = NULL;
-       int r = svm_range_split(prange, prange->start, new_last, &tail);
+       int r;

-       if (!r) {
-               list_add(&tail->list, insert_list);
-               if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
-                       list_add(&tail->update_list, remap_list);
-       }
-       return r;
+       r = svm_range_split(prange, prange->start, new_last, &tail);
+
+       if (r)
+               return r;
+
+       list_add(&tail->list, insert_list);
+
+       if (huge_page_mapping && tail->start > start_align &&
+           tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512)))
+               list_add(&tail->update_list, remap_list);
+
+       return 0;
 }

 static int
 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
                     struct list_head *insert_list, struct list_head *remap_list)  {
+       unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
+       unsigned long start_align = ALIGN(prange->start, 512);
+       bool huge_page_mapping = last_align_down > start_align;
        struct svm_range *head = NULL;
-       int r = svm_range_split(prange, new_start, prange->last, &head);
+       int r;

-       if (!r) {
-               list_add(&head->list, insert_list);
-               if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
-                       list_add(&head->update_list, remap_list);
-       }
-       return r;
+       r = svm_range_split(prange, new_start, prange->last, &head);
+
+       if (r)
+               return r;
+
+       list_add(&head->list, insert_list);
+
+       if (huge_page_mapping && head->last > start_align &&
+           head->last < last_align_down && (!IS_ALIGNED(head->last, 512)))
+               list_add(&head->update_list, remap_list);
+
+       return 0;
 }

 static void
--
2.34.1


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH v4] drm/amdkfd: Use huge page size to check split svm range alignment
  2025-11-24 14:55 ` Chen, Xiaogang
@ 2025-11-24 18:45   ` Philip Yang
  0 siblings, 0 replies; 3+ messages in thread
From: Philip Yang @ 2025-11-24 18:45 UTC (permalink / raw)
  To: Chen, Xiaogang, amd-gfx@lists.freedesktop.org



On 2025-11-24 09:55, Chen, Xiaogang wrote:
> [AMD Official Use Only - AMD Internal Distribution Only]
>
> ping
>
> -----Original Message-----
> From: Xiaogang.Chen <xiaogang.chen@amd.com>
> Sent: Thursday, November 20, 2025 9:51 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Chen, Xiaogang <Xiaogang.Chen@amd.com>
> Subject: [PATCH v4] drm/amdkfd: Use huge page size to check split svm range alignment
>
> From: Xiaogang Chen <xiaogang.chen@amd.com>
>
> Fixes: 7ef6b2d4b7e5 ("drm/amdkfd: remap unaligned svm ranges that have split")
>
> When splitting svm ranges that have been mapped using huge pages, we should use the huge page size (2MB) to check the split range alignment, not prange->granularity, which is the migration granularity.
>
> Signed-off-by: Xiaogang Chen <xiaogang.chen@amd.com>
> ---
>   drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 46 +++++++++++++++++++---------
>   1 file changed, 32 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 521c14c7a789..7fe9d569d416 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -1144,30 +1144,48 @@ static int
>   svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
>                       struct list_head *insert_list, struct list_head *remap_list)  {
> +       unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
> +       unsigned long start_align = ALIGN(prange->start, 512);
> +       bool huge_page_mapping = last_align_down > start_align;
>          struct svm_range *tail = NULL;
> -       int r = svm_range_split(prange, prange->start, new_last, &tail);
> +       int r;
>
> -       if (!r) {
> -               list_add(&tail->list, insert_list);
> -               if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
> -                       list_add(&tail->update_list, remap_list);
> -       }
> -       return r;
> +       r = svm_range_split(prange, prange->start, new_last, &tail);
> +
> +       if (r)
> +               return r;
> +
> +       list_add(&tail->list, insert_list);
> +
> +       if (huge_page_mapping && tail->start > start_align &&
> +           tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512)))
> +               list_add(&tail->update_list, remap_list);
> +
> +       return 0;
>   }
>
>   static int
>   svm_range_split_head(struct svm_range *prange, uint64_t new_start,
>                       struct list_head *insert_list, struct list_head *remap_list)  {
> +       unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
> +       unsigned long start_align = ALIGN(prange->start, 512);
> +       bool huge_page_mapping = last_align_down > start_align;
>          struct svm_range *head = NULL;
> -       int r = svm_range_split(prange, new_start, prange->last, &head);
> +       int r;
>
> -       if (!r) {
> -               list_add(&head->list, insert_list);
> -               if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
> -                       list_add(&head->update_list, remap_list);
> -       }
> -       return r;
> +       r = svm_range_split(prange, new_start, prange->last, &head);
> +
> +       if (r)
> +               return r;
> +
> +       list_add(&head->list, insert_list);
> +
> +       if (huge_page_mapping && head->last > start_align &&
> +           head->last < last_align_down && (!IS_ALIGNED(head->last, 512)))
replace all head->last with head->last + 1 in the if condition, or use 
new_start instead.

With this fixed, this patch is
Reviewed-by: Philip Yang <Philip.Yang@amd.com>
> +               list_add(&head->update_list, remap_list);
> +
> +       return 0;
>   }
>
>   static void
> --
> 2.34.1
>


^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2025-11-24 18:45 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-11-20 15:50 [PATCH v4] drm/amdkfd: Use huge page size to check split svm range alignment Xiaogang.Chen
2025-11-24 14:55 ` Chen, Xiaogang
2025-11-24 18:45   ` Philip Yang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox