public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2] mshv: Simplify GPA map/unmap hypercall helpers
@ 2026-04-29 16:48 Stanislav Kinsburskii
  2026-04-30  2:06 ` Mukesh R
  0 siblings, 1 reply; 3+ messages in thread
From: Stanislav Kinsburskii @ 2026-04-29 16:48 UTC (permalink / raw)
  To: kys, haiyangz, wei.liu, decui, longli; +Cc: linux-hyperv, linux-kernel

Clean up hv_do_map_gpa_hcall() and hv_call_unmap_gpa_pages() after the
preceding bug-fix patches:

Move "done += completed" before the status checks so that pages mapped
by a partially-successful batch are included in the error cleanup unmap.
Previously these mappings were leaked on failure.

While here, improve type safety and readability:
 - Change "int done" to "u64 done" to match the u64 page_count it is
   compared against, avoiding signed/unsigned comparison hazards.
 - Use u64 for loop iteration and batch size variables consistently.
 - Add proper braces to the for-loop body in hv_do_map_gpa_hcall().
 - Remove unnecessary "ret" variable from hv_call_unmap_gpa_pages().
 - Simplify the error-path unmap to use "done << large_shift" directly
   instead of mutating done in place.

Fixes: 621191d709b14 ("Drivers: hv: Introduce mshv_root module to expose /dev/mshv to VMMs")
Signed-off-by: Stanislav Kinsburskii <skinsburskii@linux.microsoft.com>
---
 drivers/hv/mshv_root_hv_call.c |   55 +++++++++++++++-------------------------
 1 file changed, 20 insertions(+), 35 deletions(-)

diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
index e5992c324904a..1f19a4ca824f0 100644
--- a/drivers/hv/mshv_root_hv_call.c
+++ b/drivers/hv/mshv_root_hv_call.c
@@ -195,8 +195,8 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
 	struct hv_input_map_gpa_pages *input_page;
 	u64 status, *pfnlist;
 	unsigned long irq_flags, large_shift = 0;
-	int ret = 0, done = 0;
-	u64 page_count = page_struct_count;
+	u64 done = 0, page_count = page_struct_count;
+	int ret = 0;
 
 	if (page_count == 0 || (pages && mmio_spa))
 		return -EINVAL;
@@ -213,8 +213,8 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
 	}
 
 	while (done < page_count) {
-		ulong i, completed, remain = page_count - done;
-		int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);
+		u64 i, completed, remain = page_count - done;
+		u64 rep_count = min_t(u64, remain, HV_MAP_GPA_BATCH_SIZE);
 
 		local_irq_save(irq_flags);
 		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -224,23 +224,13 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
 		input_page->map_flags = flags;
 		pfnlist = input_page->source_gpa_page_list;
 
-		for (i = 0; i < rep_count; i++)
-			if (flags & HV_MAP_GPA_NO_ACCESS) {
+		for (i = 0; i < rep_count; i++) {
+			if (flags & HV_MAP_GPA_NO_ACCESS)
 				pfnlist[i] = 0;
-			} else if (pages) {
-				u64 index = (done + i) << large_shift;
-
-				if (index >= page_struct_count) {
-					ret = -EINVAL;
-					break;
-				}
-				pfnlist[i] = page_to_pfn(pages[index]);
-			} else {
+			else if (pages)
+				pfnlist[i] = page_to_pfn(pages[(done + i) << large_shift]);
+			else
 				pfnlist[i] = mmio_spa + done + i;
-			}
-		if (ret) {
-			local_irq_restore(irq_flags);
-			break;
 		}
 
 		status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
@@ -248,29 +238,26 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
 		local_irq_restore(irq_flags);
 
 		completed = hv_repcomp(status);
+		done += completed;
 
 		if (hv_result_needs_memory(status)) {
 			ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
 						    HV_MAP_GPA_DEPOSIT_PAGES);
 			if (ret)
 				break;
-
 		} else if (!hv_result_success(status)) {
 			ret = hv_result_to_errno(status);
 			break;
 		}
-
-		done += completed;
 	}
 
 	if (ret && done) {
 		u32 unmap_flags = 0;
 
-		if (flags & HV_MAP_GPA_LARGE_PAGE) {
+		if (flags & HV_MAP_GPA_LARGE_PAGE)
 			unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
-			done <<= large_shift;
-		}
-		hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
+		hv_call_unmap_gpa_pages(partition_id, gfn,
+					done << large_shift, unmap_flags);
 	}
 
 	return ret;
@@ -305,7 +292,7 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
 	struct hv_input_unmap_gpa_pages *input_page;
 	u64 status, page_count = page_count_4k;
 	unsigned long irq_flags, large_shift = 0;
-	int ret = 0, done = 0;
+	u64 done = 0;
 
 	if (page_count == 0)
 		return -EINVAL;
@@ -319,8 +306,8 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
 	}
 
 	while (done < page_count) {
-		ulong completed, remain = page_count - done;
-		int rep_count = min(remain, HV_UMAP_GPA_PAGES);
+		u64 completed, remain = page_count - done;
+		u64 rep_count = min_t(u64, remain, HV_UMAP_GPA_PAGES);
 
 		local_irq_save(irq_flags);
 		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -333,15 +320,13 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
 		local_irq_restore(irq_flags);
 
 		completed = hv_repcomp(status);
-		if (!hv_result_success(status)) {
-			ret = hv_result_to_errno(status);
-			break;
-		}
-
 		done += completed;
+
+		if (!hv_result_success(status))
+			return hv_result_to_errno(status);
 	}
 
-	return ret;
+	return 0;
 }
 
 int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,



^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH v2] mshv: Simplify GPA map/unmap hypercall helpers
  2026-04-29 16:48 [PATCH v2] mshv: Simplify GPA map/unmap hypercall helpers Stanislav Kinsburskii
@ 2026-04-30  2:06 ` Mukesh R
  2026-04-30 14:43   ` Stanislav Kinsburskii
  0 siblings, 1 reply; 3+ messages in thread
From: Mukesh R @ 2026-04-30  2:06 UTC (permalink / raw)
  To: Stanislav Kinsburskii, kys, haiyangz, wei.liu, decui, longli
  Cc: linux-hyperv, linux-kernel


On 4/29/26 09:48, Stanislav Kinsburskii wrote:
> Clean up hv_do_map_gpa_hcall() and hv_call_unmap_gpa_pages() after the
> preceding bug-fix patches:
> 
> Move "done += completed" before the status checks so that pages mapped
> by a partially-successful batch are included in the error cleanup unmap.
> Previously these mappings were leaked on failure.
> 
> While here, improve type safety and readability:
>   - Change "int done" to "u64 done" to match the u64 page_count it is
>     compared against, avoiding signed/unsigned comparison hazards.
>   - Use u64 for loop iteration and batch size variables consistently.
>   - Add proper braces to the for-loop body in hv_do_map_gpa_hcall().
>   - Remove unnecessary "ret" variable from hv_call_unmap_gpa_pages().
>   - Simplify the error-path unmap to use "done << large_shift" directly
>     instead of mutating done in place.
> 

what changed in V2?

> Fixes: 621191d709b14 ("Drivers: hv: Introduce mshv_root module to expose /dev/mshv to VMMs")
> Signed-off-by: Stanislav Kinsburskii <skinsburskii@linux.microsoft.com>
> ---
>   drivers/hv/mshv_root_hv_call.c |   55 +++++++++++++++-------------------------
>   1 file changed, 20 insertions(+), 35 deletions(-)
> 
> diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
> index e5992c324904a..1f19a4ca824f0 100644
> --- a/drivers/hv/mshv_root_hv_call.c
> +++ b/drivers/hv/mshv_root_hv_call.c
> @@ -195,8 +195,8 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
>   	struct hv_input_map_gpa_pages *input_page;
>   	u64 status, *pfnlist;
>   	unsigned long irq_flags, large_shift = 0;
> -	int ret = 0, done = 0;
> -	u64 page_count = page_struct_count;
> +	u64 done = 0, page_count = page_struct_count;
> +	int ret = 0;
>   
>   	if (page_count == 0 || (pages && mmio_spa))
>   		return -EINVAL;
> @@ -213,8 +213,8 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
>   	}
>   
>   	while (done < page_count) {
> -		ulong i, completed, remain = page_count - done;
> -		int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);
> +		u64 i, completed, remain = page_count - done;
> +		u64 rep_count = min_t(u64, remain, HV_MAP_GPA_BATCH_SIZE);
>   
>   		local_irq_save(irq_flags);
>   		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
> @@ -224,23 +224,13 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
>   		input_page->map_flags = flags;
>   		pfnlist = input_page->source_gpa_page_list;
>   
> -		for (i = 0; i < rep_count; i++)
> -			if (flags & HV_MAP_GPA_NO_ACCESS) {
> +		for (i = 0; i < rep_count; i++) {
> +			if (flags & HV_MAP_GPA_NO_ACCESS)
>   				pfnlist[i] = 0;
> -			} else if (pages) {
> -				u64 index = (done + i) << large_shift;
> -
> -				if (index >= page_struct_count) {
> -					ret = -EINVAL;
> -					break;
> -				}
> -				pfnlist[i] = page_to_pfn(pages[index]);
> -			} else {
> +			else if (pages)
> +				pfnlist[i] = page_to_pfn(pages[(done + i) << large_shift]);

Entire file is 80 cols, please don't cause this one overflow.

Thanks,
-Mukesh


> +			else
>   				pfnlist[i] = mmio_spa + done + i;
> -			}
> -		if (ret) {
> -			local_irq_restore(irq_flags);
> -			break;
>   		}
>   
>   		status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
> @@ -248,29 +238,26 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
>   		local_irq_restore(irq_flags);
>   
>   		completed = hv_repcomp(status);
> +		done += completed;
>   
>   		if (hv_result_needs_memory(status)) {
>   			ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
>   						    HV_MAP_GPA_DEPOSIT_PAGES);
>   			if (ret)
>   				break;
> -
>   		} else if (!hv_result_success(status)) {
>   			ret = hv_result_to_errno(status);
>   			break;
>   		}
> -
> -		done += completed;
>   	}
>   
>   	if (ret && done) {
>   		u32 unmap_flags = 0;
>   
> -		if (flags & HV_MAP_GPA_LARGE_PAGE) {
> +		if (flags & HV_MAP_GPA_LARGE_PAGE)
>   			unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
> -			done <<= large_shift;
> -		}
> -		hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
> +		hv_call_unmap_gpa_pages(partition_id, gfn,
> +					done << large_shift, unmap_flags);
>   	}
>   
>   	return ret;
> @@ -305,7 +292,7 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
>   	struct hv_input_unmap_gpa_pages *input_page;
>   	u64 status, page_count = page_count_4k;
>   	unsigned long irq_flags, large_shift = 0;
> -	int ret = 0, done = 0;
> +	u64 done = 0;
>   
>   	if (page_count == 0)
>   		return -EINVAL;
> @@ -319,8 +306,8 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
>   	}
>   
>   	while (done < page_count) {
> -		ulong completed, remain = page_count - done;
> -		int rep_count = min(remain, HV_UMAP_GPA_PAGES);
> +		u64 completed, remain = page_count - done;
> +		u64 rep_count = min_t(u64, remain, HV_UMAP_GPA_PAGES);
>   
>   		local_irq_save(irq_flags);
>   		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
> @@ -333,15 +320,13 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
>   		local_irq_restore(irq_flags);
>   
>   		completed = hv_repcomp(status);
> -		if (!hv_result_success(status)) {
> -			ret = hv_result_to_errno(status);
> -			break;
> -		}
> -
>   		done += completed;
> +
> +		if (!hv_result_success(status))
> +			return hv_result_to_errno(status);
>   	}
>   
> -	return ret;
> +	return 0;
>   }
>   
>   int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
> 
> 


^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH v2] mshv: Simplify GPA map/unmap hypercall helpers
  2026-04-30  2:06 ` Mukesh R
@ 2026-04-30 14:43   ` Stanislav Kinsburskii
  0 siblings, 0 replies; 3+ messages in thread
From: Stanislav Kinsburskii @ 2026-04-30 14:43 UTC (permalink / raw)
  To: Mukesh R
  Cc: kys, haiyangz, wei.liu, decui, longli, linux-hyperv, linux-kernel

On Wed, Apr 29, 2026 at 07:06:08PM -0700, Mukesh R wrote:
> 
> On 4/29/26 09:48, Stanislav Kinsburskii wrote:
> > Clean up hv_do_map_gpa_hcall() and hv_call_unmap_gpa_pages() after the
> > preceding bug-fix patches:
> > 
> > Move "done += completed" before the status checks so that pages mapped
> > by a partially-successful batch are included in the error cleanup unmap.
> > Previously these mappings were leaked on failure.
> > 
> > While here, improve type safety and readability:
> >   - Change "int done" to "u64 done" to match the u64 page_count it is
> >     compared against, avoiding signed/unsigned comparison hazards.
> >   - Use u64 for loop iteration and batch size variables consistently.
> >   - Add proper braces to the for-loop body in hv_do_map_gpa_hcall().
> >   - Remove unnecessary "ret" variable from hv_call_unmap_gpa_pages().
> >   - Simplify the error-path unmap to use "done << large_shift" directly
> >     instead of mutating done in place.
> > 
> 
> what changed in V2?
> 

No functional changes in v2: "min" was replaced with "min_t" (reported
by checkpatch.pl).

> > Fixes: 621191d709b14 ("Drivers: hv: Introduce mshv_root module to expose /dev/mshv to VMMs")
> > Signed-off-by: Stanislav Kinsburskii <skinsburskii@linux.microsoft.com>
> > ---
> >   drivers/hv/mshv_root_hv_call.c |   55 +++++++++++++++-------------------------
> >   1 file changed, 20 insertions(+), 35 deletions(-)
> > 
> > diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
> > index e5992c324904a..1f19a4ca824f0 100644
> > --- a/drivers/hv/mshv_root_hv_call.c
> > +++ b/drivers/hv/mshv_root_hv_call.c
> > @@ -195,8 +195,8 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
> >   	struct hv_input_map_gpa_pages *input_page;
> >   	u64 status, *pfnlist;
> >   	unsigned long irq_flags, large_shift = 0;
> > -	int ret = 0, done = 0;
> > -	u64 page_count = page_struct_count;
> > +	u64 done = 0, page_count = page_struct_count;
> > +	int ret = 0;
> >   	if (page_count == 0 || (pages && mmio_spa))
> >   		return -EINVAL;
> > @@ -213,8 +213,8 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
> >   	}
> >   	while (done < page_count) {
> > -		ulong i, completed, remain = page_count - done;
> > -		int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);
> > +		u64 i, completed, remain = page_count - done;
> > +		u64 rep_count = min_t(u64, remain, HV_MAP_GPA_BATCH_SIZE);
> >   		local_irq_save(irq_flags);
> >   		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
> > @@ -224,23 +224,13 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
> >   		input_page->map_flags = flags;
> >   		pfnlist = input_page->source_gpa_page_list;
> > -		for (i = 0; i < rep_count; i++)
> > -			if (flags & HV_MAP_GPA_NO_ACCESS) {
> > +		for (i = 0; i < rep_count; i++) {
> > +			if (flags & HV_MAP_GPA_NO_ACCESS)
> >   				pfnlist[i] = 0;
> > -			} else if (pages) {
> > -				u64 index = (done + i) << large_shift;
> > -
> > -				if (index >= page_struct_count) {
> > -					ret = -EINVAL;
> > -					break;
> > -				}
> > -				pfnlist[i] = page_to_pfn(pages[index]);
> > -			} else {
> > +			else if (pages)
> > +				pfnlist[i] = page_to_pfn(pages[(done + i) << large_shift]);
> 
> Entire file is 80 cols, please don't cause this one overflow.
> 

Sure. I'll update.

Thanks,
Stanislav

> Thanks,
> -Mukesh
> 
> 
> > +			else
> >   				pfnlist[i] = mmio_spa + done + i;
> > -			}
> > -		if (ret) {
> > -			local_irq_restore(irq_flags);
> > -			break;
> >   		}
> >   		status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
> > @@ -248,29 +238,26 @@ static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
> >   		local_irq_restore(irq_flags);
> >   		completed = hv_repcomp(status);
> > +		done += completed;
> >   		if (hv_result_needs_memory(status)) {
> >   			ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
> >   						    HV_MAP_GPA_DEPOSIT_PAGES);
> >   			if (ret)
> >   				break;
> > -
> >   		} else if (!hv_result_success(status)) {
> >   			ret = hv_result_to_errno(status);
> >   			break;
> >   		}
> > -
> > -		done += completed;
> >   	}
> >   	if (ret && done) {
> >   		u32 unmap_flags = 0;
> > -		if (flags & HV_MAP_GPA_LARGE_PAGE) {
> > +		if (flags & HV_MAP_GPA_LARGE_PAGE)
> >   			unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
> > -			done <<= large_shift;
> > -		}
> > -		hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
> > +		hv_call_unmap_gpa_pages(partition_id, gfn,
> > +					done << large_shift, unmap_flags);
> >   	}
> >   	return ret;
> > @@ -305,7 +292,7 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
> >   	struct hv_input_unmap_gpa_pages *input_page;
> >   	u64 status, page_count = page_count_4k;
> >   	unsigned long irq_flags, large_shift = 0;
> > -	int ret = 0, done = 0;
> > +	u64 done = 0;
> >   	if (page_count == 0)
> >   		return -EINVAL;
> > @@ -319,8 +306,8 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
> >   	}
> >   	while (done < page_count) {
> > -		ulong completed, remain = page_count - done;
> > -		int rep_count = min(remain, HV_UMAP_GPA_PAGES);
> > +		u64 completed, remain = page_count - done;
> > +		u64 rep_count = min_t(u64, remain, HV_UMAP_GPA_PAGES);
> >   		local_irq_save(irq_flags);
> >   		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
> > @@ -333,15 +320,13 @@ int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
> >   		local_irq_restore(irq_flags);
> >   		completed = hv_repcomp(status);
> > -		if (!hv_result_success(status)) {
> > -			ret = hv_result_to_errno(status);
> > -			break;
> > -		}
> > -
> >   		done += completed;
> > +
> > +		if (!hv_result_success(status))
> > +			return hv_result_to_errno(status);
> >   	}
> > -	return ret;
> > +	return 0;
> >   }
> >   int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
> > 
> > 
> 

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2026-04-30 14:43 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-04-29 16:48 [PATCH v2] mshv: Simplify GPA map/unmap hypercall helpers Stanislav Kinsburskii
2026-04-30  2:06 ` Mukesh R
2026-04-30 14:43   ` Stanislav Kinsburskii

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox