All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] vfio/migration: Detect and report overflow in migration size queries
@ 2026-05-13  9:45 Cédric Le Goater
  2026-05-13 17:09 ` Peter Xu
  2026-05-14 12:52 ` Avihai Horon
  0 siblings, 2 replies; 3+ messages in thread
From: Cédric Le Goater @ 2026-05-13  9:45 UTC (permalink / raw)
  To: qemu-devel; +Cc: Alex Williamson, Cédric Le Goater, Avihai Horon, Peter Xu

VFIO migration ioctls (VFIO_DEVICE_FEATURE_MIG_DATA_SIZE and
VFIO_MIG_GET_PRECOPY_INFO) return device-estimated migration sizes as
uint64_t values. A misbehaving kernel driver could return values that
are unreasonably large, which would corrupt the size accounting used
to decide migration convergence.

This misbehavior occurred a few times when testing migration of a VM
with an assigned NVIDIA vGPU and an MLX5 VF. In some of the save
iterations, the reported precopy and stopcopy sizes were unreasonably
large (close to UINT64_MAX):

  vfio_state_pending  (4fbce62c-8ce2-4cc9-b429-41635bc94f24) stopcopy size 0 precopy initial size 18446744073708667040 precopy dirty size 0
  vfio_save_iterate   (4fbce62c-8ce2-4cc9-b429-41635bc94f24) precopy initial size 18446744073707618464 precopy dirty size 0
  vfio_state_pending  (4fbce62c-8ce2-4cc9-b429-41635bc94f24) stopcopy size 18446744073708503040 precopy initial size 18446744073707618464 precopy dirty size 0
  vfio_state_pending  (4fbce62c-8ce2-4cc9-b429-41635bc94f24) stopcopy size 0 precopy initial size 18446744073707618464 precopy dirty size 0
  vfio_state_pending  (0000:b1:01.0) stopcopy size 18446744073709543408 precopy initial size 0 precopy dirty size 1008

This had the effect of corrupting migration convergence, as reported
by the HMP migrate command:

  (qemu) info migrate
  Status:                 active
  Time (ms):              total=21140, setup=86, exp_down=152455434886355
  Remaining:              16 EiB
  RAM info:
    Throughput (Mbps):    967.98
    Sizes:                pagesize=4 KiB, total=4 GiB
    Transfers:            transferred=2.29 GiB, remain=4.7 MiB
      Channels:           precopy=1.91 GiB, multifd=0 B, postcopy=0 B, vfio=387 MiB
      Page Types:         normal=499427, zero=559708
    Page Rates (pps):     transfer=0, dirty=1892
    Others:               dirty_syncs=3

Add a helper to detect values that exceed INT64_MAX, which is far
beyond any realistic device state size, and report them with an error
message. Return -ERANGE from the query functions so callers can abort
the migration rather than proceeding with corrupted estimates.
However, the callers don't yet check the return value to actually stop
the migration.

Cc: Avihai Horon <avihaih@nvidia.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Cédric Le Goater <clg@redhat.com>
---
 hw/vfio/migration.c | 32 ++++++++++++++++++++++++++++----
 1 file changed, 28 insertions(+), 4 deletions(-)

diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index 150e28656e97c5e8198541e5b6dfc4ed4102d143..fb12b9717f773fdde657911517de9d74c1eb3931 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -320,6 +320,18 @@ static void vfio_migration_cleanup(VFIODevice *vbasedev)
     migration->data_fd = -1;
 }
 
+static bool vfio_migration_check_overflow(VFIODevice *vbasedev, uint64_t size,
+                                          const char *name)
+{
+    if (size > INT64_MAX) {
+        error_report("%s: Estimated %s size overflow: 0x%"PRIx64,
+                     vbasedev->name, name, size);
+        return true;
+    }
+
+    return false;
+}
+
 static int vfio_query_stop_copy_size(VFIODevice *vbasedev)
 {
     uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
@@ -329,7 +341,7 @@ static int vfio_query_stop_copy_size(VFIODevice *vbasedev)
     struct vfio_device_feature_mig_data_size *mig_data_size =
         (struct vfio_device_feature_mig_data_size *)feature->data;
     VFIOMigration *migration = vbasedev->migration;
-    int ret;
+    int ret = 0;
 
     feature->argsz = sizeof(buf);
     feature->flags =
@@ -347,7 +359,10 @@ static int vfio_query_stop_copy_size(VFIODevice *vbasedev)
                          vbasedev->name, ret);
     } else {
         migration->stopcopy_size = mig_data_size->stop_copy_length;
-        ret = 0;
+        if (vfio_migration_check_overflow(vbasedev, migration->stopcopy_size,
+                                          "stop copy size")) {
+            ret = -ERANGE;
+        }
     }
 
     trace_vfio_query_stop_copy_size(vbasedev->name,
@@ -361,7 +376,7 @@ static int vfio_query_precopy_size(VFIOMigration *migration)
     struct vfio_precopy_info precopy = {
         .argsz = sizeof(precopy),
     };
-    int ret;
+    int ret = 0;
 
     if (ioctl(migration->data_fd, VFIO_MIG_GET_PRECOPY_INFO, &precopy)) {
         migration->precopy_init_size = 0;
@@ -370,9 +385,18 @@ static int vfio_query_precopy_size(VFIOMigration *migration)
         warn_report_once("VFIO device %s ioctl(VFIO_MIG_GET_PRECOPY_INFO) "
                          "failed (%d)", migration->vbasedev->name, ret);
     } else {
+        bool overflow;
+
         migration->precopy_init_size = precopy.initial_bytes;
         migration->precopy_dirty_size = precopy.dirty_bytes;
-        ret = 0;
+
+        overflow  = vfio_migration_check_overflow(migration->vbasedev,
+                         migration->precopy_init_size,  "precopy init size");
+        overflow |= vfio_migration_check_overflow(migration->vbasedev,
+                         migration->precopy_dirty_size, "precopy dirty size");
+        if (overflow) {
+            ret = -ERANGE;
+        }
     }
 
     trace_vfio_query_precopy_size(migration->vbasedev->name,
-- 
2.54.0



^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] vfio/migration: Detect and report overflow in migration size queries
  2026-05-13  9:45 [PATCH] vfio/migration: Detect and report overflow in migration size queries Cédric Le Goater
@ 2026-05-13 17:09 ` Peter Xu
  2026-05-14 12:52 ` Avihai Horon
  1 sibling, 0 replies; 3+ messages in thread
From: Peter Xu @ 2026-05-13 17:09 UTC (permalink / raw)
  To: Cédric Le Goater; +Cc: qemu-devel, Alex Williamson, Avihai Horon

On Wed, May 13, 2026 at 11:45:22AM +0200, Cédric Le Goater wrote:
> VFIO migration ioctls (VFIO_DEVICE_FEATURE_MIG_DATA_SIZE and
> VFIO_MIG_GET_PRECOPY_INFO) return device-estimated migration sizes as
> uint64_t values. A misbehaving kernel driver could return values that
> are unreasonably large, which would corrupt the size accounting used
> to decide migration convergence.
> 
> This misbehavior occurred a few times when testing migration of a VM
> with an assigned NVIDIA vGPU and an MLX5 VF. In some of the save
> iterations, the reported precopy and stopcopy sizes were unreasonably
> large (close to UINT64_MAX):
> 
>   vfio_state_pending  (4fbce62c-8ce2-4cc9-b429-41635bc94f24) stopcopy size 0 precopy initial size 18446744073708667040 precopy dirty size 0
>   vfio_save_iterate   (4fbce62c-8ce2-4cc9-b429-41635bc94f24) precopy initial size 18446744073707618464 precopy dirty size 0
>   vfio_state_pending  (4fbce62c-8ce2-4cc9-b429-41635bc94f24) stopcopy size 18446744073708503040 precopy initial size 18446744073707618464 precopy dirty size 0
>   vfio_state_pending  (4fbce62c-8ce2-4cc9-b429-41635bc94f24) stopcopy size 0 precopy initial size 18446744073707618464 precopy dirty size 0
>   vfio_state_pending  (0000:b1:01.0) stopcopy size 18446744073709543408 precopy initial size 0 precopy dirty size 1008
> 
> This had the effect of corrupting migration convergence, as reported
> by the HMP migrate command:
> 
>   (qemu) info migrate
>   Status:                 active
>   Time (ms):              total=21140, setup=86, exp_down=152455434886355
>   Remaining:              16 EiB
>   RAM info:
>     Throughput (Mbps):    967.98
>     Sizes:                pagesize=4 KiB, total=4 GiB
>     Transfers:            transferred=2.29 GiB, remain=4.7 MiB
>       Channels:           precopy=1.91 GiB, multifd=0 B, postcopy=0 B, vfio=387 MiB
>       Page Types:         normal=499427, zero=559708
>     Page Rates (pps):     transfer=0, dirty=1892
>     Others:               dirty_syncs=3
> 
> Add a helper to detect values that exceed INT64_MAX, which is far
> beyond any realistic device state size, and report them with an error
> message. Return -ERANGE from the query functions so callers can abort
> the migration rather than proceeding with corrupted estimates.
> However, the callers don't yet check the return value to actually stop
> the migration.
> 
> Cc: Avihai Horon <avihaih@nvidia.com>
> Cc: Peter Xu <peterx@redhat.com>
> Signed-off-by: Cédric Le Goater <clg@redhat.com>

Reviewed-by: Peter Xu <peterx@redhat.com>

-- 
Peter Xu



^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] vfio/migration: Detect and report overflow in migration size queries
  2026-05-13  9:45 [PATCH] vfio/migration: Detect and report overflow in migration size queries Cédric Le Goater
  2026-05-13 17:09 ` Peter Xu
@ 2026-05-14 12:52 ` Avihai Horon
  1 sibling, 0 replies; 3+ messages in thread
From: Avihai Horon @ 2026-05-14 12:52 UTC (permalink / raw)
  To: Cédric Le Goater, qemu-devel; +Cc: Alex Williamson, Peter Xu


On 5/13/2026 12:45 PM, Cédric Le Goater wrote:
> External email: Use caution opening links or attachments
>
>
> VFIO migration ioctls (VFIO_DEVICE_FEATURE_MIG_DATA_SIZE and
> VFIO_MIG_GET_PRECOPY_INFO) return device-estimated migration sizes as
> uint64_t values. A misbehaving kernel driver could return values that
> are unreasonably large, which would corrupt the size accounting used
> to decide migration convergence.
>
> This misbehavior occurred a few times when testing migration of a VM
> with an assigned NVIDIA vGPU and an MLX5 VF. In some of the save
> iterations, the reported precopy and stopcopy sizes were unreasonably
> large (close to UINT64_MAX):
>
>    vfio_state_pending  (4fbce62c-8ce2-4cc9-b429-41635bc94f24) stopcopy size 0 precopy initial size 18446744073708667040 precopy dirty size 0
>    vfio_save_iterate   (4fbce62c-8ce2-4cc9-b429-41635bc94f24) precopy initial size 18446744073707618464 precopy dirty size 0
>    vfio_state_pending  (4fbce62c-8ce2-4cc9-b429-41635bc94f24) stopcopy size 18446744073708503040 precopy initial size 18446744073707618464 precopy dirty size 0
>    vfio_state_pending  (4fbce62c-8ce2-4cc9-b429-41635bc94f24) stopcopy size 0 precopy initial size 18446744073707618464 precopy dirty size 0
>    vfio_state_pending  (0000:b1:01.0) stopcopy size 18446744073709543408 precopy initial size 0 precopy dirty size 1008
>
> This had the effect of corrupting migration convergence, as reported
> by the HMP migrate command:
>
>    (qemu) info migrate
>    Status:                 active
>    Time (ms):              total=21140, setup=86, exp_down=152455434886355
>    Remaining:              16 EiB
>    RAM info:
>      Throughput (Mbps):    967.98
>      Sizes:                pagesize=4 KiB, total=4 GiB
>      Transfers:            transferred=2.29 GiB, remain=4.7 MiB
>        Channels:           precopy=1.91 GiB, multifd=0 B, postcopy=0 B, vfio=387 MiB
>        Page Types:         normal=499427, zero=559708
>      Page Rates (pps):     transfer=0, dirty=1892
>      Others:               dirty_syncs=3
>
> Add a helper to detect values that exceed INT64_MAX, which is far
> beyond any realistic device state size, and report them with an error
> message. Return -ERANGE from the query functions so callers can abort
> the migration rather than proceeding with corrupted estimates.
> However, the callers don't yet check the return value to actually stop
> the migration.
>
> Cc: Avihai Horon <avihaih@nvidia.com>
> Cc: Peter Xu <peterx@redhat.com>
> Signed-off-by: Cédric Le Goater <clg@redhat.com>
> ---
>   hw/vfio/migration.c | 32 ++++++++++++++++++++++++++++----
>   1 file changed, 28 insertions(+), 4 deletions(-)

Reviewed-by: Avihai Horon <avihaih@nvidia.com>

Can you tell whether it was the vGPU or the mlx5 device that reported the 
overflowed value? Or both?
Are we sure the driver is buggy? E.g., do you see the overflowed values 
also in trace_vfio_query_precopy_size and 
trace_vfio_query_stop_copy_size (where we just queried the values and 
didn't touch them yet)?

Thanks.

>
> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
> index 150e28656e97c5e8198541e5b6dfc4ed4102d143..fb12b9717f773fdde657911517de9d74c1eb3931 100644
> --- a/hw/vfio/migration.c
> +++ b/hw/vfio/migration.c
> @@ -320,6 +320,18 @@ static void vfio_migration_cleanup(VFIODevice *vbasedev)
>       migration->data_fd = -1;
>   }
>
> +static bool vfio_migration_check_overflow(VFIODevice *vbasedev, uint64_t size,
> +                                          const char *name)
> +{
> +    if (size > INT64_MAX) {
> +        error_report("%s: Estimated %s size overflow: 0x%"PRIx64,
> +                     vbasedev->name, name, size);
> +        return true;
> +    }
> +
> +    return false;
> +}
> +
>   static int vfio_query_stop_copy_size(VFIODevice *vbasedev)
>   {
>       uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
> @@ -329,7 +341,7 @@ static int vfio_query_stop_copy_size(VFIODevice *vbasedev)
>       struct vfio_device_feature_mig_data_size *mig_data_size =
>           (struct vfio_device_feature_mig_data_size *)feature->data;
>       VFIOMigration *migration = vbasedev->migration;
> -    int ret;
> +    int ret = 0;
>
>       feature->argsz = sizeof(buf);
>       feature->flags =
> @@ -347,7 +359,10 @@ static int vfio_query_stop_copy_size(VFIODevice *vbasedev)
>                            vbasedev->name, ret);
>       } else {
>           migration->stopcopy_size = mig_data_size->stop_copy_length;
> -        ret = 0;
> +        if (vfio_migration_check_overflow(vbasedev, migration->stopcopy_size,
> +                                          "stop copy size")) {
> +            ret = -ERANGE;
> +        }
>       }
>
>       trace_vfio_query_stop_copy_size(vbasedev->name,
> @@ -361,7 +376,7 @@ static int vfio_query_precopy_size(VFIOMigration *migration)
>       struct vfio_precopy_info precopy = {
>           .argsz = sizeof(precopy),
>       };
> -    int ret;
> +    int ret = 0;
>
>       if (ioctl(migration->data_fd, VFIO_MIG_GET_PRECOPY_INFO, &precopy)) {
>           migration->precopy_init_size = 0;
> @@ -370,9 +385,18 @@ static int vfio_query_precopy_size(VFIOMigration *migration)
>           warn_report_once("VFIO device %s ioctl(VFIO_MIG_GET_PRECOPY_INFO) "
>                            "failed (%d)", migration->vbasedev->name, ret);
>       } else {
> +        bool overflow;
> +
>           migration->precopy_init_size = precopy.initial_bytes;
>           migration->precopy_dirty_size = precopy.dirty_bytes;
> -        ret = 0;
> +
> +        overflow  = vfio_migration_check_overflow(migration->vbasedev,
> +                         migration->precopy_init_size,  "precopy init size");
> +        overflow |= vfio_migration_check_overflow(migration->vbasedev,
> +                         migration->precopy_dirty_size, "precopy dirty size");
> +        if (overflow) {
> +            ret = -ERANGE;
> +        }
>       }
>
>       trace_vfio_query_precopy_size(migration->vbasedev->name,
> --
> 2.54.0
>


^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2026-05-14 12:58 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-05-13  9:45 [PATCH] vfio/migration: Detect and report overflow in migration size queries Cédric Le Goater
2026-05-13 17:09 ` Peter Xu
2026-05-14 12:52 ` Avihai Horon

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.