* [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory
@ 2015-07-20 13:49 Igor Mammedov
  2015-07-20 13:49 ` [Qemu-devel] [PATCH 1/2] vhost: add vhost_has_free_slot() interface Igor Mammedov
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Igor Mammedov @ 2015-07-20 13:49 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst

It's a defensive patchset that helps avoid QEMU crashing
at memory hotplug time by checking that vhost has free capacity
for an additional memory slot.

Igor Mammedov (2):
  vhost: add vhost_has_free_slot() interface
  pc-dimm: add vhost slots limit check before committing to hotplug

 hw/mem/pc-dimm.c                  |  7 +++++++
 hw/virtio/vhost-backend.c         | 23 ++++++++++++++++++++++-
 hw/virtio/vhost-user.c            |  8 +++++++-
 hw/virtio/vhost.c                 | 21 +++++++++++++++++++++
 include/hw/virtio/vhost-backend.h |  2 ++
 include/hw/virtio/vhost.h         |  1 +
 stubs/Makefile.objs               |  1 +
 stubs/vhost.c                     |  6 ++++++
 8 files changed, 67 insertions(+), 2 deletions(-)
 create mode 100644 stubs/vhost.c

-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [Qemu-devel] [PATCH 1/2] vhost: add vhost_has_free_slot() interface
  2015-07-20 13:49 [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory Igor Mammedov
@ 2015-07-20 13:49 ` Igor Mammedov
  2015-07-29 10:20   ` Michael S. Tsirkin
  2015-07-20 13:49 ` [Qemu-devel] [PATCH 2/2] pc-dimm: add vhost slots limit check before committing to hotplug Igor Mammedov
  2015-07-28 15:08 ` [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory Igor Mammedov
  2 siblings, 1 reply; 7+ messages in thread
From: Igor Mammedov @ 2015-07-20 13:49 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst

It will allow other parts of QEMU to check whether it's safe
to map a memory region during hotplug/runtime.
That way the hotplug path will have a chance to cancel
the hotplug operation instead of crashing in vhost_commit().
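
For illustration only (not part of this patch), a caller on a hotplug path
would use the new helper roughly like the sketch below; patch 2 adds the
equivalent check to pc_dimm_memory_plug():

    #include "hw/virtio/vhost.h"
    #include "qapi/error.h"

    /* sketch: veto a hotplug request while it can still be cancelled cleanly */
    static void example_hotplug_precheck(Error **errp)
    {
        if (!vhost_has_free_slot()) {
            error_setg(errp, "a used vhost backend has no free memory slots left");
            return;                 /* caller cancels the hotplug operation */
        }
        /* ...safe to map the new memory region and let vhost_commit() run... */
    }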

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
---
 hw/virtio/vhost-backend.c         | 23 ++++++++++++++++++++++-
 hw/virtio/vhost-user.c            |  8 +++++++-
 hw/virtio/vhost.c                 | 21 +++++++++++++++++++++
 include/hw/virtio/vhost-backend.h |  2 ++
 include/hw/virtio/vhost.h         |  1 +
 stubs/Makefile.objs               |  1 +
 stubs/vhost.c                     |  6 ++++++
 7 files changed, 60 insertions(+), 2 deletions(-)
 create mode 100644 stubs/vhost.c

diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 4d68a27..46fa707 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -11,6 +11,7 @@
 #include "hw/virtio/vhost.h"
 #include "hw/virtio/vhost-backend.h"
 #include "qemu/error-report.h"
+#include "linux/vhost.h"
 
 #include <sys/ioctl.h>
 
@@ -42,11 +43,31 @@ static int vhost_kernel_cleanup(struct vhost_dev *dev)
     return close(fd);
 }
 
+static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
+{
+    int limit;
+    int s = offsetof(struct vhost_memory, regions);
+    struct vhost_memory *mem = g_malloc0(s);
+
+    assert(dev->mem->nregions);
+    do {
+        s += sizeof mem->regions[0];
+        mem = g_realloc(mem, s);
+        mem->regions[mem->nregions] = dev->mem->regions[0];
+        mem->nregions++;
+    } while (vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem) != -1);
+    limit = mem->nregions - 1 > 0 ? mem->nregions - 1 : 0;
+    g_free(mem);
+
+    return limit;
+}
+
 static const VhostOps kernel_ops = {
         .backend_type = VHOST_BACKEND_TYPE_KERNEL,
         .vhost_call = vhost_kernel_call,
         .vhost_backend_init = vhost_kernel_init,
-        .vhost_backend_cleanup = vhost_kernel_cleanup
+        .vhost_backend_cleanup = vhost_kernel_cleanup,
+        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit
 };
 
 int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index d6f2163..0487809 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -352,9 +352,15 @@ static int vhost_user_cleanup(struct vhost_dev *dev)
     return 0;
 }
 
+static int vhost_user_memslots_limit(struct vhost_dev *dev)
+{
+    return VHOST_MEMORY_MAX_NREGIONS;
+}
+
 const VhostOps user_ops = {
         .backend_type = VHOST_BACKEND_TYPE_USER,
         .vhost_call = vhost_user_call,
         .vhost_backend_init = vhost_user_init,
-        .vhost_backend_cleanup = vhost_user_cleanup
+        .vhost_backend_cleanup = vhost_user_cleanup,
+        .vhost_backend_memslots_limit = vhost_user_memslots_limit
         };
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 2712c6f..e964004 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -26,6 +26,18 @@
 
 static struct vhost_log *vhost_log;
 
+static int used_memslots;
+static int memslots_limit = -1;
+
+bool vhost_has_free_slot(void)
+{
+    if (memslots_limit >= 0) {
+        return memslots_limit > used_memslots;
+    }
+
+    return true;
+}
+
 static void vhost_dev_sync_region(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   uint64_t mfirst, uint64_t mlast,
@@ -457,6 +469,7 @@ static void vhost_set_memory(MemoryListener *listener,
     dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
     dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
     dev->memory_changed = true;
+    used_memslots = dev->mem->nregions;
 }
 
 static bool vhost_section(MemoryRegionSection *section)
@@ -1119,6 +1132,14 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
     if (r < 0) {
         goto fail_features;
     }
+
+    r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
+    if (memslots_limit > 0) {
+        memslots_limit = MIN(memslots_limit, r);
+    } else {
+        memslots_limit = r;
+    }
+
     r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
     if (r < 0) {
         r = -errno;
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index e472f29..28b6714 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -24,12 +24,14 @@ typedef int (*vhost_call)(struct vhost_dev *dev, unsigned long int request,
              void *arg);
 typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque);
 typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
+typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
 
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_call vhost_call;
     vhost_backend_init vhost_backend_init;
     vhost_backend_cleanup vhost_backend_cleanup;
+    vhost_backend_memslots_limit vhost_backend_memslots_limit;
 } VhostOps;
 
 extern const VhostOps user_ops;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index dd51050..17ff7b6 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -81,4 +81,5 @@ uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                             uint64_t features);
 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                         uint64_t features);
+bool vhost_has_free_slot(void);
 #endif
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
index 9937a12..d2f1b21 100644
--- a/stubs/Makefile.objs
+++ b/stubs/Makefile.objs
@@ -38,3 +38,4 @@ stub-obj-$(CONFIG_WIN32) += fd-register.o
 stub-obj-y += cpus.o
 stub-obj-y += kvm.o
 stub-obj-y += qmp_pc_dimm_device_list.o
+stub-obj-y += vhost.o
diff --git a/stubs/vhost.c b/stubs/vhost.c
new file mode 100644
index 0000000..d346b85
--- /dev/null
+++ b/stubs/vhost.c
@@ -0,0 +1,6 @@
+#include "hw/virtio/vhost.h"
+
+bool vhost_has_free_slot(void)
+{
+    return true;
+}
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [Qemu-devel] [PATCH 2/2] pc-dimm: add vhost slots limit check before committing to hotplug
  2015-07-20 13:49 [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory Igor Mammedov
  2015-07-20 13:49 ` [Qemu-devel] [PATCH 1/2] vhost: add vhost_has_free_slot() interface Igor Mammedov
@ 2015-07-20 13:49 ` Igor Mammedov
  2015-07-28 15:08 ` [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory Igor Mammedov
  2 siblings, 0 replies; 7+ messages in thread
From: Igor Mammedov @ 2015-07-20 13:49 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst

It allows safely cancelling memory hotplug if the vhost backend
doesn't support the necessary number of memory slots, and prevents
QEMU from crashing in vhost due to hitting the vhost limit on the
number of supported memory ranges.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
---
 hw/mem/pc-dimm.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index bb04862..901bdbf 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -25,6 +25,7 @@
 #include "sysemu/numa.h"
 #include "sysemu/kvm.h"
 #include "trace.h"
+#include "hw/virtio/vhost.h"
 
 typedef struct pc_dimms_capacity {
      uint64_t size;
@@ -95,6 +96,12 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
         goto out;
     }
 
+    if (!vhost_has_free_slot()) {
+        error_setg(&local_err, "a used vhost backend has no free"
+                               " memory slots left");
+        goto out;
+    }
+
     memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
     vmstate_register_ram(mr, dev);
     numa_set_mem_node_id(addr, memory_region_size(mr), dimm->node);
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory
  2015-07-20 13:49 [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory Igor Mammedov
  2015-07-20 13:49 ` [Qemu-devel] [PATCH 1/2] vhost: add vhost_has_free_slot() interface Igor Mammedov
  2015-07-20 13:49 ` [Qemu-devel] [PATCH 2/2] pc-dimm: add vhost slots limit check before committing to hotplug Igor Mammedov
@ 2015-07-28 15:08 ` Igor Mammedov
  2015-07-29  8:33   ` Paolo Bonzini
  2 siblings, 1 reply; 7+ messages in thread
From: Igor Mammedov @ 2015-07-28 15:08 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo, mst

On Mon, 20 Jul 2015 15:49:28 +0200
Igor Mammedov <imammedo@redhat.com> wrote:

> It's a defensive patchset that helps avoid QEMU crashing
> at memory hotplug time by checking that vhost has free capacity
> for an additional memory slot.
> 
> Igor Mammedov (2):
>   vhost: add vhost_has_free_slot() interface
>   pc-dimm: add vhost slots limit check before committing to hotplug
> 
>  hw/mem/pc-dimm.c                  |  7 +++++++
>  hw/virtio/vhost-backend.c         | 23 ++++++++++++++++++++++-
>  hw/virtio/vhost-user.c            |  8 +++++++-
>  hw/virtio/vhost.c                 | 21 +++++++++++++++++++++
>  include/hw/virtio/vhost-backend.h |  2 ++
>  include/hw/virtio/vhost.h         |  1 +
>  stubs/Makefile.objs               |  1 +
>  stubs/vhost.c                     |  6 ++++++
>  8 files changed, 67 insertions(+), 2 deletions(-)
>  create mode 100644 stubs/vhost.c
> 

ping,

The patches are not invasive and fix a QEMU crash during memory hotplug
when running on kernels that have a lower vhost memory slot limit.
Please consider applying them for 2.4.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory
  2015-07-28 15:08 ` [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory Igor Mammedov
@ 2015-07-29  8:33   ` Paolo Bonzini
  0 siblings, 0 replies; 7+ messages in thread
From: Paolo Bonzini @ 2015-07-29  8:33 UTC (permalink / raw)
  To: Igor Mammedov, qemu-devel; +Cc: mst



On 28/07/2015 17:08, Igor Mammedov wrote:
>> >  hw/mem/pc-dimm.c                  |  7 +++++++
>> >  hw/virtio/vhost-backend.c         | 23 ++++++++++++++++++++++-
>> >  hw/virtio/vhost-user.c            |  8 +++++++-
>> >  hw/virtio/vhost.c                 | 21 +++++++++++++++++++++
>> >  include/hw/virtio/vhost-backend.h |  2 ++
>> >  include/hw/virtio/vhost.h         |  1 +
>> >  stubs/Makefile.objs               |  1 +
>> >  stubs/vhost.c                     |  6 ++++++
>> >  8 files changed, 67 insertions(+), 2 deletions(-)
>> >  create mode 100644 stubs/vhost.c
>> > 
> ping,
> 
> The patches are not invasive and fix a QEMU crash during memory hotplug
> when running on kernels that have a lower vhost memory slot limit.
> Please consider applying them for 2.4.

I like the approach.  For 2.5 we could consider using a
NotifierWithReturn: it can be easily extended to cover KVM as well and
removes the need for the stub.
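
A rough sketch of what that could look like, assuming the NotifierWithReturn
API from include/qemu/notify.h (the list name, callback name and registration
points below are made up purely for illustration):

    #include "qemu/notify.h"
    #include "hw/virtio/vhost.h"

    /* hypothetical list the hotplug path would consult before committing */
    static NotifierWithReturnList hotplug_precheck_notifiers =
        NOTIFIER_WITH_RETURN_LIST_INITIALIZER(hotplug_precheck_notifiers);

    static int vhost_hotplug_precheck(NotifierWithReturn *notifier, void *data)
    {
        return vhost_has_free_slot() ? 0 : -1;   /* non-zero vetoes the plug */
    }

    static NotifierWithReturn vhost_precheck_notifier = {
        .notify = vhost_hotplug_precheck,
    };

    /* vhost (and later KVM) would register itself once:
     *   notifier_with_return_list_add(&hotplug_precheck_notifiers,
     *                                 &vhost_precheck_notifier);
     * and pc_dimm_memory_plug() would ask all registered backends instead of
     * calling the vhost stub directly:
     *   if (notifier_with_return_list_notify(&hotplug_precheck_notifiers, NULL)) {
     *       error_setg(&local_err, "no free memory slots left");
     *       goto out;
     *   }
     */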

Paolo

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [Qemu-devel] [PATCH 1/2] vhost: add vhost_has_free_slot() interface
  2015-07-20 13:49 ` [Qemu-devel] [PATCH 1/2] vhost: add vhost_has_free_slot() interface Igor Mammedov
@ 2015-07-29 10:20   ` Michael S. Tsirkin
  2015-07-29 11:26     ` Igor Mammedov
  0 siblings, 1 reply; 7+ messages in thread
From: Michael S. Tsirkin @ 2015-07-29 10:20 UTC (permalink / raw)
  To: Igor Mammedov; +Cc: qemu-devel

On Mon, Jul 20, 2015 at 03:49:29PM +0200, Igor Mammedov wrote:
> It will allow other parts of QEMU to check whether it's safe
> to map a memory region during hotplug/runtime.
> That way the hotplug path will have a chance to cancel
> the hotplug operation instead of crashing in vhost_commit().
> 
> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> ---
>  hw/virtio/vhost-backend.c         | 23 ++++++++++++++++++++++-
>  hw/virtio/vhost-user.c            |  8 +++++++-
>  hw/virtio/vhost.c                 | 21 +++++++++++++++++++++
>  include/hw/virtio/vhost-backend.h |  2 ++
>  include/hw/virtio/vhost.h         |  1 +
>  stubs/Makefile.objs               |  1 +
>  stubs/vhost.c                     |  6 ++++++
>  7 files changed, 60 insertions(+), 2 deletions(-)
>  create mode 100644 stubs/vhost.c
> 
> diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> index 4d68a27..46fa707 100644
> --- a/hw/virtio/vhost-backend.c
> +++ b/hw/virtio/vhost-backend.c
> @@ -11,6 +11,7 @@
>  #include "hw/virtio/vhost.h"
>  #include "hw/virtio/vhost-backend.h"
>  #include "qemu/error-report.h"
> +#include "linux/vhost.h"
>  
>  #include <sys/ioctl.h>
>  
> @@ -42,11 +43,31 @@ static int vhost_kernel_cleanup(struct vhost_dev *dev)
>      return close(fd);
>  }
>  
> +static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
> +{
> +    int limit;
> +    int s = offsetof(struct vhost_memory, regions);
> +    struct vhost_memory *mem = g_malloc0(s);
> +
> +    assert(dev->mem->nregions);
> +    do {
> +        s += sizeof mem->regions[0];
> +        mem = g_realloc(mem, s);
> +        mem->regions[mem->nregions] = dev->mem->regions[0];
> +        mem->nregions++;
> +    } while (vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem) != -1);
> +    limit = mem->nregions - 1 > 0 ? mem->nregions - 1 : 0;
> +    g_free(mem);
> +
> +    return limit;
> +}
> +

I don't like this probing much: if one clocks the size up
significantly, this will slow down the box trying
to allocate kernel memory for no real reason.

I'd rather check /sys/module/vhost/parameters/max_mem_regions.
If not there, assume 64.
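
A minimal sketch of what that probe could look like in place of the
SET_MEM_TABLE loop above (the sysfs path and the 64 fallback are the
suggestion above; error handling is deliberately simplistic):

    static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
    {
        int limit = 64;   /* assumed default when the module parameter is absent */
        char *s = NULL;

        if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
                                &s, NULL, NULL)) {
            uint64_t val = g_ascii_strtoull(s, NULL, 10);
            if (val) {
                limit = val;
            }
            g_free(s);
        }
        return limit;
    }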

[...]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [Qemu-devel] [PATCH 1/2] vhost: add vhost_has_free_slot() interface
  2015-07-29 10:20   ` Michael S. Tsirkin
@ 2015-07-29 11:26     ` Igor Mammedov
  0 siblings, 0 replies; 7+ messages in thread
From: Igor Mammedov @ 2015-07-29 11:26 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: qemu-devel

On Wed, 29 Jul 2015 13:20:24 +0300
"Michael S. Tsirkin" <mst@redhat.com> wrote:

> On Mon, Jul 20, 2015 at 03:49:29PM +0200, Igor Mammedov wrote:
> > It will allow other parts of QEMU to check whether it's safe
> > to map a memory region during hotplug/runtime.
> > That way the hotplug path will have a chance to cancel
> > the hotplug operation instead of crashing in vhost_commit().
> > 
> > Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> > ---
> >  hw/virtio/vhost-backend.c         | 23 ++++++++++++++++++++++-
> >  hw/virtio/vhost-user.c            |  8 +++++++-
> >  hw/virtio/vhost.c                 | 21 +++++++++++++++++++++
> >  include/hw/virtio/vhost-backend.h |  2 ++
> >  include/hw/virtio/vhost.h         |  1 +
> >  stubs/Makefile.objs               |  1 +
> >  stubs/vhost.c                     |  6 ++++++
> >  7 files changed, 60 insertions(+), 2 deletions(-)
> >  create mode 100644 stubs/vhost.c
> > 
> > diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> > index 4d68a27..46fa707 100644
> > --- a/hw/virtio/vhost-backend.c
> > +++ b/hw/virtio/vhost-backend.c
> > @@ -11,6 +11,7 @@
> >  #include "hw/virtio/vhost.h"
> >  #include "hw/virtio/vhost-backend.h"
> >  #include "qemu/error-report.h"
> > +#include "linux/vhost.h"
> >  
> >  #include <sys/ioctl.h>
> >  
> > @@ -42,11 +43,31 @@ static int vhost_kernel_cleanup(struct vhost_dev *dev)
> >      return close(fd);
> >  }
> >  
> > +static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
> > +{
> > +    int limit;
> > +    int s = offsetof(struct vhost_memory, regions);
> > +    struct vhost_memory *mem = g_malloc0(s);
> > +
> > +    assert(dev->mem->nregions);
> > +    do {
> > +        s += sizeof mem->regions[0];
> > +        mem = g_realloc(mem, s);
> > +        mem->regions[mem->nregions] = dev->mem->regions[0];
> > +        mem->nregions++;
> > +    } while (vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem) != -1);
> > +    limit = mem->nregions - 1 > 0 ? mem->nregions - 1 : 0;
> > +    g_free(mem);
> > +
> > +    return limit;
> > +}
> > +
> 
> I don't like this probing much: if one clocks the size up
> significantly, this will slow down the box trying
> to allocate kernel memory for no real reason.
> 
> I'd rather check /sys/module/vhost/parameters/max_mem_regions.
> If not there, assume 64.
I'll respin with this suggestion even if it's a bit ugly.
In addition to it we can add an ioctl interface to detect the limit
later, once the kernel side is ready for it.
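
For illustration only, such a probe could try an ioctl first and fall back
to the module parameter / a built-in default. Both the ioctl name and the
vhost_kernel_memslots_limit_sysfs() helper below are invented placeholders;
no such request exists yet:

    /* sketch: prefer a (hypothetical) ioctl, else the sysfs parameter, else 64 */
    static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
    {
    #ifdef VHOST_GET_MEM_REGIONS_LIMIT       /* hypothetical future ioctl */
        int limit;

        if (vhost_kernel_call(dev, VHOST_GET_MEM_REGIONS_LIMIT, &limit) == 0) {
            return limit;
        }
    #endif
        /* placeholder for the sysfs-based probe sketched earlier in the thread */
        return vhost_kernel_memslots_limit_sysfs(dev);
    }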


[...]

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2015-07-29 11:26 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-07-20 13:49 [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory Igor Mammedov
2015-07-20 13:49 ` [Qemu-devel] [PATCH 1/2] vhost: add vhost_has_free_slot() interface Igor Mammedov
2015-07-29 10:20   ` Michael S. Tsirkin
2015-07-29 11:26     ` Igor Mammedov
2015-07-20 13:49 ` [Qemu-devel] [PATCH 2/2] pc-dimm: add vhost slots limit check before committing to hotplug Igor Mammedov
2015-07-28 15:08 ` [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory Igor Mammedov
2015-07-29  8:33   ` Paolo Bonzini
