From: "Michael S. Tsirkin" <mst@redhat.com>
To: Igor Mammedov <imammedo@redhat.com>
Cc: qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH 1/2] vhost: add vhost_has_free_slot() interface
Date: Wed, 29 Jul 2015 13:20:24 +0300 [thread overview]
Message-ID: <20150729131622-mutt-send-email-mst@redhat.com> (raw)
In-Reply-To: <1437400170-66929-2-git-send-email-imammedo@redhat.com>
On Mon, Jul 20, 2015 at 03:49:29PM +0200, Igor Mammedov wrote:
> it will allow for other parts of QEMU check if it's safe
> to map memory region during hotplug/runtime.
> That way hotplug path will have a chance to cancel
> hotplug operation instead of crashing in vhost_commit().
>
> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> ---
> hw/virtio/vhost-backend.c | 23 ++++++++++++++++++++++-
> hw/virtio/vhost-user.c | 8 +++++++-
> hw/virtio/vhost.c | 21 +++++++++++++++++++++
> include/hw/virtio/vhost-backend.h | 2 ++
> include/hw/virtio/vhost.h | 1 +
> stubs/Makefile.objs | 1 +
> stubs/vhost.c | 6 ++++++
> 7 files changed, 60 insertions(+), 2 deletions(-)
> create mode 100644 stubs/vhost.c
>
> diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> index 4d68a27..46fa707 100644
> --- a/hw/virtio/vhost-backend.c
> +++ b/hw/virtio/vhost-backend.c
> @@ -11,6 +11,7 @@
> #include "hw/virtio/vhost.h"
> #include "hw/virtio/vhost-backend.h"
> #include "qemu/error-report.h"
> +#include "linux/vhost.h"
>
> #include <sys/ioctl.h>
>
> @@ -42,11 +43,31 @@ static int vhost_kernel_cleanup(struct vhost_dev *dev)
> return close(fd);
> }
>
> +static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
> +{
> + int limit;
> + int s = offsetof(struct vhost_memory, regions);
> + struct vhost_memory *mem = g_malloc0(s);
> +
> + assert(dev->mem->nregions);
> + do {
> + s += sizeof mem->regions[0];
> + mem = g_realloc(mem, s);
> + mem->regions[mem->nregions] = dev->mem->regions[0];
> + mem->nregions++;
> + } while (vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem) != -1);
> + limit = mem->nregions - 1 > 0 ? mem->nregions - 1 : 0;
> + g_free(mem);
> +
> + return limit;
> +}
> +
I don't like this probing much: if one clocks the size up
significantly, this will slow down the box trying
to allocate kernel memory for no real reason.
I'd rather check /sys/module/vhost/parameters/max_mem_regions.
If not there, assume 64.
> static const VhostOps kernel_ops = {
> .backend_type = VHOST_BACKEND_TYPE_KERNEL,
> .vhost_call = vhost_kernel_call,
> .vhost_backend_init = vhost_kernel_init,
> - .vhost_backend_cleanup = vhost_kernel_cleanup
> + .vhost_backend_cleanup = vhost_kernel_cleanup,
> + .vhost_backend_memslots_limit = vhost_kernel_memslots_limit
> };
>
> int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index d6f2163..0487809 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -352,9 +352,15 @@ static int vhost_user_cleanup(struct vhost_dev *dev)
> return 0;
> }
>
> +static int vhost_user_memslots_limit(struct vhost_dev *dev)
> +{
> + return VHOST_MEMORY_MAX_NREGIONS;
> +}
> +
> const VhostOps user_ops = {
> .backend_type = VHOST_BACKEND_TYPE_USER,
> .vhost_call = vhost_user_call,
> .vhost_backend_init = vhost_user_init,
> - .vhost_backend_cleanup = vhost_user_cleanup
> + .vhost_backend_cleanup = vhost_user_cleanup,
> + .vhost_backend_memslots_limit = vhost_user_memslots_limit
> };
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 2712c6f..e964004 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -26,6 +26,18 @@
>
> static struct vhost_log *vhost_log;
>
> +static int used_memslots;
> +static int memslots_limit = -1;
> +
> +bool vhost_has_free_slot(void)
> +{
> + if (memslots_limit >= 0) {
> + return memslots_limit > used_memslots;
> + }
> +
> + return true;
> +}
> +
> static void vhost_dev_sync_region(struct vhost_dev *dev,
> MemoryRegionSection *section,
> uint64_t mfirst, uint64_t mlast,
> @@ -457,6 +469,7 @@ static void vhost_set_memory(MemoryListener *listener,
> dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
> dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
> dev->memory_changed = true;
> + used_memslots = dev->mem->nregions;
> }
>
> static bool vhost_section(MemoryRegionSection *section)
> @@ -1119,6 +1132,14 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
> if (r < 0) {
> goto fail_features;
> }
> +
> + r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> + if (memslots_limit > 0) {
> + memslots_limit = MIN(memslots_limit, r);
> + } else {
> + memslots_limit = r;
> + }
> +
> r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
> if (r < 0) {
> r = -errno;
> diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> index e472f29..28b6714 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h
> @@ -24,12 +24,14 @@ typedef int (*vhost_call)(struct vhost_dev *dev, unsigned long int request,
> void *arg);
> typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque);
> typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
> +typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
>
> typedef struct VhostOps {
> VhostBackendType backend_type;
> vhost_call vhost_call;
> vhost_backend_init vhost_backend_init;
> vhost_backend_cleanup vhost_backend_cleanup;
> + vhost_backend_memslots_limit vhost_backend_memslots_limit;
> } VhostOps;
>
> extern const VhostOps user_ops;
> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> index dd51050..17ff7b6 100644
> --- a/include/hw/virtio/vhost.h
> +++ b/include/hw/virtio/vhost.h
> @@ -81,4 +81,5 @@ uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
> uint64_t features);
> void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
> uint64_t features);
> +bool vhost_has_free_slot(void);
> #endif
> diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
> index 9937a12..d2f1b21 100644
> --- a/stubs/Makefile.objs
> +++ b/stubs/Makefile.objs
> @@ -38,3 +38,4 @@ stub-obj-$(CONFIG_WIN32) += fd-register.o
> stub-obj-y += cpus.o
> stub-obj-y += kvm.o
> stub-obj-y += qmp_pc_dimm_device_list.o
> +stub-obj-y += vhost.o
> diff --git a/stubs/vhost.c b/stubs/vhost.c
> new file mode 100644
> index 0000000..d346b85
> --- /dev/null
> +++ b/stubs/vhost.c
> @@ -0,0 +1,6 @@
> +#include "hw/virtio/vhost.h"
> +
> +bool vhost_has_free_slot(void)
> +{
> + return true;
> +}
> --
> 1.8.3.1
next prev parent reply other threads:[~2015-07-29 10:20 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-07-20 13:49 [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory Igor Mammedov
2015-07-20 13:49 ` [Qemu-devel] [PATCH 1/2] vhost: add vhost_has_free_slot() interface Igor Mammedov
2015-07-29 10:20 ` Michael S. Tsirkin [this message]
2015-07-29 11:26 ` Igor Mammedov
2015-07-20 13:49 ` [Qemu-devel] [PATCH 2/2] pc-dimm: add vhost slots limit check before commiting to hotplug Igor Mammedov
2015-07-28 15:08 ` [Qemu-devel] [PATCH 0/2] vhost: check if vhost has capacity for hotplugged memory Igor Mammedov
2015-07-29 8:33 ` Paolo Bonzini
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20150729131622-mutt-send-email-mst@redhat.com \
--to=mst@redhat.com \
--cc=imammedo@redhat.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).