* [Qemu-devel] [PATCH RESEND v10] vhost: used_memslots refactoring
From: Jay Zhou @ 2018-03-27 4:14 UTC
To: qemu-devel
Cc: mst, imammedo, weidong.huang, wangxinxin.wang, arei.gonglei,
jianjay.zhou, liuzhe13
Used_memslots is shared by the vhost-kernel and vhost-user backends.
It is set to dev->mem->nregions, which is correct for vhost-kernel
but not for vhost-user, since the latter only uses memory regions
that have a file descriptor. E.g. if a VM has a vhost-user NIC and
8 memory slots (the vhost-user memslot upper limit), hotplugging a
new DIMM device fails because vhost_has_free_slot() finds no free
slot left. It should succeed when only some of the memory slots have
a file descriptor, so set used_memslots for vhost-user and
vhost-kernel separately.
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
Signed-off-by: Liuzhe <liuzhe13@huawei.com>
---
v10:
- fix misaligned access to structures
- refine on setting used_memslots for vhost-user to
avoid side effect
- distinguish "has free memslots" and "out of memslots"
v7 ... v9:
- rebased on the master
v2 ... v6:
- delete the "used_memslots" global variable, and add it
for vhost-user and vhost-kernel separately
- refine the function, commit log
- used_memslots refactoring
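
A worked sketch of the accounting change, for review convenience. This
is illustrative only and not part of the patch: a standalone C snippet
with made-up numbers matching the commit message's example.

#include <stdbool.h>
#include <stdio.h>

#define VHOST_USER_LIMIT 8  /* stands in for VHOST_MEMORY_MAX_NREGIONS */

int main(void)
{
    unsigned int nregions = 8;  /* all present memory slots */
    unsigned int fd_backed = 3; /* slots whose MemoryRegion has an fd */

    /* Old behaviour: the shared counter equals nregions for every
     * backend, so 8 < 8 is false and hotplugging another DIMM fails. */
    bool old_has_free = nregions < VHOST_USER_LIMIT;

    /* New behaviour for vhost-user: only fd-backed slots count,
     * so 3 < 8 is true and the hotplug succeeds. */
    bool new_has_free = fd_backed < VHOST_USER_LIMIT;

    /* "out of memslots" is kept as a separate condition (used > limit);
     * used == limit means no free slot left, but not over the limit. */
    bool out_of_slots = fd_backed > VHOST_USER_LIMIT;

    printf("old_has_free=%d new_has_free=%d out_of_slots=%d\n",
           old_has_free, new_has_free, out_of_slots);
    return 0;
}
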
hw/virtio/vhost-backend.c | 21 ++++++++++++++++++++-
hw/virtio/vhost-user.c | 36 +++++++++++++++++++++++++++++++++---
hw/virtio/vhost.c | 13 ++++++-------
include/hw/virtio/vhost-backend.h | 8 ++++++--
4 files changed, 65 insertions(+), 13 deletions(-)
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 7f09efa..3539b5f 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -15,6 +15,8 @@
#include "hw/virtio/vhost-backend.h"
#include "qemu/error-report.h"
+static unsigned int vhost_kernel_used_memslots;
+
static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
void *arg)
{
@@ -62,6 +64,21 @@ static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
return limit;
}
+static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
+{
+ vhost_kernel_used_memslots = dev->mem->nregions;
+}
+
+static bool vhost_kernel_has_free_memslots(struct vhost_dev *dev)
+{
+ return vhost_kernel_used_memslots < vhost_kernel_memslots_limit(dev);
+}
+
+static bool vhost_kernel_out_of_memslots(struct vhost_dev *dev)
+{
+ return vhost_kernel_used_memslots > vhost_kernel_memslots_limit(dev);
+}
+
static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
struct vhost_vring_file *file)
{
@@ -237,7 +254,9 @@ static const VhostOps kernel_ops = {
.backend_type = VHOST_BACKEND_TYPE_KERNEL,
.vhost_backend_init = vhost_kernel_init,
.vhost_backend_cleanup = vhost_kernel_cleanup,
- .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
+ .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
+ .vhost_backend_has_free_memslots = vhost_kernel_has_free_memslots,
+ .vhost_out_of_memslots = vhost_kernel_out_of_memslots,
.vhost_net_set_backend = vhost_kernel_net_set_backend,
.vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
.vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 44aea5c..9691806 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -170,6 +170,8 @@ static VhostUserMsg m __attribute__ ((unused));
/* The version of the protocol we support */
#define VHOST_USER_VERSION (0x1)
+static unsigned int vhost_user_used_memslots;
+
struct vhost_user {
struct vhost_dev *dev;
CharBackend *chr;
@@ -1289,9 +1291,35 @@ static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
return idx;
}
-static int vhost_user_memslots_limit(struct vhost_dev *dev)
+static void vhost_user_set_used_memslots(struct vhost_dev *dev)
+{
+ int i, fd;
+ unsigned int fd_num = 0;
+
+ for (i = 0; i < dev->mem->nregions; ++i) {
+ struct vhost_memory_region *reg = dev->mem->regions + i;
+ ram_addr_t offset;
+ MemoryRegion *mr;
+
+ assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
+ mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
+ &offset);
+ fd = memory_region_get_fd(mr);
+ if (fd > 0) {
+ fd_num++;
+ }
+ }
+ vhost_user_used_memslots = fd_num;
+}
+
+static bool vhost_user_has_free_memslots(struct vhost_dev *dev)
+{
+ return vhost_user_used_memslots < VHOST_MEMORY_MAX_NREGIONS;
+}
+
+static bool vhost_user_out_of_memslots(struct vhost_dev *dev)
{
- return VHOST_MEMORY_MAX_NREGIONS;
+ return vhost_user_used_memslots > VHOST_MEMORY_MAX_NREGIONS;
}
static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
@@ -1563,7 +1591,9 @@ const VhostOps user_ops = {
.backend_type = VHOST_BACKEND_TYPE_USER,
.vhost_backend_init = vhost_user_init,
.vhost_backend_cleanup = vhost_user_cleanup,
- .vhost_backend_memslots_limit = vhost_user_memslots_limit,
+ .vhost_set_used_memslots = vhost_user_set_used_memslots,
+ .vhost_backend_has_free_memslots = vhost_user_has_free_memslots,
+ .vhost_out_of_memslots = vhost_user_out_of_memslots,
.vhost_set_log_base = vhost_user_set_log_base,
.vhost_set_mem_table = vhost_user_set_mem_table,
.vhost_set_vring_addr = vhost_user_set_vring_addr,
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 250f886..706951a 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -44,20 +44,19 @@
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;
-static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
QLIST_HEAD_INITIALIZER(vhost_devices);
bool vhost_has_free_slot(void)
{
- unsigned int slots_limit = ~0U;
struct vhost_dev *hdev;
QLIST_FOREACH(hdev, &vhost_devices, entry) {
- unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
- slots_limit = MIN(slots_limit, r);
+ if (!hdev->vhost_ops->vhost_backend_has_free_memslots(hdev)) {
+ return false;
+ }
}
- return slots_limit > used_memslots;
+ return true;
}
static void vhost_dev_sync_region(struct vhost_dev *dev,
@@ -446,7 +445,6 @@ static void vhost_commit(MemoryListener *listener)
dev->n_mem_sections * sizeof dev->mem->regions[0];
dev->mem = g_realloc(dev->mem, regions_size);
dev->mem->nregions = dev->n_mem_sections;
- used_memslots = dev->mem->nregions;
for (i = 0; i < dev->n_mem_sections; i++) {
struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
struct MemoryRegionSection *mrs = dev->mem_sections + i;
@@ -458,6 +456,7 @@ static void vhost_commit(MemoryListener *listener)
mrs->offset_within_region;
cur_vmr->flags_padding = 0;
}
+ dev->vhost_ops->vhost_set_used_memslots(dev);
if (!dev->started) {
goto out;
@@ -1248,7 +1247,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
memory_listener_register(&hdev->memory_listener, &address_space_memory);
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
- if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+ if (hdev->vhost_ops->vhost_out_of_memslots(hdev)) {
error_report("vhost backend memory slots limit is less"
" than current number of present memory slots");
r = -1;
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 5dac61f..f7ab82d 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -36,7 +36,9 @@ struct vhost_iotlb_msg;
typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque);
typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
-typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
+typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
+typedef bool (*vhost_backend_has_free_memslots)(struct vhost_dev *dev);
+typedef bool (*vhost_out_of_memslots)(struct vhost_dev *dev);
typedef int (*vhost_net_set_backend_op)(struct vhost_dev *dev,
struct vhost_vring_file *file);
@@ -105,7 +107,9 @@ typedef struct VhostOps {
VhostBackendType backend_type;
vhost_backend_init vhost_backend_init;
vhost_backend_cleanup vhost_backend_cleanup;
- vhost_backend_memslots_limit vhost_backend_memslots_limit;
+ vhost_set_used_memslots_op vhost_set_used_memslots;
+ vhost_backend_has_free_memslots vhost_backend_has_free_memslots;
+ vhost_out_of_memslots vhost_out_of_memslots;
vhost_net_set_backend_op vhost_net_set_backend;
vhost_net_set_mtu_op vhost_net_set_mtu;
vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint;
--
1.8.3.1
* Re: [Qemu-devel] [PATCH RESEND v10] vhost: used_memslots refactoring
From: no-reply @ 2018-03-31 7:10 UTC
To: jianjay.zhou
Cc: famz, qemu-devel, weidong.huang, mst, wangxinxin.wang,
arei.gonglei, imammedo, liuzhe13
Hi,
This series failed the docker-build@min-glib build test. Please find the testing commands and
their output below. If you have Docker installed, you can probably reproduce it
locally.
Type: series
Message-id: 1522124046-34596-1-git-send-email-jianjay.zhou@huawei.com
Subject: [Qemu-devel] [PATCH RESEND v10] vhost: used_memslots refactoring
=== TEST SCRIPT BEGIN ===
#!/bin/bash
set -e
git submodule update --init dtc
# Let docker tests dump environment info
export SHOW_ENV=1
export J=8
time make docker-test-build@min-glib
=== TEST SCRIPT END ===
Updating 3c8cf5a9c21ff8782164d1def7f44bd888713384
Switched to a new branch 'test'
e184f5d2a2 vhost: used_memslots refactoring
=== OUTPUT BEGIN ===
Submodule 'dtc' (git://git.qemu-project.org/dtc.git) registered for path 'dtc'
Cloning into '/var/tmp/patchew-tester-tmp-gaztzauz/src/dtc'...
Submodule path 'dtc': checked out 'e54388015af1fb4bf04d0bca99caba1074d9cc42'
BUILD min-glib
make[1]: Entering directory '/var/tmp/patchew-tester-tmp-gaztzauz/src'
GEN /var/tmp/patchew-tester-tmp-gaztzauz/src/docker-src.2018-03-31-03.09.40.32136/qemu.tar
Cloning into '/var/tmp/patchew-tester-tmp-gaztzauz/src/docker-src.2018-03-31-03.09.40.32136/qemu.tar.vroot'...
done.
Checking out files: 100% (6066/6066), done.
Your branch is up-to-date with 'origin/test'.
Submodule 'dtc' (git://git.qemu-project.org/dtc.git) registered for path 'dtc'
Cloning into '/var/tmp/patchew-tester-tmp-gaztzauz/src/docker-src.2018-03-31-03.09.40.32136/qemu.tar.vroot/dtc'...
Submodule path 'dtc': checked out 'e54388015af1fb4bf04d0bca99caba1074d9cc42'
Submodule 'ui/keycodemapdb' (git://git.qemu.org/keycodemapdb.git) registered for path 'ui/keycodemapdb'
Cloning into '/var/tmp/patchew-tester-tmp-gaztzauz/src/docker-src.2018-03-31-03.09.40.32136/qemu.tar.vroot/ui/keycodemapdb'...
Submodule path 'ui/keycodemapdb': checked out '6b3d716e2b6472eb7189d3220552280ef3d832ce'
tar: /var/tmp/patchew-tester-tmp-gaztzauz/src/docker-src.2018-03-31-03.09.40.32136/qemu.tar: Wrote only 2048 of 10240 bytes
tar: Error is not recoverable: exiting now
failed to create tar file
COPY RUNNER
RUN test-build in qemu:min-glib
Environment variables:
HOSTNAME=9d7442065e30
MAKEFLAGS= -j8
J=8
CCACHE_DIR=/var/tmp/ccache
EXTRA_CONFIGURE_OPTS=
V=
SHOW_ENV=1
PATH=/usr/lib/ccache:/usr/lib64/ccache:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/
TARGET_LIST=
SHLVL=1
HOME=/root
TEST_DIR=/tmp/qemu-test
FEATURES= dtc
DEBUG=
_=/usr/bin/env
Configure options:
--enable-werror --target-list=x86_64-softmmu,aarch64-softmmu --prefix=/tmp/qemu-test/install
ERROR: DTC (libfdt) version >= 1.4.2 not present.
Please install the DTC (libfdt) devel package
Traceback (most recent call last):
File "./tests/docker/docker.py", line 407, in <module>
sys.exit(main())
File "./tests/docker/docker.py", line 404, in main
return args.cmdobj.run(args, argv)
File "./tests/docker/docker.py", line 261, in run
return Docker().run(argv, args.keep, quiet=args.quiet)
File "./tests/docker/docker.py", line 229, in run
quiet=quiet)
File "./tests/docker/docker.py", line 147, in _do_check
return subprocess.check_call(self._command + cmd, **kwargs)
File "/usr/lib64/python2.7/subprocess.py", line 186, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['docker', 'run', '--label', 'com.qemu.instance.uuid=8b73a49034b211e8a67052540069c830', '-u', '0', '--security-opt', 'seccomp=unconfined', '--rm', '--net=none', '-e', 'TARGET_LIST=', '-e', 'EXTRA_CONFIGURE_OPTS=', '-e', 'V=', '-e', 'J=8', '-e', 'DEBUG=', '-e', 'SHOW_ENV=1', '-e', 'CCACHE_DIR=/var/tmp/ccache', '-v', '/root/.cache/qemu-docker-ccache:/var/tmp/ccache:z', '-v', '/var/tmp/patchew-tester-tmp-gaztzauz/src/docker-src.2018-03-31-03.09.40.32136:/var/tmp/qemu:z,ro', 'qemu:min-glib', '/var/tmp/qemu/run', 'test-build']' returned non-zero exit status 1
make[1]: *** [tests/docker/Makefile.include:129: docker-run] Error 1
make[1]: Leaving directory '/var/tmp/patchew-tester-tmp-gaztzauz/src'
make: *** [tests/docker/Makefile.include:163: docker-run-test-build@min-glib] Error 2
real 0m53.243s
user 0m9.356s
sys 0m6.946s
=== OUTPUT END ===
Test command exited with code: 2
---
Email generated automatically by Patchew [http://patchew.org/].
Please send your feedback to patchew-devel@redhat.com
* Re: [Qemu-devel] [PATCH RESEND v10] vhost: used_memslots refactoring
From: Zhoujian (jay) @ 2018-05-01 8:18 UTC
To: qemu-devel@nongnu.org, mst@redhat.com, imammedo@redhat.com
Cc: Huangweidong (C), wangxin (U), Gonglei (Arei),
Liuzhe (Ahriy, Euler)
Hi Michael and Igor,

I'm afraid this patch has been missed; could you help to review it?

PS: In order to avoid side effects, some code redundancy does exist
between vhost_user_set_used_memslots(), vhost_user_set_mem_table()
and vhost_user_set_mem_table_postcopy(); with my limited knowledge I
haven't found a good way to eliminate it. The counting of
used_memslots for vhost-user and vhost-kernel is correct now, I
think. Please let me know if there are any remaining issues.
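For illustration only, the duplicated scan could perhaps be hoisted
into a single shared helper along these lines (a sketch; the helper
name is hypothetical, and I haven't checked it against the postcopy
path):

static unsigned int vhost_user_count_fd_regions(struct vhost_dev *dev)
{
    int i;
    unsigned int fd_num = 0;

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        ram_addr_t offset;
        MemoryRegion *mr;

        /* Same scan as vhost_user_set_used_memslots() in the patch. */
        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
                                     &offset);
        if (memory_region_get_fd(mr) > 0) {
            fd_num++;
        }
    }
    return fd_num;
}

vhost_user_set_used_memslots() and the two set_mem_table paths could
then call it instead of open-coding the loop.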
Regards,
Jay
> -----Original Message-----
> From: Zhoujian (jay)
> Sent: Tuesday, March 27, 2018 12:14 PM
> To: qemu-devel@nongnu.org
> Cc: mst@redhat.com; imammedo@redhat.com; Huangweidong (C)
> <weidong.huang@huawei.com>; wangxin (U) <wangxinxin.wang@huawei.com>; Gonglei
> (Arei) <arei.gonglei@huawei.com>; Zhoujian (jay) <jianjay.zhou@huawei.com>;
> Liuzhe (Ahriy, Euler) <liuzhe13@huawei.com>
> Subject: [PATCH RESEND v10] vhost: used_memslots refactoring
>
> [v10 patch quoted in full; snipped -- identical to the original message above]