* [RFC PATCH 1/8] tests: vhost-vdpa: add initial VDUSE-based vhost-vdpa tests
2026-03-05 16:39 [RFC PATCH 0/8] Add vhost-vdpa and Shadow Virtqueue tests Eugenio Pérez
@ 2026-03-05 16:39 ` Eugenio Pérez
2026-03-25 21:02 ` Fabiano Rosas
2026-03-05 16:39 ` [RFC PATCH 2/8] tests: vhost-vdpa: parameterize VDUSE operations Eugenio Pérez
` (6 subsequent siblings)
7 siblings, 1 reply; 10+ messages in thread
From: Eugenio Pérez @ 2026-03-05 16:39 UTC (permalink / raw)
To: qemu-devel
Cc: Maxime Coquelin, Lei Yang, Paolo Bonzini, Michael S. Tsirkin,
Stefano Garzarella, Koushik Dutta, Fabiano Rosas, Jason Wang,
Laurent Vivier
Based on the vhost-user tests, the qos test registers itself as a VDUSE
device and receives the events from QEMU.
The test infrastructure creates a thread that acts as a VDUSE device,
while the regular test thread is managing QEMU.
This basic test just verifies that the guest memory ring addresses are
accessible, similar to the already existing test in vhost-user.
This enables automated testing of vhost-vdpa code paths that previously
required manual testing with real hardware.
Changes from vhost-user test:
* Automatic cleanup of many things.
* Handle the vduse fd and timeout.
* VDPA device cannot be removed before deleting QEMU, killing QEMU in
vhost_vdpa_test_cleanup.
* Read in enable callbacks, and the actual test_read_guest_mem is just
waiting.
* Add vhost_vdpa_thread to abstract fd monitoring
* Use QemuMutex and QemuCond for scoped cleanup
RFC: I'm not sure if this is the right place to add the tests in meson.
Also, a few things are just with asserts() or g_spawn(), instead of
more elegant code.
Also, I don't know how to include the libvduse.a library as meson
complains it's out of the tests/ directory, so I'm including the .c
directly. Ugly but it works.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tests/qtest/meson.build | 3 +
tests/qtest/vhost-vdpa-test.c | 426 ++++++++++++++++++++++++++++++++++
2 files changed, 429 insertions(+)
create mode 100644 tests/qtest/vhost-vdpa-test.c
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index ba9f59d2f8f7..0fdc8fb4a764 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -346,6 +346,9 @@ endif
if have_tools and have_vhost_user_blk_server
qos_test_ss.add(files('vhost-user-blk-test.c'))
endif
+if have_libvduse and have_vhost_vdpa
+ qos_test_ss.add(files('vhost-vdpa-test.c'))
+endif
tpmemu_files = ['tpm-emu.c', 'tpm-util.c', 'tpm-tests.c']
diff --git a/tests/qtest/vhost-vdpa-test.c b/tests/qtest/vhost-vdpa-test.c
new file mode 100644
index 000000000000..1fc5acacfed3
--- /dev/null
+++ b/tests/qtest/vhost-vdpa-test.c
@@ -0,0 +1,426 @@
+/*
+ * QTest testcase for vhost-vdpa using VDUSE devices
+ *
+ * Based on vhost-user-test.c
+ * Copyright (c) 2014 Virtual Open Systems Sarl.
+ * Copyright (c) 2026 - VDUSE adaptation
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu/lockable.h"
+
+#include "libqtest-single.h"
+#include "qapi/error.h"
+#include "libqos/qgraph.h"
+#include "hw/virtio/virtio-net.h"
+
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_net.h"
+
+#include "subprojects/libvduse/linux-headers/linux/vduse.h"
+#include "subprojects/libvduse/libvduse.h"
+
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <linux/vhost.h>
+
+/* TODO fix this */
+#include "subprojects/libvduse/libvduse.c"
+
+#define QEMU_CMD_MEM " -m %d -object memory-backend-file,id=mem,size=%dM," \
+ "mem-path=%s,share=on -numa node,memdev=mem"
+#define QEMU_CMD_VDPA " -netdev type=vhost-vdpa,vhostdev=%s,id=hs0"
+#define VDUSE_RECONNECT_LOG "vduse_reconnect.log"
+
+typedef struct VdpaThread {
+ GThread *thread;
+ GMainLoop *loop;
+ GMainContext *context;
+} VdpaThread;
+
+static void *vhost_vdpa_thread_function(void *data)
+{
+ GMainLoop *loop = data;
+ g_main_loop_run(loop);
+ return NULL;
+}
+
/*
 * Create a dedicated GMainContext/GMainLoop pair and spawn the helper
 * thread that dispatches the VDUSE device events on it.
 */
static void vhost_vdpa_thread_init(VdpaThread *t)
{
    t->context = g_main_context_new();
    t->loop = g_main_loop_new(t->context, FALSE);
    t->thread = g_thread_new("vdpa-thread", vhost_vdpa_thread_function, t->loop);
}
+
/*
 * Stop the helper thread, join it, and drain any pending main-loop events
 * before releasing the loop and context.
 *
 * NOTE(review): the drain loop iterates the *default* main context (NULL),
 * while the fd sources are attached to t->context — confirm this is
 * intentional and not meant to be g_main_context_pending(t->context).
 */
static void vhost_vdpa_thread_cleanup(VdpaThread *t)
{
    g_main_loop_quit(t->loop);
    g_thread_join(t->thread);

    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    g_main_loop_unref(t->loop);
    g_main_context_unref(t->context);
}
+
/*
 * Attach a G_IO_IN watch for @fd to the helper thread's context: @func is
 * invoked with @data from the vdpa thread whenever @fd becomes readable.
 * Ownership of the source is handed to the context.
 */
static void vhost_vdpa_thread_add_source_fd(VdpaThread *t, int fd,
                                            GUnixFDSourceFunc func, void *data)
{
    GSource *src = g_unix_fd_source_new(fd, G_IO_IN);
    g_source_set_callback(src, (GSourceFunc)func, data, NULL);
    g_source_attach(src, t->context);
    g_source_unref(src);
}
+
typedef struct TestServer {
    gchar *vduse_name;      /* name given to the VDUSE device */
    gchar *vdpa_dev_path;   /* /dev/vhost-vdpa-* node, or NULL if not found */
    gchar *tmpfs;           /* temp dir backing the guest memory file */
    int vq_read_num;        /* queues whose rings were read; data_mutex held */
    VduseDev *vdev;         /* the VDUSE device, NULL if creation failed */
    VdpaThread vdpa_thread; /* helper thread servicing the device fd */
    QemuMutex data_mutex;   /* protects vq_read_num */
    QemuCond data_cond;     /* signalled once both queues' rings were read */
    bool ready;             /* true only when every setup step succeeded */
} TestServer;
+
+static bool test_read_first_byte(int dev_fd, uint64_t addr)
+{
+ struct vduse_iotlb_entry entry;
+ int fd;
+ void *mmap_addr;
+
+ entry.start = addr;
+ entry.last = addr + 1;
+
+ fd = ioctl(dev_fd, VDUSE_IOTLB_GET_FD, &entry);
+ if (fd < 0) {
+ g_test_message("Failed to get fd for iova 0x%" PRIx64 ": %s",
+ addr, strerror(errno));
+ return false;
+ }
+
+ mmap_addr = mmap(0, 1, PROT_READ, MAP_SHARED, fd, 0);
+ if (mmap_addr == MAP_FAILED) {
+ close(fd);
+ g_test_message("Failed to mmap fd for iova 0x%" PRIx64 ": %s",
+ addr, strerror(errno));
+ goto close_fd;
+ }
+
+ *(volatile uint8_t *)mmap_addr;
+ munmap(mmap_addr, 1);
+
+close_fd:
+ close(fd);
+
+ return true;
+}
+
/*
 * VDUSE enable_queue callback: exercise the IOTLB by reading the first byte
 * of each of the queue's rings (descriptor, driver/avail and device/used
 * areas), then wake the waiting test thread once both queues were handled.
 */
static void vduse_read_guest_mem_enable_queue(VduseDev *dev, VduseVirtq *vq)
{
    TestServer *s = vduse_dev_get_priv(dev);
    int dev_fd = vduse_dev_get_fd(dev);
    struct vduse_vq_info vq_info;
    int ret;

    g_test_message("Enabling queue %d", vq->index);

    /* Get VQ info to retrieve ring addresses */
    vq_info.index = vq->index;
    ret = ioctl(dev_fd, VDUSE_VQ_GET_INFO, &vq_info);
    if (ret < 0 || !vq_info.ready) {
        /* Queue not actually ready yet; a later enable will retry */
        return;
    }

    test_read_first_byte(dev_fd, vq_info.desc_addr);
    test_read_first_byte(dev_fd, vq_info.driver_addr);
    test_read_first_byte(dev_fd, vq_info.device_addr);

    QEMU_LOCK_GUARD(&s->data_mutex);
    s->vq_read_num++;
    if (s->vq_read_num == 2) {
        /* Notify the test that we have read the rings for both queues */
        qemu_cond_broadcast(&s->data_cond);
    }
}
+
/* VDUSE disable_queue callback: nothing to undo for this test. */
static void vduse_read_guest_mem_disable_queue(VduseDev *dev, VduseVirtq *vq)
{
    /* Queue disabled */
}

/* Ops for the read-guest-mem test: all the work happens on queue enable. */
static const VduseOps vduse_read_guest_mem_ops = {
    .enable_queue = vduse_read_guest_mem_enable_queue,
    .disable_queue = vduse_read_guest_mem_disable_queue,
};
+
+static gboolean vduse_dev_handler_source_fd(int fd, GIOCondition condition,
+ void *data)
+{
+ TestServer *s = data;
+ int r;
+
+ if (poll(&(struct pollfd){.fd = fd, .events = POLLIN}, 1, 0) <= 0) {
+ return G_SOURCE_CONTINUE /* Spurious */;
+ }
+
+ r = vduse_dev_handler(s->vdev);
+ assert (r == 0);
+ return G_SOURCE_CONTINUE;
+}
+
/* Commands accepted by netlink_vdpa_device_do() */
typedef enum {
    VDPA_DEV_ADD,
    VDPA_DEV_DEL,
} vdpa_cmd_t;
+
+/* TODO: Issue proper nl commands */
+static int netlink_vdpa_device_do(vdpa_cmd_t cmd, const char *vduse_name)
+{
+ g_autoptr(GError) err = NULL;
+ g_auto(GStrv) argv = g_strdupv(
+ (cmd == VDPA_DEV_ADD) ?
+ (char **)(const char *[]){"vdpa", "dev", "add", "name", vduse_name,
+ "mgmtdev", "vduse", NULL} :
+ (char **)(const char *[]){"vdpa", "dev", "del", vduse_name, NULL});
+ GSpawnFlags flags = G_SPAWN_DEFAULT | G_SPAWN_SEARCH_PATH |
+ G_SPAWN_STDIN_FROM_DEV_NULL |
+ G_SPAWN_STDOUT_TO_DEV_NULL |
+ G_SPAWN_STDERR_TO_DEV_NULL;
+ if (cmd == VDPA_DEV_DEL) {
+ /* TODO: del blocks in read() for the write_err_and_exit, or just for
+ * the child to properly close child_err_report_pipe. But, either way,
+ * it causes the test to hang if we don't set this flag.
+ *
+ * It seems run under gdb step by step also makes the parent able to
+ * continue, so probably a race condition?
+ *
+ * glib2-devel-2.84.4.
+ */
+ flags |= G_SPAWN_LEAVE_DESCRIPTORS_OPEN;
+ }
+ gint wait_status = 0;
+
+ if (!g_spawn_sync(/* working_dir */ NULL, argv, /* envp */ NULL, flags,
+ /* child_setup */ NULL, /* user_data */ NULL,
+ /* standard_output */ NULL, /* standard_error */ NULL,
+ &wait_status, &err)) {
+ g_test_message("Failed to execute command: %s", err->message);
+ return -1;
+ }
+
+ assert(WIFEXITED(wait_status));
+ if (WEXITSTATUS(wait_status) != 0) {
+ g_test_message("Command failed with exit code: %d",
+ WEXITSTATUS(wait_status));
+ return wait_status;
+ }
+
+ return WEXITSTATUS(wait_status);
+}
+
+static char *vhost_find_device(const char *name)
+{
+ /* Find vhost-vdpa device name */
+ g_autoptr(GDir) dir = NULL;
+ g_autoptr(GError) err = NULL;
+ g_autofree char *sys_path = g_strdup_printf("/sys/devices/virtual/vduse/%s/%s",
+ name,
+ name);
+ dir = g_dir_open(sys_path, 0, &err);
+ if (!dir) {
+ g_test_message("Failed to open sys path %s: %s", sys_path, err->message);
+ return NULL;
+ }
+
+ for (const char *entry; (entry = g_dir_read_name(dir)) != NULL; ) {
+ if (g_str_has_prefix(entry, "vhost-vdpa-")) {
+ return g_strdup_printf("/dev/%s", entry);
+ }
+ }
+
+ return NULL;
+}
+
/*
 * Point @vdev's reconnect log at an anonymous temporary file under @tmpfs.
 * The file is unlinked immediately so it disappears once its last user
 * closes it.  Returns true on success, false if the temporary file could
 * not be created.
 */
static bool test_setup_reconnect_log(VduseDev *vdev, const char *tmpfs)
{
    g_autofree char *filename = NULL;
    g_autoptr(GError) err = NULL;
    int fd, r;
    bool ok;

    filename = g_build_filename(tmpfs, "vhost-vdpa-test-XXXXXX", NULL);
    fd = g_mkstemp_full(filename, 0, 0600);
    if (fd < 0) {
        g_test_message("Failed to create temporary file for reconnect log: %s",
                       g_strerror(errno));
        return false;
    }

    /* TODO: Properly handle errors here */
    r = vduse_set_reconnect_log_file(vdev, filename);
    assert(r == 0);
    /* Unlink now: libvduse keeps its own mapping of the file */
    r = unlink(filename);
    assert(r == 0);
    ok = g_close(fd, &err);
    assert(ok == TRUE);

    return ok;
}
+
/*
 * Create a TestServer: spawn the vdpa helper thread, create the VDUSE
 * device, register it as a vdpa device through the "vdpa" tool and locate
 * the resulting /dev/vhost-vdpa-* node.
 *
 * On any failure the partially initialized server is returned with
 * server->ready == false; the caller must check it and still call
 * test_server_free() on it.
 */
static TestServer *test_server_new(const gchar *name)
{
    TestServer *server = g_new0(TestServer, 1);
    g_autoptr(GError) err = NULL;
    g_autofree char *tmpfs = NULL;
    char config[sizeof(struct virtio_net_config)] = {0};
    uint64_t features;

    vhost_vdpa_thread_init(&server->vdpa_thread);

    server->vduse_name = g_strdup_printf("vdpa-test-%s", name);

    qemu_mutex_init(&server->data_mutex);
    qemu_cond_init(&server->data_cond);

    features = vduse_get_virtio_features() |
        (1ULL << VIRTIO_NET_F_MAC);

    server->vdev = vduse_dev_create(server->vduse_name,
                                    VIRTIO_ID_NET,
                                    0x1AF4, /* PCI vendor ID */
                                    features,
                                    2, /* num_queues */
                                    sizeof(config),
                                    config,
                                    &vduse_read_guest_mem_ops,
                                    server);

    if (!server->vdev) {
        return server;
    }

    /* NOTE(review): reaches into libvduse internals (vdev->fd); switch to
     * vduse_dev_get_fd() once the library is linked instead of #included. */
    vhost_vdpa_thread_add_source_fd(&server->vdpa_thread, server->vdev->fd,
                                    vduse_dev_handler_source_fd, server);

    tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
    if (!tmpfs) {
        g_test_message("Can't create temporary directory in %s: %s",
                       g_get_tmp_dir(), err->message);
    }
    g_assert_nonnull(tmpfs);
    server->tmpfs = g_steal_pointer(&tmpfs);

    test_setup_reconnect_log(server->vdev, server->tmpfs);
    vduse_dev_setup_queue(server->vdev, 0, VIRTQUEUE_MAX_SIZE);
    vduse_dev_setup_queue(server->vdev, 1, VIRTQUEUE_MAX_SIZE);

    if (netlink_vdpa_device_do(VDPA_DEV_ADD, server->vduse_name) != 0) {
        g_test_message("Failed to add vdpa device");
        return server;
    }
    server->vdpa_dev_path = vhost_find_device(server->vduse_name);
    if (!server->vdpa_dev_path) {
        return server;
    }

    server->ready = true;

    return server;
}
+
/*
 * Tear down a TestServer created by test_server_new().  Safe to call on a
 * partially initialized server: the vdpa "del" may fail harmlessly and
 * vdev is only destroyed if it was actually created.
 */
static void test_server_free(TestServer *server)
{
    g_test_message("About to call vdpa del device");

    netlink_vdpa_device_do(VDPA_DEV_DEL, server->vduse_name);

    /* finish the helper thread and dispatch pending sources */
    vhost_vdpa_thread_cleanup(&server->vdpa_thread);

    if (server->vdev) {
        vduse_dev_destroy(server->vdev);
    }

    g_free(server->vduse_name);
    g_free(server->vdpa_dev_path);
    g_free(server->tmpfs);

    qemu_cond_destroy(&server->data_cond);
    qemu_mutex_destroy(&server->data_mutex);
    g_free(server);
}
+
+static void wait_for_vqs(TestServer *s)
+{
+ gint64 end_time_us;
+
+ QEMU_LOCK_GUARD(&s->data_mutex);
+ end_time_us = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
+ while (s->vq_read_num < 2) {
+ if (!qemu_cond_timedwait(&s->data_cond, &s->data_mutex,
+ end_time_us - g_get_monotonic_time())) {
+ /* timeout has passed */
+ g_assert_cmpint(s->vq_read_num, ==, 2);
+ break;
+ }
+ }
+}
+
/*
 * g_test_queue_destroy() callback: QEMU holds the vhost-vdpa device open,
 * so it must be killed before the vdpa device can be deleted.
 */
static void vhost_vdpa_test_cleanup(void *s)
{
    TestServer *server = s;

    /* Cannot delete vdpa dev until QEMU stops using it. */
    qtest_kill_qemu(global_qtest);
    test_server_free(server);
}
+
/*
 * qos-graph "before" hook: create the VDUSE device and extend QEMU's
 * command line with the shared-memory backend and the vhost-vdpa netdev.
 * Skips the test (and frees the server) if device creation failed.
 */
static void *vhost_vdpa_test_setup_memfile(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vdpa-memfile");

    if (!server->ready) {
        g_test_skip("Failed to create VDUSE device");
        test_server_free(server);
        return NULL;
    }

    g_string_append_printf(cmd_line, QEMU_CMD_MEM, 256, 256, server->tmpfs);
    g_string_append_printf(cmd_line, QEMU_CMD_VDPA, server->vdpa_dev_path);
    g_test_message("cmdline: %s", cmd_line->str);

    /* Queue teardown: runs after the test body finishes */
    g_test_queue_destroy(vhost_vdpa_test_cleanup, server);

    return server;
}
+
/*
 * Test body: the actual ring reads happen in the enable_queue callback on
 * the vdpa thread; here we only wait for them to complete.
 */
static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *server = arg;

    wait_for_vqs(server);
}
+
/* Register the test in the qos graph against the virtio-net interface. */
static void register_vhost_vdpa_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_vdpa_test_setup_memfile,
        .subprocess = true,   /* isolate: the test kills its own QEMU */
        .arg = NULL,
    };

    qos_add_test("vhost-vdpa/read-guest-mem/memfile",
                 "virtio-net",
                 test_read_guest_mem, &opts);
}
libqos_init(register_vhost_vdpa_test);
--
2.53.0
^ permalink raw reply related [flat|nested] 10+ messages in thread* Re: [RFC PATCH 1/8] tests: vhost-vdpa: add initial VDUSE-based vhost-vdpa tests
2026-03-05 16:39 ` [RFC PATCH 1/8] tests: vhost-vdpa: add initial VDUSE-based vhost-vdpa tests Eugenio Pérez
@ 2026-03-25 21:02 ` Fabiano Rosas
0 siblings, 0 replies; 10+ messages in thread
From: Fabiano Rosas @ 2026-03-25 21:02 UTC (permalink / raw)
To: Eugenio Pérez, qemu-devel
Cc: Maxime Coquelin, Lei Yang, Paolo Bonzini, Michael S. Tsirkin,
Stefano Garzarella, Koushik Dutta, Jason Wang, Laurent Vivier
Eugenio Pérez <eperezma@redhat.com> writes:
> Based on vhost-user tests, the qos register itself as a VDUSE device and
> receives the events from QEMU.
>
> The test infrastructure creates a thread that acts as a VDUSE device,
> while the regular test thread is managing QEMU.
>
> This basic test just verify that the guest memory ring addresses are
> accessible, similar to the already existing test in vhost-user.
>
> This enables automated testing of vhost-vdpa code paths that previously
> required manual testing with real hardware.
>
> Changes from vhost-user test:
> * Automatic cleanup of many things.
> * Handle the vduse fd and timeout.
> * VDPA device cannot be removed before deleting QEMU, killing QEMU in
> vhost_vdpa_test_cleanup.
> * Read in enable callbacks, and the actual test_read_guest_mem is just
> waiting.
> * Add vhost_vdpa_thread to abstract fd monitoring
> * Use QemuMutex and QemuConf for scoped cleanup
>
> RFC: I'm not sure if this is the right place to add the tests in meson.
> Also, a few things are just with asserts() or g_spawn(), instead of
> more elegant code.
>
> Also, I don't know how to include the libvduse.a library as meson
> complains it's out of the tests/ directory, so I'm including the .c
> directly. Ugly but it works.
>
The proper way should be the following. Unfortunately, you'll have to
rewrite some parts of the test as it currently accesses symbols internal
to the library.
-- >8 --
From ca4418e57a2de83b1fea49f589f2c27ba424039b Mon Sep 17 00:00:00 2001
From: Fabiano Rosas <farosas@suse.de>
Date: Wed, 25 Mar 2026 17:54:41 -0300
Subject: [PATCH] tmp
---
tests/qtest/meson.build | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 2b002ea1ce..d6cf0e25ad 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -348,6 +348,7 @@ if have_tools and have_vhost_user_blk_server
endif
if have_libvduse and have_vhost_vdpa
qos_test_ss.add(files('vhost-vdpa-test.c'))
+ qos_test_ss.add(libvduse)
endif
tpmemu_files = ['tpm-emu.c', 'tpm-util.c', 'tpm-tests.c']
@@ -395,7 +396,8 @@ qtests = {
'pxe-test': files('boot-sector.c'),
'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c',
'pnv-xive2-nvpg_bar.c'),
- 'qos-test': [chardev, io, qos_test_ss.apply({}).sources()],
+ 'qos-test': [chardev, io, qos_test_ss.apply({}).sources(),
+ qos_test_ss.apply({}).dependencies()],
'tpm-crb-swtpm-test': [io, tpmemu_files],
'tpm-crb-test': [io, tpmemu_files],
'tpm-tis-swtpm-test': [io, tpmemu_files, 'tpm-tis-util.c'],
--
2.51.0
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [RFC PATCH 2/8] tests: vhost-vdpa: parameterize VDUSE operations
2026-03-05 16:39 [RFC PATCH 0/8] Add vhost-vdpa and Shadow Virtqueue tests Eugenio Pérez
2026-03-05 16:39 ` [RFC PATCH 1/8] tests: vhost-vdpa: add initial VDUSE-based vhost-vdpa tests Eugenio Pérez
@ 2026-03-05 16:39 ` Eugenio Pérez
2026-03-05 16:39 ` [RFC PATCH 3/8] tests: vhost-vdpa: add TX packet transmission test Eugenio Pérez
` (5 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Eugenio Pérez @ 2026-03-05 16:39 UTC (permalink / raw)
To: qemu-devel
Cc: Maxime Coquelin, Lei Yang, Paolo Bonzini, Michael S. Tsirkin,
Stefano Garzarella, Koushik Dutta, Fabiano Rosas, Jason Wang,
Laurent Vivier
Make the VduseOps callbacks a parameter to test_server_new() instead
of hardcoding vduse_read_guest_mem_ops.
This allows different test cases to provide custom queue enable/disable
handlers for testing various vhost-vdpa scenarios.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tests/qtest/vhost-vdpa-test.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/qtest/vhost-vdpa-test.c b/tests/qtest/vhost-vdpa-test.c
index 1fc5acacfed3..1c7d8540bd19 100644
--- a/tests/qtest/vhost-vdpa-test.c
+++ b/tests/qtest/vhost-vdpa-test.c
@@ -277,7 +277,7 @@ static bool test_setup_reconnect_log(VduseDev *vdev, const char *tmpfs)
return ok;
}
-static TestServer *test_server_new(const gchar *name)
+static TestServer *test_server_new(const gchar *name, const VduseOps *ops)
{
TestServer *server = g_new0(TestServer, 1);
g_autoptr(GError) err = NULL;
@@ -302,7 +302,7 @@ static TestServer *test_server_new(const gchar *name)
2, /* num_queues */
sizeof(config),
config,
- &vduse_read_guest_mem_ops,
+ ops,
server);
if (!server->vdev) {
@@ -387,7 +387,7 @@ static void vhost_vdpa_test_cleanup(void *s)
static void *vhost_vdpa_test_setup_memfile(GString *cmd_line, void *arg)
{
- TestServer *server = test_server_new("vdpa-memfile");
+ TestServer *server = test_server_new("vdpa-memfile", &vduse_read_guest_mem_ops);
if (!server->ready) {
g_test_skip("Failed to create VDUSE device");
--
2.53.0
^ permalink raw reply related [flat|nested] 10+ messages in thread* [RFC PATCH 3/8] tests: vhost-vdpa: add TX packet transmission test
2026-03-05 16:39 [RFC PATCH 0/8] Add vhost-vdpa and Shadow Virtqueue tests Eugenio Pérez
2026-03-05 16:39 ` [RFC PATCH 1/8] tests: vhost-vdpa: add initial VDUSE-based vhost-vdpa tests Eugenio Pérez
2026-03-05 16:39 ` [RFC PATCH 2/8] tests: vhost-vdpa: parameterize VDUSE operations Eugenio Pérez
@ 2026-03-05 16:39 ` Eugenio Pérez
2026-03-05 16:39 ` [RFC PATCH 4/8] tests: vhost-vdpa: test SVQ cleanup of pending buffers Eugenio Pérez
` (4 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Eugenio Pérez @ 2026-03-05 16:39 UTC (permalink / raw)
To: qemu-devel
Cc: Maxime Coquelin, Lei Yang, Paolo Bonzini, Michael S. Tsirkin,
Stefano Garzarella, Koushik Dutta, Fabiano Rosas, Jason Wang,
Laurent Vivier
Add test infrastructure for sending packets through TX virtqueue.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tests/qtest/vhost-vdpa-test.c | 141 +++++++++++++++++++++++++++++++---
1 file changed, 132 insertions(+), 9 deletions(-)
diff --git a/tests/qtest/vhost-vdpa-test.c b/tests/qtest/vhost-vdpa-test.c
index 1c7d8540bd19..7b1c34aa415e 100644
--- a/tests/qtest/vhost-vdpa-test.c
+++ b/tests/qtest/vhost-vdpa-test.c
@@ -17,6 +17,7 @@
#include "libqtest-single.h"
#include "qapi/error.h"
#include "libqos/qgraph.h"
+#include "libqos/virtio-net.h"
#include "hw/virtio/virtio-net.h"
#include "standard-headers/linux/virtio_ids.h"
@@ -25,6 +26,7 @@
#include "subprojects/libvduse/linux-headers/linux/vduse.h"
#include "subprojects/libvduse/libvduse.h"
+#include <endian.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -35,7 +37,7 @@
#define QEMU_CMD_MEM " -m %d -object memory-backend-file,id=mem,size=%dM," \
"mem-path=%s,share=on -numa node,memdev=mem"
-#define QEMU_CMD_VDPA " -netdev type=vhost-vdpa,vhostdev=%s,id=hs0"
+#define QEMU_CMD_VDPA " -netdev type=vhost-vdpa,x-svq=on,vhostdev=%s,id=hs0"
#define VDUSE_RECONNECT_LOG "vduse_reconnect.log"
typedef struct VdpaThread {
@@ -80,6 +82,61 @@ static void vhost_vdpa_thread_add_source_fd(VdpaThread *t, int fd,
g_source_unref(src);
}
/**
 * Add a single TX descriptor to the device's avail ring (the avail_idx is
 * only published later, by vhost_vdpa_kick_tx_desc()).
 *
 * @alloc: the guest allocator to allocate memory for the descriptor
 * @net: the virtio net device
 * @t: the vdpa thread (unused here; kept for symmetry with the kick and
 *     completion helpers)
 *
 * Returns the kick_id you can use to kick the device in a later call to
 * vhost_vdpa_kick_tx_desc().
 */
static uint32_t vhost_vdpa_add_tx_pkt_descs(QGuestAllocator *alloc,
                                            QVirtioNet *net, VdpaThread *t)
{
    QTestState *qts = global_qtest;
    uint32_t req_addr;

    /* TODO: Actually free this. RFC, is actually needed? */
    req_addr = guest_alloc(alloc, 64);
    g_assert_cmpint(req_addr, >, 0);

    return qvirtqueue_add(qts, net->queues[1], req_addr, 64, /* write */ false,
                          /* next */ false);
}
+
+static void vhost_vdpa_kick_tx_desc(VdpaThread *t, QVirtioNet *net,
+ uint32_t kick_id)
+{
+ QTestState *qts = global_qtest;
+
+ qvirtqueue_kick(qts, net->vdev, net->queues[1], kick_id);
+}
+
+static void vhost_vdpa_get_tx_pkt(QGuestAllocator *alloc, QVirtioNet *net,
+ uint32_t desc_idx, VdpaThread *t)
+{
+ g_autofree struct VduseVirtqElement *elem = NULL;
+ int64_t timeout = 5 * G_TIME_SPAN_SECOND;
+ QTestState *qts = global_qtest;
+ vring_desc_t desc;
+ int64_t end_time_us;
+ uint32_t len;
+
+ end_time_us = g_get_monotonic_time() + timeout;
+ qvirtio_wait_used_elem(qts, net->vdev, net->queues[1], desc_idx, &len,
+ timeout);
+ g_assert_cmpint(g_get_monotonic_time(), <, end_time_us);
+ g_assert_cmpint(len, ==, 0);
+
+ qtest_memread(qts, net->queues[1]->desc + sizeof(desc)*desc_idx, &desc,
+ sizeof(desc));
+ /* We know we're version 1 so always little endian */
+ guest_free(alloc, le64toh(desc.addr));
+}
+
typedef struct TestServer {
gchar *vduse_name;
gchar *vdpa_dev_path;
@@ -163,6 +220,54 @@ static const VduseOps vduse_read_guest_mem_ops = {
.disable_queue = vduse_read_guest_mem_disable_queue,
};
/*
 * TX-queue kick handler, run on the vdpa thread: consume the kick eventfd,
 * then pop every available element and immediately push it back used with
 * length 0, notifying the driver each time.
 */
static gboolean vhost_vdpa_rxtx_handle_tx(int fd, GIOCondition condition,
                                          void *data)
{
    VduseVirtq *vq = data;

    /* Consume the pending kick notification */
    eventfd_read(fd, (eventfd_t[]){0});
    do {
        g_autofree VduseVirtqElement *elem = NULL;

        elem = vduse_queue_pop(vq, sizeof(*elem));
        if (!elem) {
            break;
        }

        g_test_message("Got element with %d buffers", elem->out_num);
        /* TX elements are device-readable only */
        g_assert_cmpint(elem->in_num, ==, 0);

        vduse_queue_push(vq, elem, 0);
        vduse_queue_notify(vq);
    } while (true);

    return G_SOURCE_CONTINUE;
}
+
+static void vduse_rxtx_enable_queue(VduseDev *dev, VduseVirtq *vq)
+{
+ TestServer *s = vduse_dev_get_priv(dev);
+
+ g_test_message("Enabling queue %d", vq->index);
+
+ if (vq->index == 1) {
+ /* This is the tx queue, add a source to handle it */
+ vhost_vdpa_thread_add_source_fd(&s->vdpa_thread,
+ vduse_queue_get_fd(vq),
+ vhost_vdpa_rxtx_handle_tx, vq);
+ }
+}
+
+static void vduse_rxtx_disable_queue(VduseDev *dev, VduseVirtq *vq)
+{
+ /* Queue disabled */
+}
+
+static const VduseOps vduse_rxtx_ops = {
+ .enable_queue = vduse_rxtx_enable_queue,
+ .disable_queue = vduse_rxtx_disable_queue,
+};
+
static gboolean vduse_dev_handler_source_fd(int fd, GIOCondition condition,
void *data)
{
@@ -292,7 +397,8 @@ static TestServer *test_server_new(const gchar *name, const VduseOps *ops)
qemu_mutex_init(&server->data_mutex);
qemu_cond_init(&server->data_cond);
- features = vduse_get_virtio_features() |
+ /* Disabling NOTIFY_ON_EMPTY as SVQ does not support it */
+ features = (vduse_get_virtio_features() & ~(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY)) |
(1ULL << VIRTIO_NET_F_MAC);
server->vdev = vduse_dev_create(server->vduse_name,
@@ -376,6 +482,13 @@ static void wait_for_vqs(TestServer *s)
}
}
+static void test_wait(void *obj, void *arg, QGuestAllocator *alloc)
+{
+ TestServer *server = arg;
+
+ wait_for_vqs(server);
+}
+
static void vhost_vdpa_test_cleanup(void *s)
{
TestServer *server = s;
@@ -385,9 +498,9 @@ static void vhost_vdpa_test_cleanup(void *s)
test_server_free(server);
}
-static void *vhost_vdpa_test_setup_memfile(GString *cmd_line, void *arg)
+static void *vhost_vdpa_test_setup(GString *cmd_line, void *arg)
{
- TestServer *server = test_server_new("vdpa-memfile", &vduse_read_guest_mem_ops);
+ TestServer *server = test_server_new("vdpa-memfile", arg);
if (!server->ready) {
g_test_skip("Failed to create VDUSE device");
@@ -404,23 +517,33 @@ static void *vhost_vdpa_test_setup_memfile(GString *cmd_line, void *arg)
return server;
}
-static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
+static void vhost_vdpa_tx_test(void *obj, void *arg, QGuestAllocator *alloc)
{
TestServer *server = arg;
+ QVirtioNet *net = obj;
+ uint32_t free_head;
- wait_for_vqs(server);
+ free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread);
+ vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head);
+ vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread);
}
static void register_vhost_vdpa_test(void)
{
+ /* TODO: void * discards const qualifier */
QOSGraphTestOptions opts = {
- .before = vhost_vdpa_test_setup_memfile,
+ .before = vhost_vdpa_test_setup,
.subprocess = true,
- .arg = NULL,
+ .arg = (void *)&vduse_read_guest_mem_ops,
};
qos_add_test("vhost-vdpa/read-guest-mem/memfile",
"virtio-net",
- test_read_guest_mem, &opts);
+ test_wait, &opts);
+
+ opts.arg = (void *)&vduse_rxtx_ops;
+ qos_add_test("vhost-vdpa/rxtx",
+ "virtio-net",
+ vhost_vdpa_tx_test, &opts);
}
libqos_init(register_vhost_vdpa_test);
--
2.53.0
^ permalink raw reply related [flat|nested] 10+ messages in thread* [RFC PATCH 4/8] tests: vhost-vdpa: test SVQ cleanup of pending buffers
2026-03-05 16:39 [RFC PATCH 0/8] Add vhost-vdpa and Shadow Virtqueue tests Eugenio Pérez
` (2 preceding siblings ...)
2026-03-05 16:39 ` [RFC PATCH 3/8] tests: vhost-vdpa: add TX packet transmission test Eugenio Pérez
@ 2026-03-05 16:39 ` Eugenio Pérez
2026-03-05 16:39 ` [RFC PATCH 5/8] tests: vhost-vdpa: add descriptor chain tests Eugenio Pérez
` (3 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Eugenio Pérez @ 2026-03-05 16:39 UTC (permalink / raw)
To: qemu-devel
Cc: Maxime Coquelin, Lei Yang, Paolo Bonzini, Michael S. Tsirkin,
Stefano Garzarella, Koushik Dutta, Fabiano Rosas, Jason Wang,
Laurent Vivier
Add RX buffers to the receive queue before terminating QEMU to verify
that shadow virtqueue properly cleans up unused descriptors during
shutdown.
This tests the SVQ teardown path when descriptors remain in the
available ring but were never used by the device.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tests/qtest/vhost-vdpa-test.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/tests/qtest/vhost-vdpa-test.c b/tests/qtest/vhost-vdpa-test.c
index 7b1c34aa415e..8dde7d95b167 100644
--- a/tests/qtest/vhost-vdpa-test.c
+++ b/tests/qtest/vhost-vdpa-test.c
@@ -40,10 +40,15 @@
#define QEMU_CMD_VDPA " -netdev type=vhost-vdpa,x-svq=on,vhostdev=%s,id=hs0"
#define VDUSE_RECONNECT_LOG "vduse_reconnect.log"
+static int NUM_RX_BUFS = 2;
+
typedef struct VdpaThread {
GThread *thread;
GMainLoop *loop;
GMainContext *context;
+
+ /* Guest memory that must be free at the end of the test */
+ uint64_t qemu_mem_to_free;
} VdpaThread;
static void *vhost_vdpa_thread_function(void *data)
@@ -82,6 +87,21 @@ static void vhost_vdpa_thread_add_source_fd(VdpaThread *t, int fd,
g_source_unref(src);
}
+static void vhost_vdpa_add_rx_pkts(QGuestAllocator *alloc, QVirtioNet *net,
+ VdpaThread *t)
+{
+ QTestState *qts = global_qtest;
+
+ t->qemu_mem_to_free = guest_alloc(alloc, 64);
+
+ for (int i = 0; i < NUM_RX_BUFS; i++) {
+ uint32_t head = qvirtqueue_add(qts, net->queues[0],
+ t->qemu_mem_to_free, 64,
+ /* write */ false, /* next */ false);
+ qvirtqueue_kick(qts, net->vdev, net->queues[0], head);
+ }
+}
+
/**
* Send a descriptor or a chain of descriptors to the device, and optionally
* and / or update the avail ring and avail_idx of the driver ring.
@@ -523,6 +543,9 @@ static void vhost_vdpa_tx_test(void *obj, void *arg, QGuestAllocator *alloc)
QVirtioNet *net = obj;
uint32_t free_head;
+ /* Add some rx packets so SVQ must clean them at the end of QEMU run */
+ vhost_vdpa_add_rx_pkts(alloc, net, &server->vdpa_thread);
+
free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread);
vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head);
vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread);
--
2.53.0
^ permalink raw reply related [flat|nested] 10+ messages in thread* [RFC PATCH 5/8] tests: vhost-vdpa: add descriptor chain tests
2026-03-05 16:39 [RFC PATCH 0/8] Add vhost-vdpa and Shadow Virtqueue tests Eugenio Pérez
` (3 preceding siblings ...)
2026-03-05 16:39 ` [RFC PATCH 4/8] tests: vhost-vdpa: test SVQ cleanup of pending buffers Eugenio Pérez
@ 2026-03-05 16:39 ` Eugenio Pérez
2026-03-05 16:39 ` [RFC PATCH 6/8] tests: vhost-vdpa: test out-of-order descriptor completion Eugenio Pérez
` (2 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Eugenio Pérez @ 2026-03-05 16:39 UTC (permalink / raw)
To: qemu-devel
Cc: Maxime Coquelin, Lei Yang, Paolo Bonzini, Michael S. Tsirkin,
Stefano Garzarella, Koushik Dutta, Fabiano Rosas, Jason Wang,
Laurent Vivier
Extend TX tests to support multi-descriptor chains:
- Add chain_len parameter to control chain length
- Use GAsyncQueue to validate expected vs actual chain lengths
- Test single descriptors, 2-descriptor chains, and out-of-order chains
Chain descriptors all point to the same guest memory buffer to
simplify allocation tracking while still testing chain traversal logic.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tests/qtest/vhost-vdpa-test.c | 53 ++++++++++++++++++++++++++++++-----
1 file changed, 46 insertions(+), 7 deletions(-)
diff --git a/tests/qtest/vhost-vdpa-test.c b/tests/qtest/vhost-vdpa-test.c
index 8dde7d95b167..433e5d71ca7a 100644
--- a/tests/qtest/vhost-vdpa-test.c
+++ b/tests/qtest/vhost-vdpa-test.c
@@ -47,6 +47,9 @@ typedef struct VdpaThread {
GMainLoop *loop;
GMainContext *context;
+ /* Expected elements queue to compare properties */
+ GAsyncQueue *elem_queue;
+
/* Guest memory that must be free at the end of the test */
uint64_t qemu_mem_to_free;
} VdpaThread;
@@ -60,6 +63,7 @@ static void *vhost_vdpa_thread_function(void *data)
static void vhost_vdpa_thread_init(VdpaThread *t)
{
+ t->elem_queue = g_async_queue_new();
t->context = g_main_context_new();
t->loop = g_main_loop_new(t->context, FALSE);
t->thread = g_thread_new("vdpa-thread", vhost_vdpa_thread_function, t->loop);
@@ -76,6 +80,7 @@ static void vhost_vdpa_thread_cleanup(VdpaThread *t)
g_main_loop_unref(t->loop);
g_main_context_unref(t->context);
+ g_async_queue_unref(t->elem_queue);
}
static void vhost_vdpa_thread_add_source_fd(VdpaThread *t, int fd,
@@ -109,29 +114,48 @@ static void vhost_vdpa_add_rx_pkts(QGuestAllocator *alloc, QVirtioNet *net,
* @alloc: the guest allocator to allocate memory for the descriptors
* @net: the virtio net device
* @t: the vdpa thread to push the expected chain length if kick is true
+ * @chain_len: the number of descriptors in the chain to add
*
* Returns the kick_id you can use to kick the device in a later call to this
* function.
*/
static uint32_t vhost_vdpa_add_tx_pkt_descs(QGuestAllocator *alloc,
- QVirtioNet *net, VdpaThread *t)
+ QVirtioNet *net, VdpaThread *t,
+ uint32_t chain_len)
{
QTestState *qts = global_qtest;
- uint32_t req_addr;
+ uint32_t req_addr, kick_id = UINT32_MAX;
+ assert(chain_len > 0);
/* TODO: Actually free this. RFC, is actually needed? */
req_addr = guest_alloc(alloc, 64);
g_assert_cmpint(req_addr, >, 0);
- return qvirtqueue_add(qts, net->queues[1], req_addr, 64, /* write */ false,
- /* next */ false);
+ /*
+ * We set up the descriptors in a way that each of them points to the same
+ * buffer. This simplifies guest's memory management while still exercising
+ * chain traversal in SVQ.
+ */
+ for (uint32_t i = 0; i < chain_len; i++) {
+ uint32_t head;
+ bool next = i != chain_len - 1;
+
+ head = qvirtqueue_add(qts, net->queues[1], req_addr, 64,
+ /* write */ false, next);
+ if (i == 0) {
+ kick_id = head;
+ }
+ }
+
+ return kick_id;
}
static void vhost_vdpa_kick_tx_desc(VdpaThread *t, QVirtioNet *net,
- uint32_t kick_id)
+ uint32_t kick_id, uint32_t chain_len)
{
QTestState *qts = global_qtest;
+ g_async_queue_push(t->elem_queue, (void *)(intptr_t)chain_len);
qvirtqueue_kick(qts, net->vdev, net->queues[1], kick_id);
}
@@ -244,9 +268,12 @@ static gboolean vhost_vdpa_rxtx_handle_tx(int fd, GIOCondition condition,
void *data)
{
VduseVirtq *vq = data;
+ VduseDev *dev = vduse_queue_get_dev(vq);
+ TestServer *s = vduse_dev_get_priv(dev);
eventfd_read(fd, (eventfd_t[]){0});
do {
+ intptr_t expected_elems;
g_autofree VduseVirtqElement *elem = NULL;
elem = vduse_queue_pop(vq, sizeof(*elem));
@@ -254,7 +281,10 @@ static gboolean vhost_vdpa_rxtx_handle_tx(int fd, GIOCondition condition,
break;
}
+ expected_elems = (intptr_t)g_async_queue_try_pop(s->vdpa_thread.elem_queue);
+ g_assert_cmpint(expected_elems, >, 0);
g_test_message("Got element with %d buffers", elem->out_num);
+ g_assert_cmpint(elem->out_num, ==, expected_elems);
g_assert_cmpint(elem->in_num, ==, 0);
vduse_queue_push(vq, elem, 0);
@@ -546,9 +576,18 @@ static void vhost_vdpa_tx_test(void *obj, void *arg, QGuestAllocator *alloc)
/* Add some rx packets so SVQ must clean them at the end of QEMU run */
vhost_vdpa_add_rx_pkts(alloc, net, &server->vdpa_thread);
- free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread);
- vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head);
+ /* Simple packet */
+ free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread,
+ 1);
+ vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head, 1);
+ vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread);
+
+ /* Simple chain */
+ free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread,
+ 2);
+ vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head, 2);
vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread);
+
}
static void register_vhost_vdpa_test(void)
--
2.53.0
^ permalink raw reply related [flat|nested] 10+ messages in thread* [RFC PATCH 6/8] tests: vhost-vdpa: test out-of-order descriptor completion
2026-03-05 16:39 [RFC PATCH 0/8] Add vhost-vdpa and Shadow Virtqueue tests Eugenio Pérez
` (4 preceding siblings ...)
2026-03-05 16:39 ` [RFC PATCH 5/8] tests: vhost-vdpa: add descriptor chain tests Eugenio Pérez
@ 2026-03-05 16:39 ` Eugenio Pérez
2026-03-05 16:39 ` [RFC PATCH 7/8] tests: vhost-vdpa: introduce TestParameters struct Eugenio Pérez
2026-03-05 16:39 ` [RFC PATCH 8/8] tests: vhost-vdpa: add VIRTIO_F_IN_ORDER feature tests Eugenio Pérez
7 siblings, 0 replies; 10+ messages in thread
From: Eugenio Pérez @ 2026-03-05 16:39 UTC (permalink / raw)
To: qemu-devel
Cc: Maxime Coquelin, Lei Yang, Paolo Bonzini, Michael S. Tsirkin,
Stefano Garzarella, Koushik Dutta, Fabiano Rosas, Jason Wang,
Laurent Vivier
Add tests that submit descriptors in one order but complete them in
a different order:
- Submit desc1 then desc2, complete desc2 then desc1
- Same pattern with 2-descriptor chains
This verifies SVQ correctly handles devices that don't preserve
submission order in completions.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tests/qtest/vhost-vdpa-test.c | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/tests/qtest/vhost-vdpa-test.c b/tests/qtest/vhost-vdpa-test.c
index 433e5d71ca7a..5efe688673cb 100644
--- a/tests/qtest/vhost-vdpa-test.c
+++ b/tests/qtest/vhost-vdpa-test.c
@@ -571,7 +571,7 @@ static void vhost_vdpa_tx_test(void *obj, void *arg, QGuestAllocator *alloc)
{
TestServer *server = arg;
QVirtioNet *net = obj;
- uint32_t free_head;
+ uint32_t free_head, free_head2;
/* Add some rx packets so SVQ must clean them at the end of QEMU run */
vhost_vdpa_add_rx_pkts(alloc, net, &server->vdpa_thread);
@@ -588,6 +588,24 @@ static void vhost_vdpa_tx_test(void *obj, void *arg, QGuestAllocator *alloc)
vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head, 2);
vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread);
+ /* Out of order descriptors */
+ free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread,
+ 1);
+ free_head2 = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread,
+ 1);
+ vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head2, 1);
+ vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head, 1);
+
+ vhost_vdpa_get_tx_pkt(alloc, net, free_head2, &server->vdpa_thread);
+ vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread);
+
+ /* Out of order chains */
+ free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread,
+ 2);
+ free_head2 = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread,
+ 2);
+ vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head2, 2);
+ vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head, 2);
}
static void register_vhost_vdpa_test(void)
--
2.53.0
^ permalink raw reply related [flat|nested] 10+ messages in thread* [RFC PATCH 7/8] tests: vhost-vdpa: introduce TestParameters struct
2026-03-05 16:39 [RFC PATCH 0/8] Add vhost-vdpa and Shadow Virtqueue tests Eugenio Pérez
` (5 preceding siblings ...)
2026-03-05 16:39 ` [RFC PATCH 6/8] tests: vhost-vdpa: test out-of-order descriptor completion Eugenio Pérez
@ 2026-03-05 16:39 ` Eugenio Pérez
2026-03-05 16:39 ` [RFC PATCH 8/8] tests: vhost-vdpa: add VIRTIO_F_IN_ORDER feature tests Eugenio Pérez
7 siblings, 0 replies; 10+ messages in thread
From: Eugenio Pérez @ 2026-03-05 16:39 UTC (permalink / raw)
To: qemu-devel
Cc: Maxime Coquelin, Lei Yang, Paolo Bonzini, Michael S. Tsirkin,
Stefano Garzarella, Koushik Dutta, Fabiano Rosas, Jason Wang,
Laurent Vivier
Encapsulate test configuration (VduseOps, features) in TestParameters
struct to prepare for parameterized tests with different device feature
flags.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tests/qtest/vhost-vdpa-test.c | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/tests/qtest/vhost-vdpa-test.c b/tests/qtest/vhost-vdpa-test.c
index 5efe688673cb..0192b00748d5 100644
--- a/tests/qtest/vhost-vdpa-test.c
+++ b/tests/qtest/vhost-vdpa-test.c
@@ -42,6 +42,10 @@
static int NUM_RX_BUFS = 2;
+typedef struct TestParameters {
+ const VduseOps *ops;
+} TestParameters;
+
typedef struct VdpaThread {
GThread *thread;
GMainLoop *loop;
@@ -264,6 +268,10 @@ static const VduseOps vduse_read_guest_mem_ops = {
.disable_queue = vduse_read_guest_mem_disable_queue,
};
+static const TestParameters mem_read_params = {
+ .ops = &vduse_read_guest_mem_ops,
+};
+
static gboolean vhost_vdpa_rxtx_handle_tx(int fd, GIOCondition condition,
void *data)
{
@@ -318,6 +326,10 @@ static const VduseOps vduse_rxtx_ops = {
.disable_queue = vduse_rxtx_disable_queue,
};
+static const TestParameters rxtx_params = {
+ .ops = &vduse_rxtx_ops,
+};
+
static gboolean vduse_dev_handler_source_fd(int fd, GIOCondition condition,
void *data)
{
@@ -432,7 +444,8 @@ static bool test_setup_reconnect_log(VduseDev *vdev, const char *tmpfs)
return ok;
}
-static TestServer *test_server_new(const gchar *name, const VduseOps *ops)
+static TestServer *test_server_new(const gchar *name,
+ const TestParameters *params)
{
TestServer *server = g_new0(TestServer, 1);
g_autoptr(GError) err = NULL;
@@ -458,7 +471,7 @@ static TestServer *test_server_new(const gchar *name, const VduseOps *ops)
2, /* num_queues */
sizeof(config),
config,
- ops,
+ params->ops,
server);
if (!server->vdev) {
@@ -614,14 +627,14 @@ static void register_vhost_vdpa_test(void)
QOSGraphTestOptions opts = {
.before = vhost_vdpa_test_setup,
.subprocess = true,
- .arg = (void *)&vduse_read_guest_mem_ops,
+ .arg = (void *)&mem_read_params,
};
qos_add_test("vhost-vdpa/read-guest-mem/memfile",
"virtio-net",
test_wait, &opts);
- opts.arg = (void *)&vduse_rxtx_ops;
+ opts.arg = (void *)&rxtx_params;
qos_add_test("vhost-vdpa/rxtx",
"virtio-net",
vhost_vdpa_tx_test, &opts);
--
2.53.0
^ permalink raw reply related [flat|nested] 10+ messages in thread* [RFC PATCH 8/8] tests: vhost-vdpa: add VIRTIO_F_IN_ORDER feature tests
2026-03-05 16:39 [RFC PATCH 0/8] Add vhost-vdpa and Shadow Virtqueue tests Eugenio Pérez
` (6 preceding siblings ...)
2026-03-05 16:39 ` [RFC PATCH 7/8] tests: vhost-vdpa: introduce TestParameters struct Eugenio Pérez
@ 2026-03-05 16:39 ` Eugenio Pérez
7 siblings, 0 replies; 10+ messages in thread
From: Eugenio Pérez @ 2026-03-05 16:39 UTC (permalink / raw)
To: qemu-devel
Cc: Maxime Coquelin, Lei Yang, Paolo Bonzini, Michael S. Tsirkin,
Stefano Garzarella, Koushik Dutta, Fabiano Rosas, Jason Wang,
Laurent Vivier
The test verifies SVQ correctly handles batched pushes where multiple
elements are filled before flushing the used ring.
With this test, gcov reported coverage is:
Total Hit
Lines: 83.9 % 347 291
Functions: 90.3 % 31 28
Branches: 59.2 % 157 93
Apart from impossible branches like scoped cleanups, the missing blocks
are:
* Event idx.
* All SVQ CVQ handling.
* Hard to reproduce corner cases like a linear buffer in GPA that is
split into more than one buffer in HVA, and then SVQ is saturated.
* Buggy input (no descriptors, used descriptors that are not available,
moving indexes more than vq size).
* Unbinding device call notifier from QEMU vhost system.
RFC: I think the tx_packets_inorder struct is also better than the async
queue for previous tests. But I'd like to know other people's opinions
too.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tests/qtest/vhost-vdpa-test.c | 183 ++++++++++++++++++++++++++++++++--
1 file changed, 173 insertions(+), 10 deletions(-)
diff --git a/tests/qtest/vhost-vdpa-test.c b/tests/qtest/vhost-vdpa-test.c
index 0192b00748d5..99a4df4433e3 100644
--- a/tests/qtest/vhost-vdpa-test.c
+++ b/tests/qtest/vhost-vdpa-test.c
@@ -44,6 +44,7 @@ static int NUM_RX_BUFS = 2;
typedef struct TestParameters {
const VduseOps *ops;
+ uint64_t features;
} TestParameters;
typedef struct VdpaThread {
@@ -56,6 +57,9 @@ typedef struct VdpaThread {
/* Guest memory that must be free at the end of the test */
uint64_t qemu_mem_to_free;
+
+ /* Expected avail_idx in in_order */
+ uint16_t expected_avail_idx;
} VdpaThread;
static void *vhost_vdpa_thread_function(void *data)
@@ -163,8 +167,19 @@ static void vhost_vdpa_kick_tx_desc(VdpaThread *t, QVirtioNet *net,
qvirtqueue_kick(qts, net->vdev, net->queues[1], kick_id);
}
+/**
+ * Get the next used element of the tx queue and check properties of it. If
+ * wait is true, wait until an element is available, otherwise just check if
+ * it's already available.
+ *
+ * Note that waiting clean the ISR, so if you call this function with
+ * wait=true, another thread must call the VQ so this call returns. If you
+ * expect to fetch many descs you need to get subsequent elements with
+ * wait=false.
+ */
static void vhost_vdpa_get_tx_pkt(QGuestAllocator *alloc, QVirtioNet *net,
- uint32_t desc_idx, VdpaThread *t)
+ uint32_t desc_idx, VdpaThread *t,
+ bool wait)
{
g_autofree struct VduseVirtqElement *elem = NULL;
int64_t timeout = 5 * G_TIME_SPAN_SECOND;
@@ -174,8 +189,17 @@ static void vhost_vdpa_get_tx_pkt(QGuestAllocator *alloc, QVirtioNet *net,
uint32_t len;
end_time_us = g_get_monotonic_time() + timeout;
- qvirtio_wait_used_elem(qts, net->vdev, net->queues[1], desc_idx, &len,
- timeout);
+ if (wait) {
+ qvirtio_wait_used_elem(qts, net->vdev, net->queues[1], desc_idx, &len,
+ timeout);
+ } else {
+ uint32_t got_desc_idx;
+
+ if (!qvirtqueue_get_buf(qts, net->queues[1], &got_desc_idx, &len)) {
+ g_test_fail();
+ }
+ g_assert_cmpint(got_desc_idx, ==, desc_idx);
+ }
g_assert_cmpint(g_get_monotonic_time(), <, end_time_us);
g_assert_cmpint(len, ==, 0);
@@ -190,6 +214,7 @@ typedef struct TestServer {
gchar *vdpa_dev_path;
gchar *tmpfs;
int vq_read_num;
+ size_t test_cursor;
VduseDev *vdev;
VdpaThread vdpa_thread;
QemuMutex data_mutex;
@@ -330,6 +355,93 @@ static const TestParameters rxtx_params = {
.ops = &vduse_rxtx_ops,
};
+static const struct {
+ uint16_t out_num;
+ bool push;
+} tx_packets_inorder[] = {
+ /* One buffer first */
+ { .out_num = 1, .push = true },
+
+ /* Then two buffers chained */
+ { .out_num = 2, .push = true },
+
+ /* Then one buffer, but batch it with the next one */
+ { .out_num = 1, .push = false },
+ { .out_num = 1, .push = true },
+
+ /* Finally, two buffers chained again, but batch them together */
+ { .out_num = 2, .push = false },
+ { .out_num = 2, .push = true },
+};
+
+static gboolean vhost_vdpa_rxtx_inorder_handle_tx(int fd,
+ GIOCondition condition,
+ void *data)
+{
+ VduseVirtq *vq = data;
+ VduseDev *dev = vduse_queue_get_dev(vq);
+ TestServer *s = vduse_dev_get_priv(dev);
+
+ eventfd_read(fd, (eventfd_t[]){0});
+ do {
+ g_autofree VduseVirtqElement *elem = NULL;
+ size_t batch_size = 1;
+ elem = vduse_queue_pop(vq, sizeof(*elem));
+ if (!elem) {
+ break;
+ }
+
+ g_assert_cmpint(s->test_cursor, <, G_N_ELEMENTS(tx_packets_inorder));
+ g_assert_cmpint(elem->out_num, ==,
+ tx_packets_inorder[s->test_cursor].out_num);
+ g_assert_cmpint(elem->in_num, ==, 0);
+
+ if (tx_packets_inorder[s->test_cursor].push) {
+ for (ssize_t i = s->test_cursor - 1;
+ i >= 0 && !tx_packets_inorder[i].push; i--) {
+ batch_size++;
+ }
+ vduse_queue_fill(vq, elem, /* len */ 0, /* offset */ 0);
+ vduse_queue_flush(vq, batch_size);
+ vduse_queue_notify(vq);
+ }
+ s->test_cursor++;
+ } while (true);
+
+ return G_SOURCE_CONTINUE;
+}
+
+static void vduse_rxtx_inorder_enable_queue(VduseDev *dev, VduseVirtq *vq)
+{
+ TestServer *s = vduse_dev_get_priv(dev);
+
+ g_test_message("Enabling queue %d", vq->index);
+
+ if (vq->index == 1) {
+ g_assert(dev->features & (1ULL << VIRTIO_F_IN_ORDER));
+
+ /* This is the tx queue, add a source to handle it */
+ vhost_vdpa_thread_add_source_fd(&s->vdpa_thread,
+ vduse_queue_get_fd(vq),
+ vhost_vdpa_rxtx_inorder_handle_tx, vq);
+ }
+}
+
+static void vduse_rxtx_inorder_disable_queue(VduseDev *dev, VduseVirtq *vq)
+{
+ /* Queue disabled */
+}
+
+static const VduseOps vduse_rxtx_inorder_ops = {
+ .enable_queue = vduse_rxtx_inorder_enable_queue,
+ .disable_queue = vduse_rxtx_inorder_disable_queue,
+};
+
+static const TestParameters rxtx_inorder_params = {
+ .ops = &vduse_rxtx_inorder_ops,
+ .features = (1ULL << VIRTIO_F_IN_ORDER),
+};
+
static gboolean vduse_dev_handler_source_fd(int fd, GIOCondition condition,
void *data)
{
@@ -462,7 +574,7 @@ static TestServer *test_server_new(const gchar *name,
/* Disabling NOTIFY_ON_EMPTY as SVQ does not support it */
features = (vduse_get_virtio_features() & ~(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY)) |
- (1ULL << VIRTIO_NET_F_MAC);
+ (1ULL << VIRTIO_NET_F_MAC) | params->features;
server->vdev = vduse_dev_create(server->vduse_name,
VIRTIO_ID_NET,
@@ -561,7 +673,7 @@ static void vhost_vdpa_test_cleanup(void *s)
test_server_free(server);
}
-static void *vhost_vdpa_test_setup(GString *cmd_line, void *arg)
+static void *vhost_vdpa_test_setup0(GString *cmd_line, void *arg)
{
TestServer *server = test_server_new("vdpa-memfile", arg);
@@ -580,6 +692,17 @@ static void *vhost_vdpa_test_setup(GString *cmd_line, void *arg)
return server;
}
+static void *vhost_vdpa_test_setup_inorder(GString *cmd_line, void *arg)
+{
+ const char *device = g_strstr_len(cmd_line->str, -1,
+ "-device virtio-net-pci");
+ g_assert_nonnull(device);
+ device += strlen("-device virtio-net-pci");
+ g_string_insert(cmd_line, device - cmd_line->str, ",in_order=on");
+
+ return vhost_vdpa_test_setup0(cmd_line, arg);
+}
+
static void vhost_vdpa_tx_test(void *obj, void *arg, QGuestAllocator *alloc)
{
TestServer *server = arg;
@@ -593,13 +716,13 @@ static void vhost_vdpa_tx_test(void *obj, void *arg, QGuestAllocator *alloc)
free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread,
1);
vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head, 1);
- vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread);
+ vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread, true);
/* Simple chain */
free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread,
2);
vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head, 2);
- vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread);
+ vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread, true);
/* Out of order descriptors */
free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread,
@@ -609,8 +732,8 @@ static void vhost_vdpa_tx_test(void *obj, void *arg, QGuestAllocator *alloc)
vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head2, 1);
vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head, 1);
- vhost_vdpa_get_tx_pkt(alloc, net, free_head2, &server->vdpa_thread);
- vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread);
+ vhost_vdpa_get_tx_pkt(alloc, net, free_head2, &server->vdpa_thread, true);
+ vhost_vdpa_get_tx_pkt(alloc, net, free_head, &server->vdpa_thread, true);
/* Out of order chains */
free_head = vhost_vdpa_add_tx_pkt_descs(alloc, net, &server->vdpa_thread,
@@ -621,11 +744,45 @@ static void vhost_vdpa_tx_test(void *obj, void *arg, QGuestAllocator *alloc)
vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head, 2);
}
+static void vhost_vdpa_tx_inorder_test(void *obj, void *arg, QGuestAllocator *alloc)
+{
+ uint32_t free_head[G_N_ELEMENTS(tx_packets_inorder)];
+ TestServer *server = arg;
+ QVirtioNet *net = obj;
+ size_t free_head_idx = 0;
+
+ for (size_t i = 0; i < G_N_ELEMENTS(tx_packets_inorder); i++) {
+ uint32_t chain_len = tx_packets_inorder[i].out_num;
+ bool wait = true;
+
+ free_head[i] = vhost_vdpa_add_tx_pkt_descs(alloc, net,
+ &server->vdpa_thread,
+ chain_len);
+ vhost_vdpa_kick_tx_desc(&server->vdpa_thread, net, free_head[i], chain_len);
+ if (!tx_packets_inorder[i].push) {
+ continue;
+ }
+
+ /*
+ * TODO: SVQ uses the emulated VirtIO device underneath so it does not
+ * support to batch many used elements in order. This is fragile and
+ * could change in the future, and the right solution is to add proper
+ * support in vhost_vdpa_get_tx_pkt -> qvirtio_wait_used_elem. Let's
+ * just code the current behavior here for now.
+ */
+ for (; free_head_idx <= i; free_head_idx++) {
+ vhost_vdpa_get_tx_pkt(alloc, net, free_head[free_head_idx],
+ &server->vdpa_thread, wait);
+ wait = false;
+ }
+ }
+}
+
static void register_vhost_vdpa_test(void)
{
/* TODO: void * discards const qualifier */
QOSGraphTestOptions opts = {
- .before = vhost_vdpa_test_setup,
+ .before = vhost_vdpa_test_setup0,
.subprocess = true,
.arg = (void *)&mem_read_params,
};
@@ -638,5 +795,11 @@ static void register_vhost_vdpa_test(void)
qos_add_test("vhost-vdpa/rxtx",
"virtio-net",
vhost_vdpa_tx_test, &opts);
+
+ opts.before = vhost_vdpa_test_setup_inorder;
+ opts.arg = (void *)&rxtx_inorder_params;
+ qos_add_test("vhost-vdpa/rxtx-inorder",
+ "virtio-net",
+ vhost_vdpa_tx_inorder_test, &opts);
}
libqos_init(register_vhost_vdpa_test);
--
2.53.0
^ permalink raw reply related [flat|nested] 10+ messages in thread