From: "Eugenio Pérez" <eperezma@redhat.com>
To: "Michael S. Tsirkin" <mst@redhat.com>
Cc: "Laurent Vivier" <lvivier@redhat.com>,
linux-kernel@vger.kernel.org, jasowang@redhat.com,
"Xuan Zhuo" <xuanzhuo@linux.alibaba.com>,
"Eugenio Pérez" <eperezma@redhat.com>,
"Maxime Coquelin" <mcoqueli@redhat.com>,
"Cindy Lu" <lulu@redhat.com>,
virtualization@lists.linux.dev,
"Yongji Xie" <xieyongji@bytedance.com>,
"Stefano Garzarella" <sgarzare@redhat.com>
Subject: [PATCH v14 04/13] vduse: return internal vq group struct as map token
Date: Fri, 16 Jan 2026 15:04:46 +0100 [thread overview]
Message-ID: <20260116140455.1560491-5-eperezma@redhat.com> (raw)
In-Reply-To: <20260116140455.1560491-1-eperezma@redhat.com>
Return the internal struct that represents the vq group as virtqueue map
token, instead of the device. This allows the map functions to access
the information per group.
At this moment all the virtqueues share the same vq group, which can
only point to ASID 0. This change prepares the infrastructure for actual
per-group address space handling.
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
v4:
* Revert the "invalid vq group" concept, and assume 0 by default.
* Revert unnecessary blank line addition (Jason)
v3:
* Adapt all virtio_map_ops callbacks to handle empty tokens in case of
invalid groups.
* Make setting status DRIVER_OK fail if vq group is not valid.
* Remove the _int name suffix from struct vduse_vq_group.
RFC v3:
* Make the vq groups a dynamic array to support an arbitrary number of
them.
---
drivers/vdpa/vdpa_user/vduse_dev.c | 100 ++++++++++++++++++++++++++---
include/linux/virtio.h | 6 +-
2 files changed, 94 insertions(+), 12 deletions(-)
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 5bffc25a266e..68290c3d9d8f 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -22,6 +22,7 @@
#include <linux/uio.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
+#include <linux/virtio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <uapi/linux/vduse.h>
@@ -85,6 +86,10 @@ struct vduse_umem {
struct mm_struct *mm;
};
+struct vduse_vq_group {
+ struct vduse_dev *dev;
+};
+
struct vduse_dev {
struct vduse_vdpa *vdev;
struct device *dev;
@@ -118,6 +123,7 @@ struct vduse_dev {
u32 vq_align;
u32 ngroups;
struct vduse_umem *umem;
+ struct vduse_vq_group *groups;
struct mutex mem_lock;
unsigned int bounce_size;
struct mutex domain_lock;
@@ -605,6 +611,17 @@ static u32 vduse_get_vq_group(struct vdpa_device *vdpa, u16 idx)
return dev->vqs[idx]->group;
}
+static union virtio_map vduse_get_vq_map(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ u32 vq_group = vduse_get_vq_group(vdpa, idx);
+ union virtio_map ret = {
+ .group = &dev->groups[vq_group],
+ };
+
+ return ret;
+}
+
static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
struct vdpa_vq_state *state)
{
@@ -825,6 +842,7 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
.get_vq_affinity = vduse_vdpa_get_vq_affinity,
.reset = vduse_vdpa_reset,
.set_map = vduse_vdpa_set_map,
+ .get_vq_map = vduse_get_vq_map,
.free = vduse_vdpa_free,
};
@@ -832,7 +850,14 @@ static void vduse_dev_sync_single_for_device(union virtio_map token,
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+ return;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
}
@@ -841,7 +866,14 @@ static void vduse_dev_sync_single_for_cpu(union virtio_map token,
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+ return;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
}
@@ -851,7 +883,14 @@ static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+ return DMA_MAPPING_ERROR;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
}
@@ -860,7 +899,14 @@ static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+ return;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
}
@@ -868,11 +914,17 @@ static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
dma_addr_t *dma_addr, gfp_t flag)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
unsigned long iova;
void *addr;
*dma_addr = DMA_MAPPING_ERROR;
+ if (!token.group)
+ return NULL;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
addr = vduse_domain_alloc_coherent(domain, size,
(dma_addr_t *)&iova, flag);
if (!addr)
@@ -887,14 +939,28 @@ static void vduse_dev_free_coherent(union virtio_map token, size_t size,
void *vaddr, dma_addr_t dma_addr,
unsigned long attrs)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+ return;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
}
static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+ return false;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
return dma_addr < domain->bounce_size;
}
@@ -908,7 +974,14 @@ static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t dma_addr)
static size_t vduse_dev_max_mapping_size(union virtio_map token)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+ return 0;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
return domain->bounce_size;
}
@@ -1726,6 +1799,7 @@ static int vduse_destroy_dev(char *name)
if (dev->domain)
vduse_domain_destroy(dev->domain);
kfree(dev->name);
+ kfree(dev->groups);
vduse_dev_destroy(dev);
module_put(THIS_MODULE);
@@ -1895,6 +1969,13 @@ static int vduse_create_dev(struct vduse_dev_config *config,
dev->ngroups = (dev->api_version < VDUSE_API_VERSION_1)
? 1
: config->ngroups;
+ dev->groups = kcalloc(dev->ngroups, sizeof(dev->groups[0]),
+ GFP_KERNEL);
+ if (!dev->groups)
+ goto err_vq_groups;
+ for (u32 i = 0; i < dev->ngroups; ++i)
+ dev->groups[i].dev = dev;
+
dev->name = kstrdup(config->name, GFP_KERNEL);
if (!dev->name)
goto err_str;
@@ -1931,6 +2012,8 @@ static int vduse_create_dev(struct vduse_dev_config *config,
err_idr:
kfree(dev->name);
err_str:
+ kfree(dev->groups);
+err_vq_groups:
vduse_dev_destroy(dev);
err:
return ret;
@@ -2092,7 +2175,6 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
return -ENOMEM;
}
- dev->vdev->vdpa.vmap.iova_domain = dev->domain;
ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
if (ret) {
put_device(&dev->vdev->vdpa.dev);
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 63bb05ece8c5..3bbc4cb6a672 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -43,13 +43,13 @@ struct virtqueue {
void *priv;
};
-struct vduse_iova_domain;
+struct vduse_vq_group;
union virtio_map {
/* Device that performs DMA */
struct device *dma_dev;
- /* VDUSE specific mapping data */
- struct vduse_iova_domain *iova_domain;
+ /* VDUSE specific virtqueue group for doing map */
+ struct vduse_vq_group *group;
};
int virtqueue_add_outbuf(struct virtqueue *vq,
--
2.52.0
next prev parent reply other threads:[~2026-01-16 14:05 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-16 14:04 [PATCH v14 00/13] Add multiple address spaces support to VDUSE Eugenio Pérez
2026-01-16 14:04 ` [PATCH v14 01/13] vhost: move vdpa group bound check to vhost_vdpa Eugenio Pérez
2026-01-19 6:29 ` Jason Wang
2026-01-16 14:04 ` [PATCH v14 02/13] vduse: add v1 API definition Eugenio Pérez
2026-01-16 14:04 ` [PATCH v14 03/13] vduse: add vq group support Eugenio Pérez
2026-01-16 14:04 ` Eugenio Pérez [this message]
2026-01-16 14:04 ` [PATCH v14 05/13] vdpa: document set_group_asid thread safety Eugenio Pérez
2026-01-19 6:30 ` Jason Wang
2026-01-16 14:04 ` [PATCH v14 06/13] vhost: forbid change vq groups ASID if DRIVER_OK is set Eugenio Pérez
2026-01-19 6:30 ` Jason Wang
2026-01-16 14:04 ` [PATCH v14 07/13] vduse: refactor vdpa_dev_add for goto err handling Eugenio Pérez
2026-01-16 14:04 ` [PATCH v14 08/13] vduse: remove unused vaddr parameter of vduse_domain_free_coherent Eugenio Pérez
2026-01-16 14:04 ` [PATCH v14 09/13] vduse: take out allocations from vduse_dev_alloc_coherent Eugenio Pérez
2026-01-19 6:42 ` Jason Wang
2026-01-16 14:04 ` [PATCH v14 10/13] vduse: merge tree search logic of IOTLB_GET_FD and IOTLB_GET_INFO ioctls Eugenio Pérez
2026-01-16 14:04 ` [PATCH v14 11/13] vduse: add vq group asid support Eugenio Pérez
2026-01-16 18:48 ` ALOK TIWARI
2026-01-19 7:16 ` Jason Wang
2026-01-19 8:09 ` Eugenio Perez Martin
2026-01-19 8:34 ` Jason Wang
2026-01-19 9:39 ` Michael S. Tsirkin
2026-01-19 10:29 ` Eugenio Perez Martin
2026-01-19 12:25 ` Jason Wang
2026-01-16 14:04 ` [PATCH v14 12/13] vduse: bump version number Eugenio Pérez
2026-01-16 14:04 ` [PATCH v14 13/13] Documentation: Add documentation for VDUSE Address Space IDs Eugenio Pérez
2026-01-16 18:41 ` ALOK TIWARI
2026-01-19 7:23 ` Eugenio Perez Martin
2026-01-19 7:19 ` Jason Wang
2026-01-19 7:40 ` Eugenio Perez Martin
2026-01-19 12:27 ` Jason Wang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260116140455.1560491-5-eperezma@redhat.com \
--to=eperezma@redhat.com \
--cc=jasowang@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=lulu@redhat.com \
--cc=lvivier@redhat.com \
--cc=mcoqueli@redhat.com \
--cc=mst@redhat.com \
--cc=sgarzare@redhat.com \
--cc=virtualization@lists.linux.dev \
--cc=xieyongji@bytedance.com \
--cc=xuanzhuo@linux.alibaba.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox