From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>,
Cornelia Huck <cohuck@redhat.com>,
Markus Armbruster <armbru@redhat.com>,
Gautam Dawar <gdawar@xilinx.com>,
Harpreet Singh Anand <hanand@xilinx.com>,
"Gonglei \(Arei\)" <arei.gonglei@huawei.com>,
Peter Xu <peterx@redhat.com>, Eli Cohen <eli@mellanox.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Zhu Lingshan <lingshan.zhu@intel.com>,
Eric Blake <eblake@redhat.com>,
Liuxiangdong <liuxiangdong5@huawei.com>
Subject: [RFC PATCH v5 22/23] vdpa: Add asid attribute to vdpa device
Date: Fri, 8 Apr 2022 15:34:14 +0200
Message-ID: <20220408133415.1371760-23-eperezma@redhat.com>
In-Reply-To: <20220408133415.1371760-1-eperezma@redhat.com>

We can configure an ASID per virtqueue group, but we still use ASID 0 for
every vdpa device. Multiple ASID support for CVQ will be introduced in the
next patches.
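
For reference, below is a minimal sketch of roughly the ioctl sequence that
vhost_vdpa_set_address_space_id() issues at device start, written as a
standalone userspace snippet. It assumes the VHOST_VDPA_GET_VRING_GROUP and
VHOST_VDPA_SET_GROUP_ASID ioctls from the kernel headers update earlier in
this series; the fd, cvq_index and the asid value 1 are illustrative only,
not part of this patch.

  #include <errno.h>
  #include <sys/ioctl.h>
  #include <linux/vhost.h>

  /* Move the virtqueue group that contains cvq_index to its own ASID. */
  static int assign_cvq_asid(int vdpa_fd, unsigned int cvq_index)
  {
      struct vhost_vring_state state = { .index = cvq_index };

      /* Ask the device which virtqueue group the CVQ belongs to. */
      if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_GROUP, &state) < 0) {
          return -errno;
      }

      /* Attach that group to address space id 1 (illustrative value). */
      state.index = state.num; /* group id returned by the device */
      state.num = 1;           /* the CVQ ASID */
      if (ioctl(vdpa_fd, VHOST_VDPA_SET_GROUP_ASID, &state) < 0) {
          return -errno;
      }

      return 0;
  }
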
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
include/hw/virtio/vhost-vdpa.h | 2 +
include/hw/virtio/vhost.h | 2 +
hw/net/vhost_net.c | 1 +
hw/virtio/vhost-vdpa.c | 97 ++++++++++++++++++++++++++++------
net/vhost-vdpa.c | 15 +++---
hw/virtio/trace-events | 9 ++--
6 files changed, 99 insertions(+), 27 deletions(-)
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index f1ba46a860..aa572d1acc 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -32,6 +32,8 @@ typedef struct vhost_vdpa {
MemoryListener listener;
struct vhost_vdpa_iova_range iova_range;
uint64_t acked_features;
+ /* one past the last vq index of this virtqueue group */
+ int vq_group_index_end;
bool shadow_vqs_enabled;
/* IOVA mapping used by the Shadow Virtqueue */
VhostIOVATree *iova_tree;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 034868fa9e..2a6819dc2e 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -78,6 +78,8 @@ struct vhost_dev {
int vq_index_end;
/* if non-zero, minimum required value for max_queues */
int num_queues;
+ /* address space id */
+ uint32_t address_space_id;
/* Must be a vq group different than any other vhost dev */
bool independent_vq_group;
uint64_t features;
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 10480e19e5..e8a99c8605 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -352,6 +352,7 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
}
net = get_vhost_net(peer);
+ net->dev.address_space_id = !!cvq_idx;
net->dev.independent_vq_group = !!cvq_idx;
vhost_net_set_vq_index(net, i * 2, index_end);
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index dfff94d46f..1b4e03c658 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -79,14 +79,18 @@ static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
int ret = 0;
msg.type = v->msg_type;
+ if (v->dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) {
+ msg.asid = v->dev->address_space_id;
+ }
msg.iotlb.iova = iova;
msg.iotlb.size = size;
msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
msg.iotlb.type = VHOST_IOTLB_UPDATE;
- trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
- msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
+ trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
+ msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
+ msg.iotlb.type);
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
error_report("failed to write, fd=%d, errno=%d (%s)",
@@ -104,12 +108,15 @@ static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
int fd = v->device_fd;
int ret = 0;
+ if (v->dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) {
+ msg.asid = v->dev->address_space_id;
+ }
msg.type = v->msg_type;
msg.iotlb.iova = iova;
msg.iotlb.size = size;
msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
- trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
+ trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
msg.iotlb.size, msg.iotlb.type);
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
@@ -129,7 +136,12 @@ static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
.iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
};
- trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
+ if (v->dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) {
+ msg.asid = v->dev->address_space_id;
+ }
+
+ trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.asid,
+ msg.iotlb.type);
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
error_report("failed to write, fd=%d, errno=%d (%s)",
fd, errno, strerror(errno));
@@ -162,9 +174,13 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
}
msg.type = v->msg_type;
+ if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) {
+ msg.asid = v->dev->address_space_id;
+ }
msg.iotlb.type = VHOST_IOTLB_BATCH_END;
- trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
+ trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.asid,
+ msg.iotlb.type);
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
error_report("failed to write, fd=%d, errno=%d (%s)",
fd, errno, strerror(errno));
@@ -1170,10 +1186,48 @@ call_err:
return false;
}
+static int vhost_vdpa_set_vq_group_address_space_id(struct vhost_dev *dev,
+ struct vhost_vring_state *asid)
+{
+ trace_vhost_vdpa_set_vq_group_address_space_id(dev, asid->index, asid->num);
+ return vhost_vdpa_call(dev, VHOST_VDPA_SET_GROUP_ASID, asid);
+}
+
+static int vhost_vdpa_set_address_space_id(struct vhost_dev *dev)
+{
+ struct vhost_vring_state vq_group = {
+ .index = dev->vq_index,
+ };
+ struct vhost_vring_state asid;
+ int ret;
+
+ if (!dev->address_space_id) {
+ return 0;
+ }
+
+ ret = vhost_vdpa_get_vring_group(dev, &vq_group);
+ if (unlikely(ret)) {
+ error_report("Can't read vq group, errno=%d (%s)", ret,
+ g_strerror(-ret));
+ return ret;
+ }
+
+ asid.index = vq_group.num;
+ asid.num = dev->address_space_id;
+ ret = vhost_vdpa_set_vq_group_address_space_id(dev, &asid);
+ if (unlikely(ret)) {
+ error_report("Can't set vq group %u asid %u, errno=%d (%s)",
+ asid.index, asid.num, ret, g_strerror(-ret));
+ }
+ return ret;
+}
+
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
struct vhost_vdpa *v = dev->opaque;
- bool ok;
+ bool vq_group_end, ok;
+ int r = 0;
+
trace_vhost_vdpa_dev_start(dev, started);
if (started) {
@@ -1182,6 +1236,10 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
!vhost_dev_is_independent_group(dev)) {
return -1;
}
+ r = vhost_vdpa_set_address_space_id(dev);
+ if (unlikely(r)) {
+ return r;
+ }
ok = vhost_vdpa_svqs_start(dev);
if (unlikely(!ok)) {
return -1;
@@ -1195,21 +1253,26 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
}
- if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
- return 0;
+ vq_group_end = dev->vq_index + dev->nvqs == v->vq_group_index_end;
+ if (vq_group_end && started) {
+ memory_listener_register(&v->listener, &address_space_memory);
}
- if (started) {
- memory_listener_register(&v->listener, &address_space_memory);
- return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
- } else {
- vhost_vdpa_reset_device(dev);
- vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
- VIRTIO_CONFIG_S_DRIVER);
- memory_listener_unregister(&v->listener);
+ if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
+ if (started) {
+ r = vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+ } else {
+ vhost_vdpa_reset_device(dev);
+ vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
+ VIRTIO_CONFIG_S_DRIVER);
+ }
+ }
- return 0;
+ if (vq_group_end && !started) {
+ memory_listener_unregister(&v->listener);
}
+
+ return r;
}
static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 09fcc4a88e..6207ead884 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -152,9 +152,10 @@ err_init:
static void vhost_vdpa_cleanup(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
- struct vhost_dev *dev = s->vhost_vdpa.dev;
+ struct vhost_vdpa *v = &s->vhost_vdpa;
+ struct vhost_dev *dev = v->dev;
- if (dev && dev->vq_index + dev->nvqs == dev->vq_index_end) {
+ if (dev && dev->vq_index + dev->nvqs == v->vq_group_index_end) {
g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
}
if (s->vhost_net) {
@@ -333,6 +334,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
int vdpa_device_fd,
int queue_pair_index,
int nvqs,
+ int vq_group_end,
bool is_datapath,
bool svq,
VhostIOVATree *iova_tree)
@@ -354,6 +356,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->vhost_vdpa.device_fd = vdpa_device_fd;
s->vhost_vdpa.index = queue_pair_index;
s->vhost_vdpa.shadow_vqs_enabled = svq;
+ s->vhost_vdpa.vq_group_index_end = vq_group_end;
if (!is_datapath) {
s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
}
@@ -464,16 +467,16 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
for (i = 0; i < queue_pairs; i++) {
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
- vdpa_device_fd, i, 2, true, opts->x_svq,
- iova_tree);
+ vdpa_device_fd, i, 2, 2 * queue_pairs,
+ true, opts->x_svq, iova_tree);
if (!ncs[i])
goto err;
}
if (has_cvq) {
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
- vdpa_device_fd, i, 1, false, opts->x_svq,
- iova_tree);
+ vdpa_device_fd, i, 1, 2 * queue_pairs + 1,
+ false, opts->x_svq, iova_tree);
if (!nc)
goto err;
}
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index e6fdc03514..2858deac60 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -23,10 +23,10 @@ vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64
vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
# vhost-vdpa.c
-vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
-vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
-vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
-vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
+vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
+vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
+vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint32_t asid, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" type: %"PRIu8
+vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint32_t asid, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" type: %"PRIu8
vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64
vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
@@ -44,6 +44,7 @@ vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size, uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags: 0x%"PRIx32
vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len) "dev: %p config: %p config_len: %"PRIu32
vhost_vdpa_get_vring_group(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
+vhost_vdpa_set_vq_group_address_space_id(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d"
vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p"
vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64
--
2.27.0