From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Liuxiangdong <liuxiangdong5@huawei.com>,
Stefano Garzarella <sgarzare@redhat.com>,
Zhu Lingshan <lingshan.zhu@intel.com>,
Si-Wei Liu <si-wei.liu@oracle.com>,
Laurent Vivier <lvivier@redhat.com>,
"Gonglei (Arei)" <arei.gonglei@huawei.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Jason Wang <jasowang@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>, Cindy Lu <lulu@redhat.com>,
Gautam Dawar <gdawar@xilinx.com>, Eli Cohen <eli@mellanox.com>,
Cornelia Huck <cohuck@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Longpeng <longpeng2@huawei.com>,
Harpreet Singh Anand <hanand@xilinx.com>,
Parav Pandit <parav@mellanox.com>,
kvm@vger.kernel.org, virtualization@lists.linux-foundation.org
Subject: [PATCH v9 09/12] vdpa: add asid parameter to vhost_vdpa_dma_map/unmap
Date: Thu, 15 Dec 2022 12:31:41 +0100
Message-ID: <20221215113144.322011-10-eperezma@redhat.com>
In-Reply-To: <20221215113144.322011-1-eperezma@redhat.com>
So the caller can choose which ASID the mapping is destined to.
No need to update the batch functions, as they are only called from
memory listener updates at the moment. Memory listener updates always
target ASID 0, as it is the passthrough ASID.
All vhost devices' ASIDs are 0 at this moment.
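Purely as an illustration (not part of the diff below), a minimal sketch of
how a caller picks an ASID with the new signatures; the helper name
example_remap_region() is hypothetical:

    /*
     * Sketch only: guest physical mappings keep using the passthrough
     * ASID, while SVQ/CVQ callers pass v->address_space_id instead.
     */
    static int example_remap_region(struct vhost_vdpa *v, hwaddr iova,
                                    hwaddr size, void *vaddr, bool readonly)
    {
        int r = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, size);
        if (r != 0) {
            return r;
        }
        return vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, size,
                                  vaddr, readonly);
    }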
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
---
v7:
* Move comment on zero initialization of vhost_vdpa_dma_map above the
functions.
* Add VHOST_VDPA_GUEST_PA_ASID macro.
v5:
* Solve conflict, now vhost_vdpa_svq_unmap_ring returns void
* Change comment on zero initialization.
v4: Add comment specifying behavior if device does not support _F_ASID
v3: Deleted unneeded space
---
include/hw/virtio/vhost-vdpa.h | 14 ++++++++++---
hw/virtio/vhost-vdpa.c | 36 +++++++++++++++++++++++-----------
net/vhost-vdpa.c | 6 +++---
hw/virtio/trace-events | 4 ++--
4 files changed, 41 insertions(+), 19 deletions(-)
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 1111d85643..e57dfa1fd1 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -19,6 +19,12 @@
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"
+/*
+ * ASID dedicated to map guest's addresses. If SVQ is disabled it maps GPA to
+ * qemu's IOVA. If SVQ is enabled it also maps the SVQ vring here.
+ */
+#define VHOST_VDPA_GUEST_PA_ASID 0
+
typedef struct VhostVDPAHostNotifier {
MemoryRegion mr;
void *addr;
@@ -29,6 +35,7 @@ typedef struct vhost_vdpa {
int index;
uint32_t msg_type;
bool iotlb_batch_begin_sent;
+ uint32_t address_space_id;
MemoryListener listener;
struct vhost_vdpa_iova_range iova_range;
uint64_t acked_features;
@@ -42,8 +49,9 @@ typedef struct vhost_vdpa {
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
} VhostVDPA;
-int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
- void *vaddr, bool readonly);
-int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size);
+int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+ hwaddr size, void *vaddr, bool readonly);
+int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+ hwaddr size);
#endif
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 86e1fa8e9e..5e591a8fda 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -72,22 +72,28 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
return false;
}
-int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
- void *vaddr, bool readonly)
+/*
+ * The caller must set asid = 0 if the device does not support asid.
+ * This is not an ABI break since it is set to 0 by the initializer anyway.
+ */
+int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+ hwaddr size, void *vaddr, bool readonly)
{
struct vhost_msg_v2 msg = {};
int fd = v->device_fd;
int ret = 0;
msg.type = v->msg_type;
+ msg.asid = asid;
msg.iotlb.iova = iova;
msg.iotlb.size = size;
msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
msg.iotlb.type = VHOST_IOTLB_UPDATE;
- trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
- msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
+ trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
+ msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
+ msg.iotlb.type);
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
error_report("failed to write, fd=%d, errno=%d (%s)",
@@ -98,18 +104,24 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
return ret;
}
-int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size)
+/*
+ * The caller must set asid = 0 if the device does not support asid.
+ * This is not an ABI break since it is set to 0 by the initializer anyway.
+ */
+int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+ hwaddr size)
{
struct vhost_msg_v2 msg = {};
int fd = v->device_fd;
int ret = 0;
msg.type = v->msg_type;
+ msg.asid = asid;
msg.iotlb.iova = iova;
msg.iotlb.size = size;
msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
- trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
+ trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
msg.iotlb.size, msg.iotlb.type);
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
@@ -229,8 +241,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
}
vhost_vdpa_iotlb_batch_begin_once(v);
- ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
- vaddr, section->readonly);
+ ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+ int128_get64(llsize), vaddr, section->readonly);
if (ret) {
error_report("vhost vdpa map fail!");
goto fail_map;
@@ -303,7 +315,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
vhost_iova_tree_remove(v->iova_tree, *result);
}
vhost_vdpa_iotlb_batch_begin_once(v);
- ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
+ ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+ int128_get64(llsize));
if (ret) {
error_report("vhost_vdpa dma unmap error!");
}
@@ -869,7 +882,7 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
}
size = ROUND_UP(result->size, qemu_real_host_page_size());
- r = vhost_vdpa_dma_unmap(v, result->iova, size);
+ r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
if (unlikely(r < 0)) {
error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
return;
@@ -909,7 +922,8 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
return false;
}
- r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1,
+ r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
+ needle->size + 1,
(void *)(uintptr_t)needle->translated_addr,
needle->perm == IOMMU_RO);
if (unlikely(r != 0)) {
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index e829ef1f43..a592ee07ec 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -259,7 +259,7 @@ static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
return;
}
- r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1);
+ r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
if (unlikely(r != 0)) {
error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
}
@@ -299,8 +299,8 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
return r;
}
- r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf,
- !write);
+ r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
+ vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
if (unlikely(r < 0)) {
goto dma_map_err;
}
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 14fc5b9bb2..96da58a41f 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -30,8 +30,8 @@ vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32""
vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
# vhost-vdpa.c
-vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
-vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
+vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
+vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
--
2.31.1