From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Parav Pandit <parav@mellanox.com>,
si-wei.liu@oracle.com, Stefano Garzarella <sgarzare@redhat.com>,
Zhu Lingshan <lingshan.zhu@intel.com>,
Lei Yang <leiyang@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>,
Dragos Tatulea <dtatulea@nvidia.com>,
Laurent Vivier <lvivier@redhat.com>
Subject: [PATCH 9.0 11/13] vdpa: use VhostVDPAShared in vdpa_dma_map and unmap
Date: Fri, 24 Nov 2023 18:14:28 +0100
Message-ID: <20231124171430.2964464-12-eperezma@redhat.com>
In-Reply-To: <20231124171430.2964464-1-eperezma@redhat.com>
The callers only have the shared information by the end of this series.
Start converting these functions to take VhostVDPAShared directly.
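As an illustration (a minimal sketch taken from the hunks below, with error
handling elided), a typical caller changes from passing the whole
struct vhost_vdpa to passing only its shared part:

    /* Before this patch: the full struct vhost_vdpa is required. */
    ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);

    /* After this patch: only the VhostVDPAShared part is needed. */
    ret = vhost_vdpa_dma_map(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);

No functional change is intended: as the hunks show, these functions only
ever used fields reachable through v->shared (such as device_fd).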
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
include/hw/virtio/vhost-vdpa.h | 4 +--
hw/virtio/vhost-vdpa.c | 50 +++++++++++++++++-----------------
net/vhost-vdpa.c | 5 ++--
3 files changed, 30 insertions(+), 29 deletions(-)
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 3880b9e7f2..705c754776 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -69,9 +69,9 @@ typedef struct vhost_vdpa {
int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx);
-int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
hwaddr size, void *vaddr, bool readonly);
-int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
hwaddr size);
typedef struct vdpa_iommu {
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index a07cd85081..0ed6550aad 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -86,11 +86,11 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
* The caller must set asid = 0 if the device does not support asid.
* This is not an ABI break since it is set to 0 by the initializer anyway.
*/
-int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
hwaddr size, void *vaddr, bool readonly)
{
struct vhost_msg_v2 msg = {};
- int fd = v->shared->device_fd;
+ int fd = s->device_fd;
int ret = 0;
msg.type = VHOST_IOTLB_MSG_V2;
@@ -101,7 +101,7 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
msg.iotlb.type = VHOST_IOTLB_UPDATE;
- trace_vhost_vdpa_dma_map(v->shared, fd, msg.type, msg.asid, msg.iotlb.iova,
+ trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova,
msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
msg.iotlb.type);
@@ -118,11 +118,11 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
* The caller must set asid = 0 if the device does not support asid.
* This is not an ABI break since it is set to 0 by the initializer anyway.
*/
-int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
hwaddr size)
{
struct vhost_msg_v2 msg = {};
- int fd = v->shared->device_fd;
+ int fd = s->device_fd;
int ret = 0;
msg.type = VHOST_IOTLB_MSG_V2;
@@ -131,8 +131,8 @@ int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
msg.iotlb.size = size;
msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
- trace_vhost_vdpa_dma_unmap(v->shared, fd, msg.type, msg.asid,
- msg.iotlb.iova, msg.iotlb.size, msg.iotlb.type);
+ trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova,
+ msg.iotlb.size, msg.iotlb.type);
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
error_report("failed to write, fd=%d, errno=%d (%s)",
@@ -143,30 +143,29 @@ int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
return ret;
}
-static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
+static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s)
{
- int fd = v->shared->device_fd;
+ int fd = s->device_fd;
struct vhost_msg_v2 msg = {
.type = VHOST_IOTLB_MSG_V2,
.iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
};
- trace_vhost_vdpa_listener_begin_batch(v->shared, fd, msg.type,
- msg.iotlb.type);
+ trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
error_report("failed to write, fd=%d, errno=%d (%s)",
fd, errno, strerror(errno));
}
}
-static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
+static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
{
- if (v->shared->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
- !v->shared->iotlb_batch_begin_sent) {
- vhost_vdpa_listener_begin_batch(v);
+ if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
+ !s->iotlb_batch_begin_sent) {
+ vhost_vdpa_listener_begin_batch(s);
}
- v->shared->iotlb_batch_begin_sent = true;
+ s->iotlb_batch_begin_sent = true;
}
static void vhost_vdpa_listener_commit(MemoryListener *listener)
@@ -226,7 +225,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
return;
}
- ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+ ret = vhost_vdpa_dma_map(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
iotlb->addr_mask + 1, vaddr, read_only);
if (ret) {
error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
@@ -234,7 +233,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
v, iova, iotlb->addr_mask + 1, vaddr, ret);
}
} else {
- ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+ ret = vhost_vdpa_dma_unmap(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
iotlb->addr_mask + 1);
if (ret) {
error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
@@ -370,8 +369,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
iova = mem_region.iova;
}
- vhost_vdpa_iotlb_batch_begin_once(v);
- ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+ vhost_vdpa_iotlb_batch_begin_once(v->shared);
+ ret = vhost_vdpa_dma_map(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
int128_get64(llsize), vaddr, section->readonly);
if (ret) {
error_report("vhost vdpa map fail!");
@@ -455,13 +454,13 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
iova = result->iova;
vhost_iova_tree_remove(v->shared->iova_tree, *result);
}
- vhost_vdpa_iotlb_batch_begin_once(v);
+ vhost_vdpa_iotlb_batch_begin_once(v->shared);
/*
* The unmap ioctl doesn't accept a full 64-bit. need to check it
*/
if (int128_eq(llsize, int128_2_64())) {
llsize = int128_rshift(llsize, 1);
- ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+ ret = vhost_vdpa_dma_unmap(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
int128_get64(llsize));
if (ret) {
@@ -471,7 +470,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
}
iova += int128_get64(llsize);
}
- ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+ ret = vhost_vdpa_dma_unmap(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
int128_get64(llsize));
if (ret) {
@@ -1077,7 +1076,8 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
}
size = ROUND_UP(result->size, qemu_real_host_page_size());
- r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
+ r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova,
+ size);
if (unlikely(r < 0)) {
error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
return;
@@ -1117,7 +1117,7 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
return false;
}
- r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
+ r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova,
needle->size + 1,
(void *)(uintptr_t)needle->translated_addr,
needle->perm == IOMMU_RO);
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 3aefd77968..3fb209cd35 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -472,7 +472,8 @@ static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
return;
}
- r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
+ r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
+ map->size + 1);
if (unlikely(r != 0)) {
error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
}
@@ -496,7 +497,7 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
return r;
}
- r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
+ r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
if (unlikely(r < 0)) {
goto dma_map_err;
--
2.39.3
Thread overview: 21+ messages
2023-11-24 17:14 [PATCH 9.0 00/13] Consolidate common vdpa members in VhostVDPAShared Eugenio Pérez
2023-11-24 17:14 ` [PATCH 9.0 01/13] vdpa: add VhostVDPAShared Eugenio Pérez
2023-12-01 5:35 ` Jason Wang
2023-12-01 6:41 ` Eugenio Perez Martin
2023-11-24 17:14 ` [PATCH 9.0 02/13] vdpa: move iova tree to the shared struct Eugenio Pérez
2023-11-24 17:14 ` [PATCH 9.0 03/13] vdpa: move iova_range to vhost_vdpa_shared Eugenio Pérez
2023-11-24 17:14 ` [PATCH 9.0 04/13] vdpa: move shadow_data " Eugenio Pérez
2023-12-06 6:04 ` Si-Wei Liu
2023-11-24 17:14 ` [PATCH 9.0 05/13] vdpa: use vdpa shared for tracing Eugenio Pérez
2023-11-24 17:14 ` [PATCH 9.0 06/13] vdpa: move file descriptor to vhost_vdpa_shared Eugenio Pérez
2023-11-24 17:14 ` [PATCH 9.0 07/13] vdpa: move iotlb_batch_begin_sent " Eugenio Pérez
2023-11-24 17:14 ` [PATCH 9.0 08/13] vdpa: move backend_cap " Eugenio Pérez
2023-11-24 17:14 ` [PATCH 9.0 09/13] vdpa: remove msg type of vhost_vdpa Eugenio Pérez
2023-11-24 17:14 ` [PATCH 9.0 10/13] vdpa: move iommu_list to vhost_vdpa_shared Eugenio Pérez
2023-11-24 17:14 ` Eugenio Pérez [this message]
2023-11-24 17:14 ` [PATCH 9.0 12/13] vdpa: use dev_shared in vdpa_iommu Eugenio Pérez
2023-11-24 17:14 ` [PATCH 9.0 13/13] vdpa: move memory listener to vhost_vdpa_shared Eugenio Pérez
2023-11-30 3:21 ` [PATCH 9.0 00/13] Consolidate common vdpa members in VhostVDPAShared Lei Yang
2023-11-30 7:38 ` Eugenio Perez Martin
2023-11-30 8:19 ` Lei Yang
2023-12-01 7:04 ` Jason Wang