From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Peter Maydell" <peter.maydell@linaro.org>,
"Eugenio Pérez" <eperezma@redhat.com>,
"Jason Wang" <jasowang@redhat.com>,
"Lei Yang" <leiyang@redhat.com>
Subject: [PULL 13/73] vdpa net: move iova tree creation from init to start
Date: Tue, 7 Mar 2023 20:11:30 -0500
Message-ID: <00ef422e9fbfef1fb40447b08826db0951d788dd.1678237635.git.mst@redhat.com>
In-Reply-To: <cover.1678237635.git.mst@redhat.com>
From: Eugenio Pérez <eperezma@redhat.com>
Only create iova_tree if and when it is needed.
Cleanup remains the responsibility of the last VQ, but this change allows
the two cleanup functions to be merged.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20230303172445.1089785-2-eperezma@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
net/vhost-vdpa.c | 113 ++++++++++++++++++++++++++++++++++-------------
1 file changed, 83 insertions(+), 30 deletions(-)
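For illustration, the lifecycle this patch establishes -- the first data queue
pair creates the shared IOVA tree when it starts, the remaining queue pairs
borrow the same pointer, and the client owning the last VQ frees it when it
stops -- can be sketched standalone as below. All types and names in this
sketch (iova_tree_t, client_t, iova_tree_new, ...) are simplified stand-ins,
not the actual QEMU vhost-vdpa API:

/*
 * Standalone sketch of the "create on first start, free on last stop"
 * ownership pattern this patch applies to the shared IOVA tree. The
 * types and names below are simplified stand-ins, not the QEMU
 * vhost-vdpa API.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    unsigned long first, last;      /* usable IOVA range */
} iova_tree_t;

typedef struct {
    int index;                      /* queue pair index of this client */
    int vq_index, nvqs, vq_index_end;
    int shadow_vqs_enabled;
    iova_tree_t *iova_tree;         /* pointer shared across all clients */
} client_t;

static iova_tree_t *iova_tree_new(unsigned long first, unsigned long last)
{
    iova_tree_t *t = calloc(1, sizeof(*t));
    t->first = first;
    t->last = last;
    return t;
}

/*
 * Start: only the first queue pair allocates the tree; the others borrow
 * the pointer owned by the first client (cf. vhost_vdpa_net_data_start).
 */
static void client_start(client_t *c, client_t *first)
{
    if (!c->shadow_vqs_enabled) {
        return;
    }
    c->iova_tree = (c->index == 0) ? iova_tree_new(0, 1UL << 30)
                                   : first->iova_tree;
}

/*
 * Stop: only the client that owns the last VQ frees the tree; the others
 * just drop their borrowed pointer (cf. vhost_vdpa_net_client_stop).
 */
static void client_stop(client_t *c)
{
    if (c->vq_index + c->nvqs == c->vq_index_end) {
        free(c->iova_tree);
    }
    c->iova_tree = NULL;
}

int main(void)
{
    client_t qp[2] = {
        { .index = 0, .vq_index = 0, .nvqs = 2, .vq_index_end = 4,
          .shadow_vqs_enabled = 1 },
        { .index = 1, .vq_index = 2, .nvqs = 2, .vq_index_end = 4,
          .shadow_vqs_enabled = 1 },
    };

    client_start(&qp[0], &qp[0]);
    client_start(&qp[1], &qp[0]);
    assert(qp[0].iova_tree == qp[1].iova_tree);

    client_stop(&qp[0]);            /* not the last VQ: keeps the tree alive */
    client_stop(&qp[1]);            /* last VQ: frees the shared tree */

    printf("IOVA tree created once and freed once\n");
    return 0;
}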
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index de5ed8ff22..d195f48776 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -178,13 +178,9 @@ err_init:
static void vhost_vdpa_cleanup(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
- struct vhost_dev *dev = &s->vhost_net->dev;
qemu_vfree(s->cvq_cmd_out_buffer);
qemu_vfree(s->status);
- if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
- g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
- }
if (s->vhost_net) {
vhost_net_cleanup(s->vhost_net);
g_free(s->vhost_net);
@@ -234,10 +230,64 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
return size;
}
+/** From any vdpa net client, get the netclient of the first queue pair */
+static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
+{
+ NICState *nic = qemu_get_nic(s->nc.peer);
+ NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
+
+ return DO_UPCAST(VhostVDPAState, nc, nc0);
+}
+
+static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
+{
+ struct vhost_vdpa *v = &s->vhost_vdpa;
+
+ if (v->shadow_vqs_enabled) {
+ v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+ v->iova_range.last);
+ }
+}
+
+static int vhost_vdpa_net_data_start(NetClientState *nc)
+{
+ VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+ struct vhost_vdpa *v = &s->vhost_vdpa;
+
+ assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+ if (v->index == 0) {
+ vhost_vdpa_net_data_start_first(s);
+ return 0;
+ }
+
+ if (v->shadow_vqs_enabled) {
+ VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
+ v->iova_tree = s0->vhost_vdpa.iova_tree;
+ }
+
+ return 0;
+}
+
+static void vhost_vdpa_net_client_stop(NetClientState *nc)
+{
+ VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+ struct vhost_dev *dev;
+
+ assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+ dev = s->vhost_vdpa.dev;
+ if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
+ g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
+ }
+}
+
static NetClientInfo net_vhost_vdpa_info = {
.type = NET_CLIENT_DRIVER_VHOST_VDPA,
.size = sizeof(VhostVDPAState),
.receive = vhost_vdpa_receive,
+ .start = vhost_vdpa_net_data_start,
+ .stop = vhost_vdpa_net_client_stop,
.cleanup = vhost_vdpa_cleanup,
.has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
.has_ufo = vhost_vdpa_has_ufo,
@@ -351,7 +401,7 @@ dma_map_err:
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
- VhostVDPAState *s;
+ VhostVDPAState *s, *s0;
struct vhost_vdpa *v;
uint64_t backend_features;
int64_t cvq_group;
@@ -415,8 +465,6 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
return r;
}
- v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
- v->iova_range.last);
v->shadow_vqs_enabled = true;
s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
@@ -425,6 +473,27 @@ out:
return 0;
}
+ s0 = vhost_vdpa_net_first_nc_vdpa(s);
+ if (s0->vhost_vdpa.iova_tree) {
+ /*
+ * SVQ is already configured for all virtqueues. Reuse the IOVA tree for
+ * simplicity, whether CVQ shares an ASID with the guest or not, because:
+ * - The memory listener needs access to the guest's memory addresses
+ * allocated in the IOVA tree.
+ * - There should be plenty of IOVA address space for both ASIDs, so there
+ * is no need to worry about collisions between them. The guest's
+ * translations are still validated with virtio virtqueue_pop, so there is
+ * no risk of the guest accessing memory that it shouldn't.
+ *
+ * Allocating an IOVA tree per ASID is doable, but it complicates the
+ * code and it is not worth it for the moment.
+ */
+ v->iova_tree = s0->vhost_vdpa.iova_tree;
+ } else {
+ v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+ v->iova_range.last);
+ }
+
r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
vhost_vdpa_net_cvq_cmd_page_len(), false);
if (unlikely(r < 0)) {
@@ -449,15 +518,9 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
if (s->vhost_vdpa.shadow_vqs_enabled) {
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
- if (!s->always_svq) {
- /*
- * If only the CVQ is shadowed we can delete this safely.
- * If all the VQs are shadows this will be needed by the time the
- * device is started again to register SVQ vrings and similar.
- */
- g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
- }
}
+
+ vhost_vdpa_net_client_stop(nc);
}
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
@@ -667,8 +730,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
int nvqs,
bool is_datapath,
bool svq,
- struct vhost_vdpa_iova_range iova_range,
- VhostIOVATree *iova_tree)
+ struct vhost_vdpa_iova_range iova_range)
{
NetClientState *nc = NULL;
VhostVDPAState *s;
@@ -690,7 +752,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->vhost_vdpa.shadow_vqs_enabled = svq;
s->vhost_vdpa.iova_range = iova_range;
s->vhost_vdpa.shadow_data = svq;
- s->vhost_vdpa.iova_tree = iova_tree;
if (!is_datapath) {
s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
vhost_vdpa_net_cvq_cmd_page_len());
@@ -760,7 +821,6 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
uint64_t features;
int vdpa_device_fd;
g_autofree NetClientState **ncs = NULL;
- g_autoptr(VhostIOVATree) iova_tree = NULL;
struct vhost_vdpa_iova_range iova_range;
NetClientState *nc;
int queue_pairs, r, i = 0, has_cvq = 0;
@@ -812,12 +872,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
goto err;
}
- if (opts->x_svq) {
- if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
- goto err_svq;
- }
-
- iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
+ if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
+ goto err;
}
ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
@@ -825,7 +881,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
for (i = 0; i < queue_pairs; i++) {
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 2, true, opts->x_svq,
- iova_range, iova_tree);
+ iova_range);
if (!ncs[i])
goto err;
}
@@ -833,13 +889,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
if (has_cvq) {
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 1, false,
- opts->x_svq, iova_range, iova_tree);
+ opts->x_svq, iova_range);
if (!nc)
goto err;
}
- /* iova_tree ownership belongs to last NetClientState */
- g_steal_pointer(&iova_tree);
return 0;
err:
@@ -849,7 +903,6 @@ err:
}
}
-err_svq:
qemu_close(vdpa_device_fd);
return -1;
--
MST