From: Jason Wang <jasowang@redhat.com>
To: qemu-devel@nongnu.org, peter.maydell@linaro.org
Cc: "Eugenio Pérez" <eperezma@redhat.com>,
	"Michael S . Tsirkin" <mst@redhat.com>,
	"Jason Wang" <jasowang@redhat.com>
Subject: [PULL 03/24] virtio-net: Expose ctrl virtqueue logic
Date: Tue, 19 Jul 2022 21:16:16 +0800
Message-ID: <20220719131637.46131-4-jasowang@redhat.com>
In-Reply-To: <20220719131637.46131-1-jasowang@redhat.com>

From: Eugenio Pérez <eperezma@redhat.com>

This allows external vhost-net devices to modify the state of the
VirtIO device model once the vhost-vdpa device has acknowledged the
control commands.
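
As a rough illustration (not part of this patch), an external caller such
as the vhost-vdpa shadow control virtqueue code could replay an
already-acked command against the device model along these lines. Every
name below except virtio_net_handle_ctrl_iov() is invented for the
example:

/*
 * Sketch only -- not part of this patch.  All identifiers other than
 * virtio_net_handle_ctrl_iov() are hypothetical.
 */
#include "qemu/osdep.h"
#include "hw/virtio/virtio-net.h"

static int example_replay_ctrl(VirtIODevice *vdev, void *cmd, size_t cmd_len)
{
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* ctrl header followed by the command payload, as on the ctrl vq */
    const struct iovec out = { .iov_base = cmd, .iov_len = cmd_len };
    /* the device model writes its ack byte here */
    const struct iovec in = { .iov_base = &status, .iov_len = sizeof(status) };
    size_t written;

    /* Replay the command so the model's state matches the real device. */
    written = virtio_net_handle_ctrl_iov(vdev, &in, 1, &out, 1);

    return (written == sizeof(status) && status == VIRTIO_NET_OK) ? 0 : -EINVAL;
}

Returning the number of bytes written into in_sg (or 0 after
virtio_error()) also lets virtio_net_handle_ctrl() decide whether to push
the element back to the guest or detach it.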

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/net/virtio-net.c            | 84 ++++++++++++++++++++++++------------------
 include/hw/virtio/virtio-net.h |  4 ++
 2 files changed, 53 insertions(+), 35 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index f83e96e..dd0d056 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1433,57 +1433,71 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
     return VIRTIO_NET_OK;
 }
 
-static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev,
+                                  const struct iovec *in_sg, unsigned in_num,
+                                  const struct iovec *out_sg,
+                                  unsigned out_num)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
     struct virtio_net_ctrl_hdr ctrl;
     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
-    VirtQueueElement *elem;
     size_t s;
     struct iovec *iov, *iov2;
-    unsigned int iov_cnt;
+
+    if (iov_size(in_sg, in_num) < sizeof(status) ||
+        iov_size(out_sg, out_num) < sizeof(ctrl)) {
+        virtio_error(vdev, "virtio-net ctrl missing headers");
+        return 0;
+    }
+
+    iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num);
+    s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl));
+    iov_discard_front(&iov, &out_num, sizeof(ctrl));
+    if (s != sizeof(ctrl)) {
+        status = VIRTIO_NET_ERR;
+    } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
+        status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num);
+    } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
+        status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num);
+    } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
+        status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num);
+    } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
+        status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num);
+    } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
+        status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num);
+    } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
+        status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num);
+    }
+
+    s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status));
+    assert(s == sizeof(status));
+
+    g_free(iov2);
+    return sizeof(status);
+}
+
+static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtQueueElement *elem;
 
     for (;;) {
+        size_t written;
         elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
         if (!elem) {
             break;
         }
-        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
-            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
-            virtio_error(vdev, "virtio-net ctrl missing headers");
+
+        written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num,
+                                             elem->out_sg, elem->out_num);
+        if (written > 0) {
+            virtqueue_push(vq, elem, written);
+            virtio_notify(vdev, vq);
+            g_free(elem);
+        } else {
             virtqueue_detach_element(vq, elem, 0);
             g_free(elem);
             break;
         }
-
-        iov_cnt = elem->out_num;
-        iov2 = iov = g_memdup2(elem->out_sg,
-                               sizeof(struct iovec) * elem->out_num);
-        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
-        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
-        if (s != sizeof(ctrl)) {
-            status = VIRTIO_NET_ERR;
-        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
-            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
-        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
-            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
-        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
-            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
-        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
-            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
-        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
-            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
-        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
-            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
-        }
-
-        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
-        assert(s == sizeof(status));
-
-        virtqueue_push(vq, elem, sizeof(status));
-        virtio_notify(vdev, vq);
-        g_free(iov2);
-        g_free(elem);
     }
 }
 
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index cce1c55..ef234ff 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -221,6 +221,10 @@ struct VirtIONet {
     struct EBPFRSSContext ebpf_rss;
 };
 
+size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev,
+                                  const struct iovec *in_sg, unsigned in_num,
+                                  const struct iovec *out_sg,
+                                  unsigned out_num);
 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                    const char *type);
 
-- 
2.7.4


