From: Stefan Hajnoczi <stefanha@redhat.com>
To: Antoine Damhet <adamhet@scaleway.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>
Cc: qemu-devel@nongnu.org, Akihiko Odaki <akihiko.odaki@daynix.com>,
qemu-stable@nongnu.org
Subject: Re: [PATCH] Revert "virtio-net: Copy received header to buffer"
Date: Tue, 8 Apr 2025 09:58:22 -0400
Message-ID: <20250408135822.GA538171@fedora>
In-Reply-To: <20250404151835.328368-1-adamhet@scaleway.com>
On Fri, Apr 04, 2025 at 05:18:21PM +0200, Antoine Damhet wrote:
> This reverts commit 7987d2be5a8bc3a502f89ba8cf3ac3e09f64d1ce.
>
> The goal was to remove the need to patch the (const) input buffer
> with a recomputed UDP checksum by copying the headers to an RW region
> and injecting the checksum there. The patch computed the checksum only
> from the header fields (missing the rest of the payload), producing an
> invalid one and making guests fail to acquire a DHCP lease.
>
> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2727
> Cc: qemu-stable@nongnu.org
> Signed-off-by: Antoine Damhet <adamhet@scaleway.com>
> ---
> hw/net/virtio-net.c | 85 +++++++++++++++++++++------------------------
> 1 file changed, 39 insertions(+), 46 deletions(-)
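
For context on the bug being reverted: RFC 768 defines the UDP checksum
over a pseudo-header (source/destination IP, protocol, UDP length) plus
the entire datagram, header *and* payload, so a value computed from the
first ETH_HLEN + IP + UDP header bytes alone cannot validate once a
payload is present. A minimal standalone sketch of the difference in
scope (helper names hypothetical; this is not QEMU's
net_checksum_calculate()):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* One's-complement sum of len bytes, accumulated into acc. */
static uint32_t sum16(const uint8_t *p, size_t len, uint32_t acc)
{
    while (len > 1) {
        acc += (p[0] << 8) | p[1];
        p += 2;
        len -= 2;
    }
    if (len) {                /* odd trailing byte, padded with zero */
        acc += p[0] << 8;
    }
    return acc;
}

/* Fold carries into 16 bits and complement. */
static uint16_t fold(uint32_t acc)
{
    while (acc >> 16) {
        acc = (acc & 0xffff) + (acc >> 16);
    }
    return (uint16_t)~acc;
}

/* Checksum over pseudo-header + UDP header + payload (the correct scope). */
static uint16_t udp_csum(const uint8_t src_ip[4], const uint8_t dst_ip[4],
                         const uint8_t *udp, size_t udp_len)
{
    uint8_t pseudo[12];

    memcpy(pseudo, src_ip, 4);
    memcpy(pseudo + 4, dst_ip, 4);
    pseudo[8] = 0;
    pseudo[9] = 17;                       /* IPPROTO_UDP */
    pseudo[10] = udp_len >> 8;
    pseudo[11] = udp_len & 0xff;

    return fold(sum16(udp, udp_len, sum16(pseudo, sizeof(pseudo), 0)));
}

int main(void)
{
    const uint8_t src[4] = {192, 168, 0, 1}, dst[4] = {192, 168, 0, 2};
    /* UDP header (8 bytes, checksum field zeroed) + 4 payload bytes */
    uint8_t dgram[12] = {0, 67, 0, 68, 0, 12, 0, 0, 'D', 'H', 'C', 'P'};

    uint16_t full = udp_csum(src, dst, dgram, sizeof(dgram));
    uint16_t hdr_only = udp_csum(src, dst, dgram, 8); /* wrong scope */

    printf("full datagram: 0x%04x, header only: 0x%04x\n", full, hdr_only);
    return 0;
}

Running this prints two different values for any non-empty payload,
which is why guests rejected the DHCP replies.
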
This patch fails to apply due to a conflict with:
commit c17ad4b11bd268a35506cd976884562df6ca69d7
Author: Akihiko Odaki <akihiko.odaki@daynix.com>
Date: Wed Jan 8 21:13:29 2025 +0900
virtio-net: Fix num_buffers for version 1
Please rebase.
Michael or Jason: Are you still sending a pull request for 10.0.0-rc3?
It's being tagged today.
Stefan
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index de87cfadffe1..028e7e873c42 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -1702,44 +1702,41 @@ static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
> * cache.
> */
> static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
> - size_t *hdr_len, const uint8_t *buf,
> - size_t buf_size, size_t *buf_offset)
> + uint8_t *buf, size_t size)
> {
> size_t csum_size = ETH_HLEN + sizeof(struct ip_header) +
> sizeof(struct udp_header);
>
> - buf += *buf_offset;
> - buf_size -= *buf_offset;
> -
> if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
> - (buf_size >= csum_size && buf_size < 1500) && /* normal sized MTU */
> + (size >= csum_size && size < 1500) && /* normal sized MTU */
> (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
> (buf[23] == 17) && /* ip.protocol == UDP */
> (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
> - memcpy((uint8_t *)hdr + *hdr_len, buf, csum_size);
> - net_checksum_calculate((uint8_t *)hdr + *hdr_len, csum_size, CSUM_UDP);
> + net_checksum_calculate(buf, size, CSUM_UDP);
> hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
> - *hdr_len += csum_size;
> - *buf_offset += csum_size;
> }
> }
>
> -static size_t receive_header(VirtIONet *n, struct virtio_net_hdr *hdr,
> - const void *buf, size_t buf_size,
> - size_t *buf_offset)
> +static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
> + const void *buf, size_t size)
> {
> - size_t hdr_len = n->guest_hdr_len;
> -
> - memcpy(hdr, buf, sizeof(struct virtio_net_hdr));
> -
> - *buf_offset = n->host_hdr_len;
> - work_around_broken_dhclient(hdr, &hdr_len, buf, buf_size, buf_offset);
> + if (n->has_vnet_hdr) {
> + /* FIXME this cast is evil */
> + void *wbuf = (void *)buf;
> + work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
> + size - n->host_hdr_len);
>
> - if (n->needs_vnet_hdr_swap) {
> - virtio_net_hdr_swap(VIRTIO_DEVICE(n), hdr);
> + if (n->needs_vnet_hdr_swap) {
> + virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
> + }
> + iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
> + } else {
> + struct virtio_net_hdr hdr = {
> + .flags = 0,
> + .gso_type = VIRTIO_NET_HDR_GSO_NONE
> + };
> + iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
> }
> -
> - return hdr_len;
> }
>
> static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
> @@ -1907,13 +1904,6 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
> return (index == new_index) ? -1 : new_index;
> }
>
> -typedef struct Header {
> - struct virtio_net_hdr_v1_hash virtio_net;
> - struct eth_header eth;
> - struct ip_header ip;
> - struct udp_header udp;
> -} Header;
> -
> static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> size_t size)
> {
> @@ -1923,15 +1913,15 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
> size_t lens[VIRTQUEUE_MAX_SIZE];
> struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
> - Header hdr;
> + struct virtio_net_hdr_v1_hash extra_hdr;
> unsigned mhdr_cnt = 0;
> size_t offset, i, guest_offset, j;
> ssize_t err;
>
> - memset(&hdr.virtio_net, 0, sizeof(hdr.virtio_net));
> + memset(&extra_hdr, 0, sizeof(extra_hdr));
>
> if (n->rss_data.enabled && n->rss_data.enabled_software_rss) {
> - int index = virtio_net_process_rss(nc, buf, size, &hdr.virtio_net);
> + int index = virtio_net_process_rss(nc, buf, size, &extra_hdr);
> if (index >= 0) {
> nc = qemu_get_subqueue(n->nic, index % n->curr_queue_pairs);
> }
> @@ -1996,18 +1986,21 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> if (n->mergeable_rx_bufs) {
> mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
> sg, elem->in_num,
> - offsetof(typeof(hdr),
> - virtio_net.hdr.num_buffers),
> - sizeof(hdr.virtio_net.hdr.num_buffers));
> + offsetof(typeof(extra_hdr), hdr.num_buffers),
> + sizeof(extra_hdr.hdr.num_buffers));
> }
>
> - guest_offset = n->has_vnet_hdr ?
> - receive_header(n, (struct virtio_net_hdr *)&hdr,
> - buf, size, &offset) :
> - n->guest_hdr_len;
> -
> - iov_from_buf(sg, elem->in_num, 0, &hdr, guest_offset);
> - total += guest_offset;
> + receive_header(n, sg, elem->in_num, buf, size);
> + if (n->rss_data.populate_hash) {
> + offset = offsetof(typeof(extra_hdr), hash_value);
> + iov_from_buf(sg, elem->in_num, offset,
> + (char *)&extra_hdr + offset,
> + sizeof(extra_hdr.hash_value) +
> + sizeof(extra_hdr.hash_report));
> + }
> + offset = n->host_hdr_len;
> + total += n->guest_hdr_len;
> + guest_offset = n->guest_hdr_len;
> } else {
> guest_offset = 0;
> }
> @@ -2033,11 +2026,11 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> }
>
> if (mhdr_cnt) {
> - virtio_stw_p(vdev, &hdr.virtio_net.hdr.num_buffers, i);
> + virtio_stw_p(vdev, &extra_hdr.hdr.num_buffers, i);
> iov_from_buf(mhdr_sg, mhdr_cnt,
> 0,
> - &hdr.virtio_net.hdr.num_buffers,
> - sizeof hdr.virtio_net.hdr.num_buffers);
> + &extra_hdr.hdr.num_buffers,
> + sizeof extra_hdr.hdr.num_buffers);
> }
>
> for (j = 0; j < i; j++) {
> --
> 2.49.0
>
>
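
A side note on the restored detection logic quoted above: the workaround
identifies BOOTP/DHCP server replies purely by fixed byte offsets, which
implicitly assumes an untagged Ethernet frame carrying IPv4 with no IP
options (a 20-byte header). A standalone sketch of the same shape check
(names hypothetical, simplified from the quoted hunk):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed layout: 14-byte Ethernet header, 20-byte IPv4 header (IHL == 5),
 * 8-byte UDP header; every offset below depends on that. */
enum { ETH_LEN = 14, IP_LEN = 20, UDP_LEN = 8 };

static bool is_bootp_reply(const uint8_t *buf, size_t size)
{
    if (size < ETH_LEN + IP_LEN + UDP_LEN) {
        return false;                         /* too short for eth+ip+udp */
    }
    return buf[12] == 0x08 && buf[13] == 0x00 /* ethertype == IPv4 */
        && buf[23] == 17                      /* ip.protocol == UDP */
        && buf[34] == 0 && buf[35] == 67;     /* udp.srcport == bootps */
}

Frames that don't match the expected shape (e.g. VLAN-tagged ones)
simply bypass the workaround.
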