* [PATCH] Revert "virtio-net: Copy received header to buffer"
@ 2025-04-04 15:18 Antoine Damhet
2025-04-07 2:09 ` Jason Wang
2025-04-08 13:58 ` Stefan Hajnoczi
0 siblings, 2 replies; 3+ messages in thread
From: Antoine Damhet @ 2025-04-04 15:18 UTC (permalink / raw)
To: qemu-devel
Cc: Antoine Damhet, Michael S. Tsirkin, Akihiko Odaki, Jason Wang,
qemu-stable
This reverts commit 7987d2be5a8bc3a502f89ba8cf3ac3e09f64d1ce.
The goal was to remove the need to patch the (const) input buffer
with a recomputed UDP checksum by copying headers to an RW region and
injecting the checksum there. The patch computed the checksum only from
the header fields (missing the rest of the payload), producing an invalid
one and making guests fail to acquire a DHCP lease.
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2727
Cc: qemu-stable@nongnu.org
Signed-off-by: Antoine Damhet <adamhet@scaleway.com>
---
hw/net/virtio-net.c | 85 +++++++++++++++++++++------------------------
1 file changed, 39 insertions(+), 46 deletions(-)
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index de87cfadffe1..028e7e873c42 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1702,44 +1702,41 @@ static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
* cache.
*/
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
- size_t *hdr_len, const uint8_t *buf,
- size_t buf_size, size_t *buf_offset)
+ uint8_t *buf, size_t size)
{
size_t csum_size = ETH_HLEN + sizeof(struct ip_header) +
sizeof(struct udp_header);
- buf += *buf_offset;
- buf_size -= *buf_offset;
-
if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
- (buf_size >= csum_size && buf_size < 1500) && /* normal sized MTU */
+ (size >= csum_size && size < 1500) && /* normal sized MTU */
(buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
(buf[23] == 17) && /* ip.protocol == UDP */
(buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
- memcpy((uint8_t *)hdr + *hdr_len, buf, csum_size);
- net_checksum_calculate((uint8_t *)hdr + *hdr_len, csum_size, CSUM_UDP);
+ net_checksum_calculate(buf, size, CSUM_UDP);
hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
- *hdr_len += csum_size;
- *buf_offset += csum_size;
}
}
-static size_t receive_header(VirtIONet *n, struct virtio_net_hdr *hdr,
- const void *buf, size_t buf_size,
- size_t *buf_offset)
+static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
+ const void *buf, size_t size)
{
- size_t hdr_len = n->guest_hdr_len;
-
- memcpy(hdr, buf, sizeof(struct virtio_net_hdr));
-
- *buf_offset = n->host_hdr_len;
- work_around_broken_dhclient(hdr, &hdr_len, buf, buf_size, buf_offset);
+ if (n->has_vnet_hdr) {
+ /* FIXME this cast is evil */
+ void *wbuf = (void *)buf;
+ work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
+ size - n->host_hdr_len);
- if (n->needs_vnet_hdr_swap) {
- virtio_net_hdr_swap(VIRTIO_DEVICE(n), hdr);
+ if (n->needs_vnet_hdr_swap) {
+ virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
+ }
+ iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
+ } else {
+ struct virtio_net_hdr hdr = {
+ .flags = 0,
+ .gso_type = VIRTIO_NET_HDR_GSO_NONE
+ };
+ iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
}
-
- return hdr_len;
}
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
@@ -1907,13 +1904,6 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
return (index == new_index) ? -1 : new_index;
}
-typedef struct Header {
- struct virtio_net_hdr_v1_hash virtio_net;
- struct eth_header eth;
- struct ip_header ip;
- struct udp_header udp;
-} Header;
-
static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
size_t size)
{
@@ -1923,15 +1913,15 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
size_t lens[VIRTQUEUE_MAX_SIZE];
struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
- Header hdr;
+ struct virtio_net_hdr_v1_hash extra_hdr;
unsigned mhdr_cnt = 0;
size_t offset, i, guest_offset, j;
ssize_t err;
- memset(&hdr.virtio_net, 0, sizeof(hdr.virtio_net));
+ memset(&extra_hdr, 0, sizeof(extra_hdr));
if (n->rss_data.enabled && n->rss_data.enabled_software_rss) {
- int index = virtio_net_process_rss(nc, buf, size, &hdr.virtio_net);
+ int index = virtio_net_process_rss(nc, buf, size, &extra_hdr);
if (index >= 0) {
nc = qemu_get_subqueue(n->nic, index % n->curr_queue_pairs);
}
@@ -1996,18 +1986,21 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
if (n->mergeable_rx_bufs) {
mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
sg, elem->in_num,
- offsetof(typeof(hdr),
- virtio_net.hdr.num_buffers),
- sizeof(hdr.virtio_net.hdr.num_buffers));
+ offsetof(typeof(extra_hdr), hdr.num_buffers),
+ sizeof(extra_hdr.hdr.num_buffers));
}
- guest_offset = n->has_vnet_hdr ?
- receive_header(n, (struct virtio_net_hdr *)&hdr,
- buf, size, &offset) :
- n->guest_hdr_len;
-
- iov_from_buf(sg, elem->in_num, 0, &hdr, guest_offset);
- total += guest_offset;
+ receive_header(n, sg, elem->in_num, buf, size);
+ if (n->rss_data.populate_hash) {
+ offset = offsetof(typeof(extra_hdr), hash_value);
+ iov_from_buf(sg, elem->in_num, offset,
+ (char *)&extra_hdr + offset,
+ sizeof(extra_hdr.hash_value) +
+ sizeof(extra_hdr.hash_report));
+ }
+ offset = n->host_hdr_len;
+ total += n->guest_hdr_len;
+ guest_offset = n->guest_hdr_len;
} else {
guest_offset = 0;
}
@@ -2033,11 +2026,11 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
}
if (mhdr_cnt) {
- virtio_stw_p(vdev, &hdr.virtio_net.hdr.num_buffers, i);
+ virtio_stw_p(vdev, &extra_hdr.hdr.num_buffers, i);
iov_from_buf(mhdr_sg, mhdr_cnt,
0,
- &hdr.virtio_net.hdr.num_buffers,
- sizeof hdr.virtio_net.hdr.num_buffers);
+ &extra_hdr.hdr.num_buffers,
+ sizeof extra_hdr.hdr.num_buffers);
}
for (j = 0; j < i; j++) {
--
2.49.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH] Revert "virtio-net: Copy received header to buffer"
2025-04-04 15:18 [PATCH] Revert "virtio-net: Copy received header to buffer" Antoine Damhet
@ 2025-04-07 2:09 ` Jason Wang
2025-04-08 13:58 ` Stefan Hajnoczi
1 sibling, 0 replies; 3+ messages in thread
From: Jason Wang @ 2025-04-07 2:09 UTC (permalink / raw)
To: Antoine Damhet; +Cc: qemu-devel, Michael S. Tsirkin, Akihiko Odaki, qemu-stable
On Fri, Apr 4, 2025 at 11:19 PM Antoine Damhet <adamhet@scaleway.com> wrote:
>
> This reverts commit 7987d2be5a8bc3a502f89ba8cf3ac3e09f64d1ce.
>
> The goal was to remove the need to patch the (const) input buffer
> with a recomputed UDP checksum by copying headers to a RW region and
> inject the checksum there. The patch computed the checksum only from the
> header fields (missing the rest of the payload) producing an invalid one
> and making guests fail to acquire a DHCP lease.
>
> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2727
> Cc: qemu-stable@nongnu.org
> Signed-off-by: Antoine Damhet <adamhet@scaleway.com>
> ---
Acked-by: Jason Wang <jasowang@redhat.com>
Thanks
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] Revert "virtio-net: Copy received header to buffer"
2025-04-04 15:18 [PATCH] Revert "virtio-net: Copy received header to buffer" Antoine Damhet
2025-04-07 2:09 ` Jason Wang
@ 2025-04-08 13:58 ` Stefan Hajnoczi
1 sibling, 0 replies; 3+ messages in thread
From: Stefan Hajnoczi @ 2025-04-08 13:58 UTC (permalink / raw)
To: Antoine Damhet, Michael S. Tsirkin, Jason Wang
Cc: qemu-devel, Akihiko Odaki, qemu-stable
[-- Attachment #1: Type: text/plain, Size: 7915 bytes --]
On Fri, Apr 04, 2025 at 05:18:21PM +0200, Antoine Damhet wrote:
> This reverts commit 7987d2be5a8bc3a502f89ba8cf3ac3e09f64d1ce.
>
> The goal was to remove the need to patch the (const) input buffer
> with a recomputed UDP checksum by copying headers to a RW region and
> inject the checksum there. The patch computed the checksum only from the
> header fields (missing the rest of the payload) producing an invalid one
> and making guests fail to acquire a DHCP lease.
>
> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2727
> Cc: qemu-stable@nongnu.org
> Signed-off-by: Antoine Damhet <adamhet@scaleway.com>
> ---
> hw/net/virtio-net.c | 85 +++++++++++++++++++++------------------------
> 1 file changed, 39 insertions(+), 46 deletions(-)
This patch fails to apply due to a conflict with:
commit c17ad4b11bd268a35506cd976884562df6ca69d7
Author: Akihiko Odaki <akihiko.odaki@daynix.com>
Date: Wed Jan 8 21:13:29 2025 +0900
virtio-net: Fix num_buffers for version 1
Please rebase.
Michael or Jason: Are you still sending a pull request for 10.0.0-rc3?
It's being tagged today.
Stefan
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index de87cfadffe1..028e7e873c42 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -1702,44 +1702,41 @@ static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
> * cache.
> */
> static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
> - size_t *hdr_len, const uint8_t *buf,
> - size_t buf_size, size_t *buf_offset)
> + uint8_t *buf, size_t size)
> {
> size_t csum_size = ETH_HLEN + sizeof(struct ip_header) +
> sizeof(struct udp_header);
>
> - buf += *buf_offset;
> - buf_size -= *buf_offset;
> -
> if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
> - (buf_size >= csum_size && buf_size < 1500) && /* normal sized MTU */
> + (size >= csum_size && size < 1500) && /* normal sized MTU */
> (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
> (buf[23] == 17) && /* ip.protocol == UDP */
> (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
> - memcpy((uint8_t *)hdr + *hdr_len, buf, csum_size);
> - net_checksum_calculate((uint8_t *)hdr + *hdr_len, csum_size, CSUM_UDP);
> + net_checksum_calculate(buf, size, CSUM_UDP);
> hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
> - *hdr_len += csum_size;
> - *buf_offset += csum_size;
> }
> }
>
> -static size_t receive_header(VirtIONet *n, struct virtio_net_hdr *hdr,
> - const void *buf, size_t buf_size,
> - size_t *buf_offset)
> +static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
> + const void *buf, size_t size)
> {
> - size_t hdr_len = n->guest_hdr_len;
> -
> - memcpy(hdr, buf, sizeof(struct virtio_net_hdr));
> -
> - *buf_offset = n->host_hdr_len;
> - work_around_broken_dhclient(hdr, &hdr_len, buf, buf_size, buf_offset);
> + if (n->has_vnet_hdr) {
> + /* FIXME this cast is evil */
> + void *wbuf = (void *)buf;
> + work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
> + size - n->host_hdr_len);
>
> - if (n->needs_vnet_hdr_swap) {
> - virtio_net_hdr_swap(VIRTIO_DEVICE(n), hdr);
> + if (n->needs_vnet_hdr_swap) {
> + virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
> + }
> + iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
> + } else {
> + struct virtio_net_hdr hdr = {
> + .flags = 0,
> + .gso_type = VIRTIO_NET_HDR_GSO_NONE
> + };
> + iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
> }
> -
> - return hdr_len;
> }
>
> static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
> @@ -1907,13 +1904,6 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
> return (index == new_index) ? -1 : new_index;
> }
>
> -typedef struct Header {
> - struct virtio_net_hdr_v1_hash virtio_net;
> - struct eth_header eth;
> - struct ip_header ip;
> - struct udp_header udp;
> -} Header;
> -
> static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> size_t size)
> {
> @@ -1923,15 +1913,15 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
> size_t lens[VIRTQUEUE_MAX_SIZE];
> struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
> - Header hdr;
> + struct virtio_net_hdr_v1_hash extra_hdr;
> unsigned mhdr_cnt = 0;
> size_t offset, i, guest_offset, j;
> ssize_t err;
>
> - memset(&hdr.virtio_net, 0, sizeof(hdr.virtio_net));
> + memset(&extra_hdr, 0, sizeof(extra_hdr));
>
> if (n->rss_data.enabled && n->rss_data.enabled_software_rss) {
> - int index = virtio_net_process_rss(nc, buf, size, &hdr.virtio_net);
> + int index = virtio_net_process_rss(nc, buf, size, &extra_hdr);
> if (index >= 0) {
> nc = qemu_get_subqueue(n->nic, index % n->curr_queue_pairs);
> }
> @@ -1996,18 +1986,21 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> if (n->mergeable_rx_bufs) {
> mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
> sg, elem->in_num,
> - offsetof(typeof(hdr),
> - virtio_net.hdr.num_buffers),
> - sizeof(hdr.virtio_net.hdr.num_buffers));
> + offsetof(typeof(extra_hdr), hdr.num_buffers),
> + sizeof(extra_hdr.hdr.num_buffers));
> }
>
> - guest_offset = n->has_vnet_hdr ?
> - receive_header(n, (struct virtio_net_hdr *)&hdr,
> - buf, size, &offset) :
> - n->guest_hdr_len;
> -
> - iov_from_buf(sg, elem->in_num, 0, &hdr, guest_offset);
> - total += guest_offset;
> + receive_header(n, sg, elem->in_num, buf, size);
> + if (n->rss_data.populate_hash) {
> + offset = offsetof(typeof(extra_hdr), hash_value);
> + iov_from_buf(sg, elem->in_num, offset,
> + (char *)&extra_hdr + offset,
> + sizeof(extra_hdr.hash_value) +
> + sizeof(extra_hdr.hash_report));
> + }
> + offset = n->host_hdr_len;
> + total += n->guest_hdr_len;
> + guest_offset = n->guest_hdr_len;
> } else {
> guest_offset = 0;
> }
> @@ -2033,11 +2026,11 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> }
>
> if (mhdr_cnt) {
> - virtio_stw_p(vdev, &hdr.virtio_net.hdr.num_buffers, i);
> + virtio_stw_p(vdev, &extra_hdr.hdr.num_buffers, i);
> iov_from_buf(mhdr_sg, mhdr_cnt,
> 0,
> - &hdr.virtio_net.hdr.num_buffers,
> - sizeof hdr.virtio_net.hdr.num_buffers);
> + &extra_hdr.hdr.num_buffers,
> + sizeof extra_hdr.hdr.num_buffers);
> }
>
> for (j = 0; j < i; j++) {
> --
> 2.49.0
>
>
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 488 bytes --]
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2025-04-08 13:59 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-04-04 15:18 [PATCH] Revert "virtio-net: Copy received header to buffer" Antoine Damhet
2025-04-07 2:09 ` Jason Wang
2025-04-08 13:58 ` Stefan Hajnoczi
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).