From: Paolo Abeni <pabeni@redhat.com>
To: qemu-devel@nongnu.org
Cc: Paolo Bonzini <pbonzini@redhat.com>,
Dmitry Fleytman <dmitry.fleytman@gmail.com>,
Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>,
Jason Wang <jasowang@redhat.com>,
Sriram Yagnaraman <sriram.yagnaraman@ericsson.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Stefano Garzarella <sgarzare@redhat.com>,
Cornelia Huck <cohuck@redhat.com>,
Luigi Rizzo <lrizzo@google.com>,
Giuseppe Lettieri <g.lettieri@iet.unipi.it>,
Vincenzo Maffione <v.maffione@gmail.com>,
Eric Blake <eblake@redhat.com>,
Markus Armbruster <armbru@redhat.com>
Subject: [PATCH RFC v3 11/13] virtio-net: implement extended features support
Date: Fri, 18 Jul 2025 10:52:37 +0200
Message-ID: <d384aea8126ca9ff7df9401ab8b001e5ee4d840c.1752828082.git.pabeni@redhat.com>
In-Reply-To: <cover.1752828082.git.pabeni@redhat.com>
Use the extended feature types and helpers to manipulate the virtio_net
features.
Note that the guest offloads are still 64 bits wide, as per the
specification; the extended offload features are mapped into that range.
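For illustration only, a minimal standalone sketch of that mapping
(the constants mirror the values introduced below; the helper name and
the main() harness are hypothetical, not part of this change): extended
feature bits 65..68 land in guest offload bits 46..49.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* same values as the VIRTIO_NET_* constants added by this patch */
    #define OFFLOAD_MAP_MIN   46
    #define OFFLOAD_MAP_LEN    4
    #define FEATURES_MAP_MIN  65
    #define F2O_SHIFT         (OFFLOAD_MAP_MIN - FEATURES_MAP_MIN + 64) /* 45 */
    #define OFFLOAD_MAP       (((1ULL << OFFLOAD_MAP_LEN) - 1) << OFFLOAD_MAP_MIN)

    /* features[0] carries bits 0..63, features[1] carries bits 64..127 */
    static uint64_t features_to_offload(const uint64_t *features)
    {
        return (features[0] & ~OFFLOAD_MAP) |
               ((features[1] << F2O_SHIFT) & OFFLOAD_MAP);
    }

    int main(void)
    {
        /* extended feature bit 65 is bit 1 of features[1] ... */
        uint64_t features[2] = { 0, 1ULL << (65 - 64) };

        /* ... and ends up on offload bit 46, i.e. 0x0000400000000000 */
        printf("offloads = 0x%016" PRIx64 "\n", features_to_offload(features));
        return 0;
    }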
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
v2 -> v3:
- rebased on top of 2deec9ab7d ("virtio-net: Move
virtio_net_get_features() down")
- _array -> _ex
v1 -> v2:
- uint128_t -> uint64_t[]
- more verbose macro definitions
---
hw/net/virtio-net.c | 136 +++++++++++++++++++--------------
include/hw/virtio/virtio-net.h | 2 +-
2 files changed, 80 insertions(+), 58 deletions(-)
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 8953c329e7..53413ec4d5 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -90,6 +90,19 @@
VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
+/*
+ * Features starting from VIRTIO_NET_FEATURES_MAP_MIN bit correspond
+ * to guest offloads in the VIRTIO_NET_OFFLOAD_MAP range
+ */
+#define VIRTIO_NET_OFFLOAD_MAP_MIN 46
+#define VIRTIO_NET_OFFLOAD_MAP_LENGTH 4
+#define VIRTIO_NET_OFFLOAD_MAP MAKE_64BIT_MASK( \
+ VIRTIO_NET_OFFLOAD_MAP_MIN, \
+ VIRTIO_NET_OFFLOAD_MAP_LENGTH)
+#define VIRTIO_NET_FEATURES_MAP_MIN 65
+#define VIRTIO_NET_F2O_SHIFT (VIRTIO_NET_OFFLOAD_MAP_MIN - \
+ VIRTIO_NET_FEATURES_MAP_MIN + 64)
+
static const VirtIOFeature feature_sizes[] = {
{.flags = 1ULL << VIRTIO_NET_F_MAC,
.end = endof(struct virtio_net_config, mac)},
@@ -786,7 +799,14 @@ static void virtio_net_apply_guest_offloads(VirtIONet *n)
qemu_set_offload(qemu_get_queue(n->nic)->peer, &ol);
}
-static uint64_t virtio_net_guest_offloads_by_features(uint64_t features)
+static uint64_t virtio_net_features_to_offload(const uint64_t *features)
+{
+ return (features[0] & ~VIRTIO_NET_OFFLOAD_MAP) |
+ ((features[1] << VIRTIO_NET_F2O_SHIFT) & VIRTIO_NET_OFFLOAD_MAP);
+}
+
+static uint64_t
+virtio_net_guest_offloads_by_features(const uint64_t *features)
{
static const uint64_t guest_offloads_mask =
(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
@@ -797,13 +817,13 @@ static uint64_t virtio_net_guest_offloads_by_features(uint64_t features)
(1ULL << VIRTIO_NET_F_GUEST_USO4) |
(1ULL << VIRTIO_NET_F_GUEST_USO6);
- return guest_offloads_mask & features;
+ return guest_offloads_mask & virtio_net_features_to_offload(features);
}
uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- return virtio_net_guest_offloads_by_features(vdev->guest_features);
+ return virtio_net_guest_offloads_by_features(vdev->guest_features_ex);
}
typedef struct {
@@ -882,34 +902,39 @@ static void failover_add_primary(VirtIONet *n, Error **errp)
error_propagate(errp, err);
}
-static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
+static void virtio_net_set_features(VirtIODevice *vdev,
+ const uint64_t *in_features)
{
+ uint64_t features[VIRTIO_FEATURES_DWORDS];
VirtIONet *n = VIRTIO_NET(vdev);
Error *err = NULL;
int i;
+ virtio_features_copy(features, in_features);
if (n->mtu_bypass_backend &&
!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
- features &= ~(1ULL << VIRTIO_NET_F_MTU);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_MTU);
}
virtio_net_set_multiqueue(n,
- virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
- virtio_has_feature(features, VIRTIO_NET_F_MQ));
+ virtio_has_feature_ex(features,
+ VIRTIO_NET_F_RSS) ||
+ virtio_has_feature_ex(features,
+ VIRTIO_NET_F_MQ));
virtio_net_set_mrg_rx_bufs(n,
- virtio_has_feature(features,
+ virtio_has_feature_ex(features,
VIRTIO_NET_F_MRG_RXBUF),
- virtio_has_feature(features,
+ virtio_has_feature_ex(features,
VIRTIO_F_VERSION_1),
- virtio_has_feature(features,
+ virtio_has_feature_ex(features,
VIRTIO_NET_F_HASH_REPORT));
- n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
- virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
- n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
- virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
- n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS);
+ n->rsc4_enabled = virtio_has_feature_ex(features, VIRTIO_NET_F_RSC_EXT) &&
+ virtio_has_feature_ex(features, VIRTIO_NET_F_GUEST_TSO4);
+ n->rsc6_enabled = virtio_has_feature_ex(features, VIRTIO_NET_F_RSC_EXT) &&
+ virtio_has_feature_ex(features, VIRTIO_NET_F_GUEST_TSO6);
+ n->rss_data.redirect = virtio_has_feature_ex(features, VIRTIO_NET_F_RSS);
if (n->has_vnet_hdr) {
n->curr_guest_offloads =
@@ -923,7 +948,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
if (!get_vhost_net(nc->peer)) {
continue;
}
- vhost_net_ack_features(get_vhost_net(nc->peer), features);
+ vhost_net_ack_features_ex(get_vhost_net(nc->peer), features);
/*
* keep acked_features in NetVhostUserState up-to-date so it
@@ -932,11 +957,11 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
vhost_net_save_acked_features(nc->peer);
}
- if (!virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
+ if (!virtio_has_feature_ex(features, VIRTIO_NET_F_CTRL_VLAN)) {
memset(n->vlans, 0xff, MAX_VLAN >> 3);
}
- if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
+ if (virtio_has_feature_ex(features, VIRTIO_NET_F_STANDBY)) {
qapi_event_send_failover_negotiated(n->netclient_name);
qatomic_set(&n->failover_primary_hidden, false);
failover_add_primary(n, &err);
@@ -1901,10 +1926,10 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
virtio_error(vdev, "virtio-net unexpected empty queue: "
"i %zd mergeable %d offset %zd, size %zd, "
"guest hdr len %zd, host hdr len %zd "
- "guest features 0x%" PRIx64,
+ "guest features 0x" VIRTIO_FEATURES_FMT,
i, n->mergeable_rx_bufs, offset, size,
n->guest_hdr_len, n->host_hdr_len,
- vdev->guest_features);
+ VIRTIO_FEATURES_PR(vdev->guest_features_ex));
}
err = -1;
goto err;
@@ -3011,8 +3036,8 @@ static int virtio_net_pre_load_queues(VirtIODevice *vdev, uint32_t n)
return 0;
}
-static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
- Error **errp)
+static void virtio_net_get_features(VirtIODevice *vdev, uint64_t *features,
+ Error **errp)
{
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_queue(n->nic);
@@ -3026,68 +3051,67 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
(supported_hash_types & peer_hash_types) == supported_hash_types;
/* Firstly sync all virtio-net possible supported features */
- features |= n->host_features;
+ virtio_features_or(features, features, n->host_features_ex);
- virtio_add_feature(&features, VIRTIO_NET_F_MAC);
+ virtio_add_feature_ex(features, VIRTIO_NET_F_MAC);
if (!peer_has_vnet_hdr(n)) {
- virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
- virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
- virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
- virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_CSUM);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_TSO4);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_TSO6);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_ECN);
- virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
- virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
- virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
- virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_CSUM);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_TSO4);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_TSO6);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_ECN);
- virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
- virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
- virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_USO);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO4);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO6);
- virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_HASH_REPORT);
}
if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
- virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
- virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_UFO);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_UFO);
}
-
if (!peer_has_uso(n)) {
- virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
- virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
- virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_USO);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO4);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO6);
}
if (!get_vhost_net(nc->peer)) {
if (!use_own_hash) {
- virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
- virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
- } else if (virtio_has_feature(features, VIRTIO_NET_F_RSS)) {
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_HASH_REPORT);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_RSS);
+ } else if (virtio_has_feature_ex(features, VIRTIO_NET_F_RSS)) {
virtio_net_load_ebpf(n, errp);
}
- return features;
+ return;
}
if (!use_peer_hash) {
- virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_HASH_REPORT);
if (!use_own_hash || !virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
if (!virtio_net_load_ebpf(n, errp)) {
- return features;
+ return;
}
- virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_RSS);
}
}
- features = vhost_net_get_features(get_vhost_net(nc->peer), features);
- vdev->backend_features = features;
+ vhost_net_get_features_ex(get_vhost_net(nc->peer), features);
+ virtio_features_copy(vdev->backend_features_ex, features);
if (n->mtu_bypass_backend &&
(n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
- features |= (1ULL << VIRTIO_NET_F_MTU);
+ virtio_add_feature_ex(features, VIRTIO_NET_F_MTU);
}
/*
@@ -3102,10 +3126,8 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
* support it.
*/
if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) {
- virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE);
+ virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_ANNOUNCE);
}
-
- return features;
}
static int virtio_net_post_load_device(void *opaque, int version_id)
@@ -4237,8 +4259,8 @@ static void virtio_net_class_init(ObjectClass *klass, const void *data)
vdc->unrealize = virtio_net_device_unrealize;
vdc->get_config = virtio_net_get_config;
vdc->set_config = virtio_net_set_config;
- vdc->get_features = virtio_net_get_features;
- vdc->set_features = virtio_net_set_features;
+ vdc->get_features_ex = virtio_net_get_features;
+ vdc->set_features_ex = virtio_net_set_features;
vdc->bad_features = virtio_net_bad_features;
vdc->reset = virtio_net_reset;
vdc->queue_reset = virtio_net_queue_reset;
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 73fdefc0dc..5b8ab7bda7 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -182,7 +182,7 @@ struct VirtIONet {
uint32_t has_vnet_hdr;
size_t host_hdr_len;
size_t guest_hdr_len;
- uint64_t host_features;
+ VIRTIO_DECLARE_FEATURES(host_features);
uint32_t rsc_timeout;
uint8_t rsc4_enabled;
uint8_t rsc6_enabled;
--
2.50.0