From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
	Paolo Abeni <pabeni@redhat.com>,
	Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>,
	Jason Wang <jasowang@redhat.com>, Lei Yang <leiyang@redhat.com>,
	Stefano Garzarella <sgarzare@redhat.com>
Subject: [PULL 12/75] virtio-net: implement extended features support
Date: Sun, 5 Oct 2025 15:16:27 -0400
Message-ID: <3a7741c3bdc3537de4159418d712debbd22e4df6.1759691708.git.mst@redhat.com>
In-Reply-To: <cover.1759691708.git.mst@redhat.com>

From: Paolo Abeni <pabeni@redhat.com>

Use the extended types and helpers to manipulate the virtio_net
features.

Note that offloads remain 64 bits wide, as per the specification,
and extended offloads will be mapped into that range.
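
For reference, a minimal standalone sketch of that feature-to-offload
mapping (the constants and the mapping expression mirror the hunks
below; the free-standing helper name, the sample feature words and the
values in main() are made up purely for illustration):

/*
 * features[] is the split 128-bit feature space: features[0] holds
 * bits 0..63, features[1] holds bits 64..127.  Extended feature bits
 * starting at 65 are folded into the spare offload bits 46..49.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAKE_64BIT_MASK(shift, length) \
    ((~0ULL >> (64 - (length))) << (shift))

#define VIRTIO_NET_OFFLOAD_MAP_MIN    46
#define VIRTIO_NET_OFFLOAD_MAP_LENGTH 4
#define VIRTIO_NET_OFFLOAD_MAP        MAKE_64BIT_MASK(                    \
                                              VIRTIO_NET_OFFLOAD_MAP_MIN, \
                                              VIRTIO_NET_OFFLOAD_MAP_LENGTH)
#define VIRTIO_NET_FEATURES_MAP_MIN   65
#define VIRTIO_NET_F2O_SHIFT          (VIRTIO_NET_OFFLOAD_MAP_MIN - \
                                       VIRTIO_NET_FEATURES_MAP_MIN + 64)

static uint64_t features_to_offload(const uint64_t *features)
{
    /* keep the plain 64-bit offloads, then fold bits 65+ into 46..49 */
    return (features[0] & ~VIRTIO_NET_OFFLOAD_MAP) |
           ((features[1] << VIRTIO_NET_F2O_SHIFT) & VIRTIO_NET_OFFLOAD_MAP);
}

int main(void)
{
    /* hypothetical guest ack: GUEST_CSUM (bit 1) plus extended bit 65 */
    uint64_t features[2] = { 1ULL << 1, 1ULL << (65 - 64) };

    /* extended bit 65 lands on offload bit 46: prints 0x0000400000000002 */
    printf("offloads = 0x%016" PRIx64 "\n", features_to_offload(features));
    return 0;
}

VIRTIO_NET_F2O_SHIFT works out to 45, so feature bit 65 (bit 1 of
features[1]) maps onto offload bit 46, and so on through the 4-bit
VIRTIO_NET_OFFLOAD_MAP window.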

Reviewed-by: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Acked-by: Stefano Garzarella <sgarzare@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Message-ID: <bc5afdc5c1cb1a37238dd2b36004db3d46cbf211.1758549625.git.pabeni@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 include/hw/virtio/virtio-net.h |   2 +-
 hw/net/virtio-net.c            | 140 +++++++++++++++++++--------------
 2 files changed, 83 insertions(+), 59 deletions(-)

diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 73fdefc0dc..5b8ab7bda7 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -182,7 +182,7 @@ struct VirtIONet {
     uint32_t has_vnet_hdr;
     size_t host_hdr_len;
     size_t guest_hdr_len;
-    uint64_t host_features;
+    VIRTIO_DECLARE_FEATURES(host_features);
     uint32_t rsc_timeout;
     uint8_t rsc4_enabled;
     uint8_t rsc6_enabled;
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 129bebd82e..39f2a8c987 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -90,6 +90,19 @@
                                          VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
                                          VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
 
+/*
+ * Features starting from VIRTIO_NET_FEATURES_MAP_MIN bit correspond
+ * to guest offloads in the VIRTIO_NET_OFFLOAD_MAP range
+ */
+#define VIRTIO_NET_OFFLOAD_MAP_MIN    46
+#define VIRTIO_NET_OFFLOAD_MAP_LENGTH 4
+#define VIRTIO_NET_OFFLOAD_MAP        MAKE_64BIT_MASK(                    \
+                                              VIRTIO_NET_OFFLOAD_MAP_MIN, \
+                                              VIRTIO_NET_OFFLOAD_MAP_LENGTH)
+#define VIRTIO_NET_FEATURES_MAP_MIN   65
+#define VIRTIO_NET_F2O_SHIFT          (VIRTIO_NET_OFFLOAD_MAP_MIN - \
+                                       VIRTIO_NET_FEATURES_MAP_MIN + 64)
+
 static const VirtIOFeature feature_sizes[] = {
     {.flags = 1ULL << VIRTIO_NET_F_MAC,
      .end = endof(struct virtio_net_config, mac)},
@@ -786,7 +799,14 @@ static void virtio_net_apply_guest_offloads(VirtIONet *n)
     qemu_set_offload(qemu_get_queue(n->nic)->peer, &ol);
 }
 
-static uint64_t virtio_net_guest_offloads_by_features(uint64_t features)
+static uint64_t virtio_net_features_to_offload(const uint64_t *features)
+{
+    return (features[0] & ~VIRTIO_NET_OFFLOAD_MAP) |
+           ((features[1] << VIRTIO_NET_F2O_SHIFT) & VIRTIO_NET_OFFLOAD_MAP);
+}
+
+static uint64_t
+virtio_net_guest_offloads_by_features(const uint64_t *features)
 {
     static const uint64_t guest_offloads_mask =
         (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
@@ -797,13 +817,13 @@ static uint64_t virtio_net_guest_offloads_by_features(uint64_t features)
         (1ULL << VIRTIO_NET_F_GUEST_USO4) |
         (1ULL << VIRTIO_NET_F_GUEST_USO6);
 
-    return guest_offloads_mask & features;
+    return guest_offloads_mask & virtio_net_features_to_offload(features);
 }
 
 uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
-    return virtio_net_guest_offloads_by_features(vdev->guest_features);
+    return virtio_net_guest_offloads_by_features(vdev->guest_features_ex);
 }
 
 typedef struct {
@@ -882,34 +902,39 @@ static void failover_add_primary(VirtIONet *n, Error **errp)
     error_propagate(errp, err);
 }
 
-static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
+static void virtio_net_set_features(VirtIODevice *vdev,
+                                    const uint64_t *in_features)
 {
+    uint64_t features[VIRTIO_FEATURES_NU64S];
     VirtIONet *n = VIRTIO_NET(vdev);
     Error *err = NULL;
     int i;
 
+    virtio_features_copy(features, in_features);
     if (n->mtu_bypass_backend &&
             !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
-        features &= ~(1ULL << VIRTIO_NET_F_MTU);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_MTU);
     }
 
     virtio_net_set_multiqueue(n,
-                              virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
-                              virtio_has_feature(features, VIRTIO_NET_F_MQ));
+                              virtio_has_feature_ex(features,
+                                                    VIRTIO_NET_F_RSS) ||
+                              virtio_has_feature_ex(features,
+                                                    VIRTIO_NET_F_MQ));
 
     virtio_net_set_mrg_rx_bufs(n,
-                               virtio_has_feature(features,
+                               virtio_has_feature_ex(features,
                                                   VIRTIO_NET_F_MRG_RXBUF),
-                               virtio_has_feature(features,
+                               virtio_has_feature_ex(features,
                                                   VIRTIO_F_VERSION_1),
-                               virtio_has_feature(features,
+                               virtio_has_feature_ex(features,
                                                   VIRTIO_NET_F_HASH_REPORT));
 
-    n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
-        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
-    n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
-        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
-    n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS);
+    n->rsc4_enabled = virtio_has_feature_ex(features, VIRTIO_NET_F_RSC_EXT) &&
+        virtio_has_feature_ex(features, VIRTIO_NET_F_GUEST_TSO4);
+    n->rsc6_enabled = virtio_has_feature_ex(features, VIRTIO_NET_F_RSC_EXT) &&
+        virtio_has_feature_ex(features, VIRTIO_NET_F_GUEST_TSO6);
+    n->rss_data.redirect = virtio_has_feature_ex(features, VIRTIO_NET_F_RSS);
 
     if (n->has_vnet_hdr) {
         n->curr_guest_offloads =
@@ -923,7 +948,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
         if (!get_vhost_net(nc->peer)) {
             continue;
         }
-        vhost_net_ack_features(get_vhost_net(nc->peer), features);
+        vhost_net_ack_features_ex(get_vhost_net(nc->peer), features);
 
         /*
          * keep acked_features in NetVhostUserState up-to-date so it
@@ -932,12 +957,14 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
         vhost_net_save_acked_features(nc->peer);
     }
 
-    if (virtio_has_feature(vdev->guest_features ^ features, VIRTIO_NET_F_CTRL_VLAN)) {
-        bool vlan = virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN);
+    if (virtio_has_feature_ex(features, VIRTIO_NET_F_CTRL_VLAN) !=
+        virtio_has_feature_ex(vdev->guest_features_ex,
+                              VIRTIO_NET_F_CTRL_VLAN)) {
+        bool vlan = virtio_has_feature_ex(features, VIRTIO_NET_F_CTRL_VLAN);
         memset(n->vlans, vlan ? 0 : 0xff, MAX_VLAN >> 3);
     }
 
-    if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
+    if (virtio_has_feature_ex(features, VIRTIO_NET_F_STANDBY)) {
         qapi_event_send_failover_negotiated(n->netclient_name);
         qatomic_set(&n->failover_primary_hidden, false);
         failover_add_primary(n, &err);
@@ -1908,10 +1935,10 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
                 virtio_error(vdev, "virtio-net unexpected empty queue: "
                              "i %zd mergeable %d offset %zd, size %zd, "
                              "guest hdr len %zd, host hdr len %zd "
-                             "guest features 0x%" PRIx64,
+                             "guest features 0x" VIRTIO_FEATURES_FMT,
                              i, n->mergeable_rx_bufs, offset, size,
                              n->guest_hdr_len, n->host_hdr_len,
-                             vdev->guest_features);
+                             VIRTIO_FEATURES_PR(vdev->guest_features_ex));
             }
             err = -1;
             goto err;
@@ -3018,8 +3045,8 @@ static int virtio_net_pre_load_queues(VirtIODevice *vdev, uint32_t n)
     return 0;
 }
 
-static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
-                                        Error **errp)
+static void virtio_net_get_features(VirtIODevice *vdev, uint64_t *features,
+                                    Error **errp)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
     NetClientState *nc = qemu_get_queue(n->nic);
@@ -3033,68 +3060,67 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
         (supported_hash_types & peer_hash_types) == supported_hash_types;
 
     /* Firstly sync all virtio-net possible supported features */
-    features |= n->host_features;
+    virtio_features_or(features, features, n->host_features_ex);
 
-    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
+    virtio_add_feature_ex(features, VIRTIO_NET_F_MAC);
 
     if (!peer_has_vnet_hdr(n)) {
-        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
-        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
-        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
-        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_CSUM);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_TSO4);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_TSO6);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_ECN);
 
-        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
-        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
-        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
-        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_CSUM);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_TSO4);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_TSO6);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_ECN);
 
-        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
-        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
-        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_USO);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO4);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO6);
 
-        virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_HASH_REPORT);
     }
 
     if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
-        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
-        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_UFO);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_UFO);
     }
-
     if (!peer_has_uso(n)) {
-        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
-        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
-        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_USO);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO4);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO6);
     }
 
     if (!get_vhost_net(nc->peer)) {
         if (!use_own_hash) {
-            virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
-            virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
-        } else if (virtio_has_feature(features, VIRTIO_NET_F_RSS)) {
+            virtio_clear_feature_ex(features, VIRTIO_NET_F_HASH_REPORT);
+            virtio_clear_feature_ex(features, VIRTIO_NET_F_RSS);
+        } else if (virtio_has_feature_ex(features, VIRTIO_NET_F_RSS)) {
             virtio_net_load_ebpf(n, errp);
         }
 
-        return features;
+        return;
     }
 
     if (!use_peer_hash) {
-        virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_HASH_REPORT);
 
         if (!use_own_hash || !virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
             if (!virtio_net_load_ebpf(n, errp)) {
-                return features;
+                return;
             }
 
-            virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
+            virtio_clear_feature_ex(features, VIRTIO_NET_F_RSS);
         }
     }
 
-    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
-    vdev->backend_features = features;
+    vhost_net_get_features_ex(get_vhost_net(nc->peer), features);
+    virtio_features_copy(vdev->backend_features_ex, features);
 
     if (n->mtu_bypass_backend &&
             (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
-        features |= (1ULL << VIRTIO_NET_F_MTU);
+        virtio_add_feature_ex(features, VIRTIO_NET_F_MTU);
     }
 
     /*
@@ -3109,10 +3135,8 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
      * support it.
      */
     if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) {
-        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE);
+        virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_ANNOUNCE);
     }
-
-    return features;
 }
 
 static int virtio_net_post_load_device(void *opaque, int version_id)
@@ -4244,8 +4268,8 @@ static void virtio_net_class_init(ObjectClass *klass, const void *data)
     vdc->unrealize = virtio_net_device_unrealize;
     vdc->get_config = virtio_net_get_config;
     vdc->set_config = virtio_net_set_config;
-    vdc->get_features = virtio_net_get_features;
-    vdc->set_features = virtio_net_set_features;
+    vdc->get_features_ex = virtio_net_get_features;
+    vdc->set_features_ex = virtio_net_set_features;
     vdc->bad_features = virtio_net_bad_features;
     vdc->reset = virtio_net_reset;
     vdc->queue_reset = virtio_net_queue_reset;
-- 
MST


