qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v3 0/6] Vhost-vdpa Shadow Virtqueue multiqueue support.
@ 2022-08-25 19:03 Eugenio Pérez
  2022-08-25 19:03 ` [PATCH v3 1/6] vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type Eugenio Pérez
                   ` (5 more replies)
  0 siblings, 6 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-08-25 19:03 UTC (permalink / raw)
  To: qemu-devel
  Cc: Cornelia Huck, Si-Wei Liu, Parav Pandit, Jason Wang,
	Michael S. Tsirkin, Harpreet Singh Anand, Stefan Hajnoczi,
	Zhu Lingshan, Cindy Lu, Gautam Dawar, Paolo Bonzini, Eli Cohen,
	Gonglei (Arei), Liuxiangdong, Laurent Vivier, Stefano Garzarella

This series enables the shadowed CVQ to intercept multiqueue commands,
updates the virtio NIC device model so qemu sends them in a migration,
and restores that MQ state in the destination.

It needs to be applied on top of [1].

[1] https://lists.gnu.org/archive/html/qemu-devel/2022-08/msg02965.html

v2:
* Add vhost_vdpa_net_load_cmd helper to avoid out buffers castings.
* Make cvq_cmd_in_buffer virtio_net_ctrl_ack type.

Eugenio Pérez (6):
  vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type
  vdpa: extract vhost_vdpa_net_load_mac from vhost_vdpa_net_load
  vdpa: Add vhost_vdpa_net_load_mq
  vdpa: validate MQ CVQ commands
  virtio-net: Update virtio-net curr_queue_pairs in vdpa backends
  vdpa: Allow MQ feature in SVQ

 hw/net/virtio-net.c |  17 +++----
 net/vhost-vdpa.c    | 119 ++++++++++++++++++++++++++++++++------------
 2 files changed, 93 insertions(+), 43 deletions(-)

-- 
2.31.1




^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v3 1/6] vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type
  2022-08-25 19:03 [PATCH v3 0/6] Vhost-vdpa Shadow Virtqueue multiqueue support Eugenio Pérez
@ 2022-08-25 19:03 ` Eugenio Pérez
  2022-08-25 19:03 ` [PATCH v3 2/6] vdpa: extract vhost_vdpa_net_load_mac from vhost_vdpa_net_load Eugenio Pérez
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-08-25 19:03 UTC (permalink / raw)
  To: qemu-devel
  Cc: Cornelia Huck, Si-Wei Liu, Parav Pandit, Jason Wang,
	Michael S. Tsirkin, Harpreet Singh Anand, Stefan Hajnoczi,
	Zhu Lingshan, Cindy Lu, Gautam Dawar, Paolo Bonzini, Eli Cohen,
	Gonglei (Arei), Liuxiangdong, Laurent Vivier, Stefano Garzarella

This allows the code to be simplified. Rename to status while we're at it.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
v3: Rename it to status.
---
 net/vhost-vdpa.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 6ce68fcd3f..535315c1d0 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -35,7 +35,9 @@ typedef struct VhostVDPAState {
     VHostNetState *vhost_net;
 
     /* Control commands shadow buffers */
-    void *cvq_cmd_out_buffer, *cvq_cmd_in_buffer;
+    void *cvq_cmd_out_buffer;
+    virtio_net_ctrl_ack *status;
+
     bool started;
 } VhostVDPAState;
 
@@ -158,7 +160,7 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
     struct vhost_dev *dev = &s->vhost_net->dev;
 
     qemu_vfree(s->cvq_cmd_out_buffer);
-    qemu_vfree(s->cvq_cmd_in_buffer);
+    qemu_vfree(s->status);
     if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
         g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
     }
@@ -310,7 +312,7 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
         return r;
     }
 
-    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer,
+    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                                vhost_vdpa_net_cvq_cmd_page_len(), true);
     if (unlikely(r < 0)) {
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
@@ -327,7 +329,7 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
 
     if (s->vhost_vdpa.shadow_vqs_enabled) {
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
-        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer);
+        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
     }
 }
 
@@ -340,7 +342,7 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
         .iov_len = out_len,
     };
     const struct iovec in = {
-        .iov_base = s->cvq_cmd_in_buffer,
+        .iov_base = s->status,
         .iov_len = sizeof(virtio_net_ctrl_ack),
     };
     VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
@@ -396,7 +398,7 @@ static int vhost_vdpa_net_load(NetClientState *nc)
             return dev_written;
         }
 
-        return *((virtio_net_ctrl_ack *)s->cvq_cmd_in_buffer) != VIRTIO_NET_OK;
+        return *s->status != VIRTIO_NET_OK;
     }
 
     return 0;
@@ -491,8 +493,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
         goto out;
     }
 
-    memcpy(&status, s->cvq_cmd_in_buffer, sizeof(status));
-    if (status != VIRTIO_NET_OK) {
+    if (*s->status != VIRTIO_NET_OK) {
         return VIRTIO_NET_ERR;
     }
 
@@ -549,9 +550,9 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
         s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
                                             vhost_vdpa_net_cvq_cmd_page_len());
         memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
-        s->cvq_cmd_in_buffer = qemu_memalign(qemu_real_host_page_size(),
-                                            vhost_vdpa_net_cvq_cmd_page_len());
-        memset(s->cvq_cmd_in_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
+        s->status = qemu_memalign(qemu_real_host_page_size(),
+                                  vhost_vdpa_net_cvq_cmd_page_len());
+        memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len());
 
         s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
         s->vhost_vdpa.shadow_vq_ops_opaque = s;
-- 
2.31.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v3 2/6] vdpa: extract vhost_vdpa_net_load_mac from vhost_vdpa_net_load
  2022-08-25 19:03 [PATCH v3 0/6] Vhost-vdpa Shadow Virtqueue multiqueue support Eugenio Pérez
  2022-08-25 19:03 ` [PATCH v3 1/6] vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type Eugenio Pérez
@ 2022-08-25 19:03 ` Eugenio Pérez
  2022-08-25 19:03 ` [PATCH v3 3/6] vdpa: Add vhost_vdpa_net_load_mq Eugenio Pérez
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-08-25 19:03 UTC (permalink / raw)
  To: qemu-devel
  Cc: Cornelia Huck, Si-Wei Liu, Parav Pandit, Jason Wang,
	Michael S. Tsirkin, Harpreet Singh Anand, Stefan Hajnoczi,
	Zhu Lingshan, Cindy Lu, Gautam Dawar, Paolo Bonzini, Eli Cohen,
	Gonglei (Arei), Liuxiangdong, Laurent Vivier, Stefano Garzarella

Since there may be many commands we need to issue to load the NIC
state, let's split them into individual functions

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
--
v2: Add vhost_vdpa_net_load_cmd helper
v3: Move ctrl header definition to vhost_vdpa_net_load_cmd
---
 net/vhost-vdpa.c | 62 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 40 insertions(+), 22 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 535315c1d0..e799e744cd 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -365,12 +365,47 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
     return vhost_svq_poll(svq);
 }
 
+static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
+                                       uint8_t cmd, const void *data,
+                                       size_t data_size)
+{
+    const struct virtio_net_ctrl_hdr ctrl = {
+        .class = class,
+        .cmd = cmd,
+    };
+
+    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
+
+    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
+    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);
+
+    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
+                                  sizeof(virtio_net_ctrl_ack));
+}
+
+static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
+{
+    uint64_t features = n->parent_obj.guest_features;
+    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
+                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
+                                                  n->mac, sizeof(n->mac));
+        if (unlikely(dev_written < 0)) {
+            return dev_written;
+        }
+
+        return *s->status != VIRTIO_NET_OK;
+    }
+
+    return 0;
+}
+
 static int vhost_vdpa_net_load(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
-    const struct vhost_vdpa *v = &s->vhost_vdpa;
+    struct vhost_vdpa *v = &s->vhost_vdpa;
     const VirtIONet *n;
-    uint64_t features;
+    int r;
 
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
@@ -379,26 +414,9 @@ static int vhost_vdpa_net_load(NetClientState *nc)
     }
 
     n = VIRTIO_NET(v->dev->vdev);
-    features = n->parent_obj.guest_features;
-    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
-        const struct virtio_net_ctrl_hdr ctrl = {
-            .class = VIRTIO_NET_CTRL_MAC,
-            .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
-        };
-        char *cursor = s->cvq_cmd_out_buffer;
-        ssize_t dev_written;
-
-        memcpy(cursor, &ctrl, sizeof(ctrl));
-        cursor += sizeof(ctrl);
-        memcpy(cursor, n->mac, sizeof(n->mac));
-
-        dev_written = vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + sizeof(n->mac),
-                                             sizeof(virtio_net_ctrl_ack));
-        if (unlikely(dev_written < 0)) {
-            return dev_written;
-        }
-
-        return *s->status != VIRTIO_NET_OK;
+    r = vhost_vdpa_net_load_mac(s, n);
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
-- 
2.31.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v3 3/6] vdpa: Add vhost_vdpa_net_load_mq
  2022-08-25 19:03 [PATCH v3 0/6] Vhost-vdpa Shadow Virtqueue multiqueue support Eugenio Pérez
  2022-08-25 19:03 ` [PATCH v3 1/6] vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type Eugenio Pérez
  2022-08-25 19:03 ` [PATCH v3 2/6] vdpa: extract vhost_vdpa_net_load_mac from vhost_vdpa_net_load Eugenio Pérez
@ 2022-08-25 19:03 ` Eugenio Pérez
  2022-08-25 19:03 ` [PATCH v3 4/6] vdpa: validate MQ CVQ commands Eugenio Pérez
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-08-25 19:03 UTC (permalink / raw)
  To: qemu-devel
  Cc: Cornelia Huck, Si-Wei Liu, Parav Pandit, Jason Wang,
	Michael S. Tsirkin, Harpreet Singh Anand, Stefan Hajnoczi,
	Zhu Lingshan, Cindy Lu, Gautam Dawar, Paolo Bonzini, Eli Cohen,
	Gonglei (Arei), Liuxiangdong, Laurent Vivier, Stefano Garzarella

In the same way as with the MAC, restore the expected number of queues at
the device's start.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 net/vhost-vdpa.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index e799e744cd..3950e4f25d 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -400,6 +400,28 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
     return 0;
 }
 
+static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
+                                  const VirtIONet *n)
+{
+    struct virtio_net_ctrl_mq mq;
+    uint64_t features = n->parent_obj.guest_features;
+    ssize_t dev_written;
+
+    if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) {
+        return 0;
+    }
+
+    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
+    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
+                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
+                                          sizeof(mq));
+    if (unlikely(dev_written < 0)) {
+        return dev_written;
+    }
+
+    return *s->status != VIRTIO_NET_OK;
+}
+
 static int vhost_vdpa_net_load(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
@@ -418,6 +440,10 @@ static int vhost_vdpa_net_load(NetClientState *nc)
     if (unlikely(r < 0)) {
         return r;
     }
+    r = vhost_vdpa_net_load_mq(s, n);
+    if (unlikely(r)) {
+        return r;
+    }
 
     return 0;
 }
-- 
2.31.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v3 4/6] vdpa: validate MQ CVQ commands
  2022-08-25 19:03 [PATCH v3 0/6] Vhost-vdpa Shadow Virtqueue multiqueue support Eugenio Pérez
                   ` (2 preceding siblings ...)
  2022-08-25 19:03 ` [PATCH v3 3/6] vdpa: Add vhost_vdpa_net_load_mq Eugenio Pérez
@ 2022-08-25 19:03 ` Eugenio Pérez
  2022-08-25 19:03 ` [PATCH v3 5/6] virtio-net: Update virtio-net curr_queue_pairs in vdpa backends Eugenio Pérez
  2022-08-25 19:03 ` [PATCH v3 6/6] vdpa: Allow MQ feature in SVQ Eugenio Pérez
  5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-08-25 19:03 UTC (permalink / raw)
  To: qemu-devel
  Cc: Cornelia Huck, Si-Wei Liu, Parav Pandit, Jason Wang,
	Michael S. Tsirkin, Harpreet Singh Anand, Stefan Hajnoczi,
	Zhu Lingshan, Cindy Lu, Gautam Dawar, Paolo Bonzini, Eli Cohen,
	Gonglei (Arei), Liuxiangdong, Laurent Vivier, Stefano Garzarella

So we are sure we can update the device model properly before sending
the commands to the device.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 net/vhost-vdpa.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 3950e4f25d..c6cbe2fb5c 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -486,6 +486,15 @@ static bool vhost_vdpa_net_cvq_validate_cmd(const void *out_buf, size_t len)
                           __func__, ctrl.cmd);
         };
         break;
+    case VIRTIO_NET_CTRL_MQ:
+        switch (ctrl.cmd) {
+        case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
+            return true;
+        default:
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid mq cmd %u\n",
+                          __func__, ctrl.cmd);
+        };
+        break;
     default:
         qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid control class %u\n",
                       __func__, ctrl.class);
-- 
2.31.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v3 5/6] virtio-net: Update virtio-net curr_queue_pairs in vdpa backends
  2022-08-25 19:03 [PATCH v3 0/6] Vhost-vdpa Shadow Virtqueue multiqueue support Eugenio Pérez
                   ` (3 preceding siblings ...)
  2022-08-25 19:03 ` [PATCH v3 4/6] vdpa: validate MQ CVQ commands Eugenio Pérez
@ 2022-08-25 19:03 ` Eugenio Pérez
  2022-08-25 19:03 ` [PATCH v3 6/6] vdpa: Allow MQ feature in SVQ Eugenio Pérez
  5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-08-25 19:03 UTC (permalink / raw)
  To: qemu-devel
  Cc: Cornelia Huck, Si-Wei Liu, Parav Pandit, Jason Wang,
	Michael S. Tsirkin, Harpreet Singh Anand, Stefan Hajnoczi,
	Zhu Lingshan, Cindy Lu, Gautam Dawar, Paolo Bonzini, Eli Cohen,
	Gonglei (Arei), Liuxiangdong, Laurent Vivier, Stefano Garzarella

It was returned as error before. Instead of it, simply update the
corresponding field so qemu can send it in the migration data.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/net/virtio-net.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index dd0d056fde..63a8332cd0 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1412,19 +1412,14 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
         return VIRTIO_NET_ERR;
     }
 
-    /* Avoid changing the number of queue_pairs for vdpa device in
-     * userspace handler. A future fix is needed to handle the mq
-     * change in userspace handler with vhost-vdpa. Let's disable
-     * the mq handling from userspace for now and only allow get
-     * done through the kernel. Ripples may be seen when falling
-     * back to userspace, but without doing it qemu process would
-     * crash on a recursive entry to virtio_net_set_status().
-     */
+    n->curr_queue_pairs = queue_pairs;
     if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
-        return VIRTIO_NET_ERR;
+        /*
+         * Avoid updating the backend for a vdpa device: We're only interested
+         * in updating the device model queues.
+         */
+        return VIRTIO_NET_OK;
     }
-
-    n->curr_queue_pairs = queue_pairs;
     /* stop the backend before changing the number of queue_pairs to avoid handling a
      * disabled queue */
     virtio_net_set_status(vdev, vdev->status);
-- 
2.31.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v3 6/6] vdpa: Allow MQ feature in SVQ
  2022-08-25 19:03 [PATCH v3 0/6] Vhost-vdpa Shadow Virtqueue multiqueue support Eugenio Pérez
                   ` (4 preceding siblings ...)
  2022-08-25 19:03 ` [PATCH v3 5/6] virtio-net: Update virtio-net curr_queue_pairs in vdpa backends Eugenio Pérez
@ 2022-08-25 19:03 ` Eugenio Pérez
  5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-08-25 19:03 UTC (permalink / raw)
  To: qemu-devel
  Cc: Cornelia Huck, Si-Wei Liu, Parav Pandit, Jason Wang,
	Michael S. Tsirkin, Harpreet Singh Anand, Stefan Hajnoczi,
	Zhu Lingshan, Cindy Lu, Gautam Dawar, Paolo Bonzini, Eli Cohen,
	Gonglei (Arei), Liuxiangdong, Laurent Vivier, Stefano Garzarella

Finally enable SVQ with MQ feature.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 net/vhost-vdpa.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index c6cbe2fb5c..4bc3fd01a8 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -94,6 +94,7 @@ static const uint64_t vdpa_svq_device_features =
     BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
     BIT_ULL(VIRTIO_NET_F_STATUS) |
     BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
+    BIT_ULL(VIRTIO_NET_F_MQ) |
     BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
     BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
     BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
-- 
2.31.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2022-08-25 19:18 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-08-25 19:03 [PATCH v3 0/6] Vhost-vdpa Shadow Virtqueue multiqueue support Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 1/6] vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 2/6] vdpa: extract vhost_vdpa_net_load_mac from vhost_vdpa_net_load Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 3/6] vdpa: Add vhost_vdpa_net_load_mq Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 4/6] vdpa: validate MQ CVQ commands Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 5/6] virtio-net: Update virtio-net curr_queue_pairs in vdpa backends Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 6/6] vdpa: Allow MQ feature in SVQ Eugenio Pérez

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).